| text (string, 12–1.05M chars) | repo_name (string, 5–86) | path (string, 4–191) | language (1 class) | license (15 classes) | size (int32, 12–1.05M) | keyword (list, 1–23 items) | text_hash (string, 64) |
|---|---|---|---|---|---|---|---|
# (c) 2014, Brian Coca <bcoca@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import collections
import itertools
import math
from ansible import errors
from ansible.module_utils import basic
from ansible.module_utils.six.moves import zip, zip_longest
def unique(a):
    if isinstance(a, collections.Hashable):
        c = set(a)
    else:
        c = []
        for x in a:
            if x not in c:
                c.append(x)
    return c

def intersect(a, b):
    if isinstance(a, collections.Hashable) and isinstance(b, collections.Hashable):
        c = set(a) & set(b)
    else:
        c = unique(filter(lambda x: x in b, a))
    return c

def difference(a, b):
    if isinstance(a, collections.Hashable) and isinstance(b, collections.Hashable):
        c = set(a) - set(b)
    else:
        c = unique(filter(lambda x: x not in b, a))
    return c

def symmetric_difference(a, b):
    if isinstance(a, collections.Hashable) and isinstance(b, collections.Hashable):
        c = set(a) ^ set(b)
    else:
        c = unique(filter(lambda x: x not in intersect(a, b), union(a, b)))
    return c

def union(a, b):
    if isinstance(a, collections.Hashable) and isinstance(b, collections.Hashable):
        c = set(a) | set(b)
    else:
        c = unique(a + b)
    return c

def min(a):
    # this filter shadows the builtin, so fetch the real builtin explicitly;
    # __builtins__ is a dict in imported modules (unlike in __main__)
    _min = __builtins__.get('min')
    return _min(a)

def max(a):
    _max = __builtins__.get('max')
    return _max(a)

def logarithm(x, base=math.e):
    try:
        if base == 10:
            return math.log10(x)
        else:
            return math.log(x, base)
    except TypeError as e:
        raise errors.AnsibleFilterError('log() can only be used on numbers: %s' % str(e))

def power(x, y):
    try:
        return math.pow(x, y)
    except TypeError as e:
        raise errors.AnsibleFilterError('pow() can only be used on numbers: %s' % str(e))

def inversepower(x, base=2):
    try:
        if base == 2:
            return math.sqrt(x)
        else:
            return math.pow(x, 1.0 / float(base))
    except TypeError as e:
        raise errors.AnsibleFilterError('root() can only be used on numbers: %s' % str(e))

def human_readable(size, isbits=False, unit=None):
    ''' Return a human readable string '''
    try:
        return basic.bytes_to_human(size, isbits, unit)
    except Exception:
        raise errors.AnsibleFilterError("human_readable() can't interpret following string: %s" % size)

def human_to_bytes(size, default_unit=None, isbits=False):
    ''' Return bytes count from a human readable string '''
    try:
        return basic.human_to_bytes(size, default_unit, isbits)
    except Exception:
        raise errors.AnsibleFilterError("human_to_bytes() can't interpret following string: %s" % size)
class FilterModule(object):
    ''' Ansible math jinja2 filters '''

    def filters(self):
        filters = {
            # general math
            'min': min,
            'max': max,
            # exponents and logarithms
            'log': logarithm,
            'pow': power,
            'root': inversepower,
            # set theory
            'unique': unique,
            'intersect': intersect,
            'difference': difference,
            'symmetric_difference': symmetric_difference,
            'union': union,
            # combinatorial
            'permutations': itertools.permutations,
            'combinations': itertools.combinations,
            # computer theory
            'human_readable': human_readable,
            'human_to_bytes': human_to_bytes,
            # zip
            'zip': zip,
            'zip_longest': zip_longest,
        }
        return filters
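# Illustrative behaviour of the helpers above (values assumed; in a playbook
# these run as Jinja2 filters, e.g. "{{ [1, 2] | union([2, 3]) }}"):
#     union([1, 2], [2, 3])                 -> [1, 2, 3]
#     intersect([1, 2], [2, 3])             -> [2]
#     symmetric_difference([1, 2], [2, 3])  -> [1, 3]
#     logarithm(100, 10)                    -> 2.0
#     power(2, 10)                          -> 1024.0
#     inversepower(16)                      -> 4.0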
| nrwahl2/ansible | lib/ansible/plugins/filter/mathstuff.py | Python | gpl-3.0 | 4,378 | ["Brian"] | e10d620055bb3b6c994b9e6140a358e1c699bfc202fba45bf9818019b9307113 |
# ============================================================================
#
# Copyright (C) 2007-2010 Conceptive Engineering bvba. All rights reserved.
# www.conceptive.be / project-camelot@conceptive.be
#
# This file is part of the Camelot Library.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file license.txt included in the packaging of
# this file. Please review this information to ensure GNU
# General Public Licensing requirements will be met.
#
# If you are unsure which license is appropriate for your use, please
# visit www.python-camelot.com or contact project-camelot@conceptive.be
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# For use of this library in commercial applications, please contact
# project-camelot@conceptive.be
#
# ============================================================================
"""Decorators to enhance the docstrings of classes
"""
def documented_entity():
    """Class decorator to append an image of the default view for
    an entity to an entity class. The image can be generated by using
    the testing framework to create images of all default views in an
    application ::

        @documented_entity()
        class Movie(Entity):
            '''A movie as played in the theater'''
            title = Field(Unicode(50))

    The resulting docstring of the Movie entity will be ::

        '''A movie as played in the theater

        .. image:: ../_static/entityviews/new_view_movie.png
        '''
    """

    def document_field( key, field ):
        from elixir import Field
        from elixir.relationships import Relationship
        if isinstance(field, Field):
            return '%s'%key
        if isinstance(field, Relationship):
            return '%s : refers to %s'%(key, unicode(field.of_kind))

    def document_entity(model):
        #
        # Add documentation on its fields
        #
        documented_fields = []
        for key, value in model.__dict__.items():
            doc = document_field( key, value )
            if doc:
                documented_fields.append( doc )
        model.__doc__ = (model.__doc__ or '') + """

.. image:: ../_static/entityviews/new_view_%s.png

**Fields** :
"""%(model.__name__.lower()) + ''.join('\n * %s'%(doc) for doc in documented_fields)
        return model

    return document_entity

def documented_type():
    """Class decorator to append an image of the default editor of
    a field type to the docstring of the type"""

    def document_type(field_type):
        # %s is filled with the type's class name (assumed, by analogy with
        # document_entity above; the extracted copy left it unformatted)
        field_type.__doc__ = (field_type.__doc__ or '') + """

.. image:: ../_static/editors/%s_editable.png
""" % field_type.__name__
        return field_type

    return document_type
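# Illustrative usage, mirroring the documented_entity docstring above
# (Movie and Entity are hypothetical application classes, not part of this
# module):
#     @documented_entity()
#     class Movie(Entity):
#         '''A movie as played in the theater'''
# Sphinx then renders the appended field list together with the
# ../_static/entityviews/new_view_movie.png screenshot.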
| kurtraschke/camelot | camelot/core/document/__init__.py | Python | gpl-2.0 | 2,903 | ["VisIt"] | 73e8ea903732b25d2a07c1b8fddd184d3b3b2d7e88639331db129e24f1198e24 |
# Version: 0.18-1
"""The Versioneer - like a rocketeer, but for versions.
The Versioneer
==============
* like a rocketeer, but for versions!
* https://github.com/warner/python-versioneer
* Brian Warner
* License: Public Domain
* Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, 3.5, 3.6, and pypy
* [![Latest Version]
(https://pypip.in/version/versioneer/badge.svg?style=flat)
](https://pypi.python.org/pypi/versioneer/)
* [![Build Status]
(https://travis-ci.org/warner/python-versioneer.png?branch=master)
](https://travis-ci.org/warner/python-versioneer)
This is a tool for managing a recorded version number in distutils-based
python projects. The goal is to remove the tedious and error-prone "update
the embedded version string" step from your release process. Making a new
release should be as easy as recording a new tag in your version-control
system, and maybe making new tarballs.
## Quick Install
* `pip install versioneer` to somewhere in your $PATH
* add a `[versioneer]` section to your setup.cfg (see below)
* run `versioneer install` in your source tree, commit the results
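A minimal `[versioneer]` section might look like this (the option names are
the ones read by `get_config_from_root()` below; the paths are illustrative
and must match your project layout):

    [versioneer]
    VCS = git
    style = pep440
    versionfile_source = src/myproject/_version.py
    versionfile_build = myproject/_version.py
    tag_prefix =
    parentdir_prefix = myproject-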
## Version Identifiers
Source trees come from a variety of places:
* a version-control system checkout (mostly used by developers)
* a nightly tarball, produced by build automation
* a snapshot tarball, produced by a web-based VCS browser, like github's
"tarball from tag" feature
* a release tarball, produced by "setup.py sdist", distributed through PyPI
Within each source tree, the version identifier (either a string or a number,
this tool is format-agnostic) can come from a variety of places:
* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows
about recent "tags" and an absolute revision-id
* the name of the directory into which the tarball was unpacked
* an expanded VCS keyword ($Id$, etc)
* a `_version.py` created by some earlier build step
For released software, the version identifier is closely related to a VCS
tag. Some projects use tag names that include more than just the version
string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool
needs to strip the tag prefix to extract the version identifier. For
unreleased software (between tags), the version identifier should provide
enough information to help developers recreate the same tree, while also
giving them an idea of roughly how old the tree is (after version 1.2, before
version 1.3). Many VCS systems can report a description that captures this,
for example `git describe --tags --dirty --always` reports things like
"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the
0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has
uncommitted changes).
The version identifier is used for multiple purposes:
* to allow the module to self-identify its version: `myproject.__version__`
* to choose a name and prefix for a 'setup.py sdist' tarball
## Theory of Operation
Versioneer works by adding a special `_version.py` file into your source
tree, where your `__init__.py` can import it. This `_version.py` knows how to
dynamically ask the VCS tool for version information at import time.
`_version.py` also contains `$Revision$` markers, and the installation
process marks `_version.py` to have this marker rewritten with a tag name
during the `git archive` command. As a result, generated tarballs will
contain enough information to get the proper version.
To allow `setup.py` to compute a version too, a `versioneer.py` is added to
the top level of your source tree, next to `setup.py` and the `setup.cfg`
that configures it. This overrides several distutils/setuptools commands to
compute the version when invoked, and changes `setup.py build` and `setup.py
sdist` to replace `_version.py` with a small static file that contains just
the generated version data.
## Installation
See [INSTALL.md](./INSTALL.md) for detailed installation instructions.
## Version-String Flavors
Code which uses Versioneer can learn about its version string at runtime by
importing `_version` from your main `__init__.py` file and running the
`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can
import the top-level `versioneer.py` and run `get_versions()`.
Both functions return a dictionary with different flavors of version
information:
* `['version']`: A condensed version string, rendered using the selected
style. This is the most commonly used value for the project's version
string. The default "pep440" style yields strings like `0.11`,
`0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section
below for alternative styles.
* `['full-revisionid']`: detailed revision identifier. For Git, this is the
full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac".
* `['date']`: Date and time of the latest `HEAD` commit. For Git, it is the
commit date in ISO 8601 format. This will be None if the date is not
available.
* `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that
this is only accurate if run in a VCS checkout, otherwise it is likely to
be False or None
* `['error']`: if the version string could not be computed, this will be set
to a string describing the problem, otherwise it will be None. It may be
useful to throw an exception in setup.py if this is set, to avoid e.g.
creating tarballs with a version string of "unknown".
Some variants are more useful than others. Including `full-revisionid` in a
bug report should allow developers to reconstruct the exact code being tested
(or indicate the presence of local changes that should be shared with the
developers). `version` is suitable for display in an "about" box or a CLI
`--version` output: it can be easily compared against release notes and lists
of bugs fixed in various releases.
The installer adds the following text to your `__init__.py` to place a basic
version in `YOURPROJECT.__version__`:
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
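In `setup.py`, the same information is typically consumed through the
top-level `versioneer.py` (a sketch; "myproject" is a placeholder):

    import versioneer
    from setuptools import setup
    setup(name="myproject",
          version=versioneer.get_version(),
          cmdclass=versioneer.get_cmdclass())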
## Styles
The setup.cfg `style=` configuration controls how the VCS information is
rendered into a version string.
The default style, "pep440", produces a PEP440-compliant string, equal to the
un-prefixed tag name for actual releases, and containing an additional "local
version" section with more detail for in-between builds. For Git, this is
TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags
--dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the
tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and
that this commit is two revisions ("+2") beyond the "0.11" tag. For released
software (exactly equal to a known tag), the identifier will only contain the
stripped tag, e.g. "0.11".
Other styles are available. See [details.md](details.md) in the Versioneer
source tree for descriptions.
## Debugging
Versioneer tries to avoid fatal errors: if something goes wrong, it will tend
to return a version of "0+unknown". To investigate the problem, run `setup.py
version`, which will run the version-lookup code in a verbose mode, and will
display the full contents of `get_versions()` (including the `error` string,
which may help identify what went wrong).
## Known Limitations
Some situations are known to cause problems for Versioneer. This section
details the most significant ones; more can be found on the GitHub
[issues page](https://github.com/warner/python-versioneer/issues).
### Subprojects
Versioneer has limited support for source trees in which `setup.py` is not in
the root directory (e.g. `setup.py` and `.git/` are *not* siblings). There are
two common reasons why `setup.py` might not be in the root:
* Source trees which contain multiple subprojects, such as
[Buildbot](https://github.com/buildbot/buildbot), which contains both
"master" and "slave" subprojects, each with their own `setup.py`,
`setup.cfg`, and `tox.ini`. Projects like these produce multiple PyPI
distributions (and upload multiple independently-installable tarballs).
* Source trees whose main purpose is to contain a C library, but which also
provide bindings to Python (and perhaps other languages) in subdirectories.
Versioneer will look for `.git` in parent directories, and most operations
should get the right version string. However `pip` and `setuptools` have bugs
and implementation details which frequently cause `pip install .` from a
subproject directory to fail to find a correct version string (so it usually
defaults to `0+unknown`).
`pip install --editable .` should work correctly. `setup.py install` might
work too.
Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in
some later version.
[Bug #38](https://github.com/warner/python-versioneer/issues/38) is tracking
this issue. The discussion in
[PR #61](https://github.com/warner/python-versioneer/pull/61) describes the
issue from the Versioneer side in more detail.
[pip PR#3176](https://github.com/pypa/pip/pull/3176) and
[pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve
pip to let Versioneer work correctly.
Versioneer-0.16 and earlier only looked for a `.git` directory next to the
`setup.cfg`, so subprojects were completely unsupported with those releases.
### Editable installs with setuptools <= 18.5
`setup.py develop` and `pip install --editable .` allow you to install a
project into a virtualenv once, then continue editing the source code (and
test) without re-installing after every change.
"Entry-point scripts" (`setup(entry_points={"console_scripts": ..})`) are a
convenient way to specify executable scripts that should be installed along
with the python package.
These both work as expected when using modern setuptools. When using
setuptools-18.5 or earlier, however, certain operations will cause
`pkg_resources.DistributionNotFound` errors when running the entrypoint
script, which must be resolved by re-installing the package. This happens
when the install happens with one version, then the egg_info data is
regenerated while a different version is checked out. Many setup.py commands
cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into
a different virtualenv), so this can be surprising.
[Bug #83](https://github.com/warner/python-versioneer/issues/83) describes
this one, but upgrading to a newer version of setuptools should probably
resolve it.
### Unicode version strings
While Versioneer works (and is continually tested) with both Python 2 and
Python 3, it is not entirely consistent with bytes-vs-unicode distinctions.
Newer releases probably generate unicode version strings on py2. It's not
clear that this is wrong, but it may be surprising for applications when they
write these strings to a network connection or include them in bytes-oriented
APIs like cryptographic checksums.
[Bug #71](https://github.com/warner/python-versioneer/issues/71) investigates
this question.
## Updating Versioneer
To upgrade your project to a new release of Versioneer, do the following:
* install the new Versioneer (`pip install -U versioneer` or equivalent)
* edit `setup.cfg`, if necessary, to include any new configuration settings
indicated by the release notes. See [UPGRADING](./UPGRADING.md) for details.
* re-run `versioneer install` in your source tree, to replace
`SRC/_version.py`
* commit any changed files
## Future Directions
This tool is designed to be easily extended to other version-control
systems: all VCS-specific components are in separate directories like
src/git/ . The top-level `versioneer.py` script is assembled from these
components by running make-versioneer.py . In the future, make-versioneer.py
will take a VCS name as an argument, and will construct a version of
`versioneer.py` that is specific to the given VCS. It might also take the
configuration arguments that are currently provided manually during
installation by editing setup.py . Alternatively, it might go the other
direction and include code from all supported VCS systems, reducing the
number of intermediate scripts.
## License
To make Versioneer easier to embed, all its code is dedicated to the public
domain. The `_version.py` that it creates is also in the public domain.
Specifically, both are released under the Creative Commons "Public Domain
Dedication" license (CC0-1.0), as described in
https://creativecommons.org/publicdomain/zero/1.0/ .
"""
from __future__ import print_function
try:
import configparser
except ImportError:
import ConfigParser as configparser
import errno
import json
import os
import re
import subprocess
import sys
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_root():
"""Get the project root directory.
We require that all commands are run from the project root, i.e. the
directory that contains setup.py, setup.cfg, and versioneer.py .
"""
root = os.path.realpath(os.path.abspath(os.getcwd()))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
# allow 'python path/to/setup.py COMMAND'
root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
        err = ("Versioneer was unable to find the project root directory. "
"Versioneer requires setup.py to be executed from "
"its immediate directory (like 'python setup.py COMMAND'), "
"or in a way that lets it use sys.argv[0] to find the root "
"(like 'python path/to/setup.py COMMAND').")
raise VersioneerBadRootError(err)
try:
# Certain runtime workflows (setup.py install/develop in a setuptools
# tree) execute all dependencies in a single python process, so
# "versioneer" may be imported multiple times, and python's shared
# module-import table will cache the first one. So we can't use
# os.path.dirname(__file__), as that will find whichever
# versioneer.py was first imported, even in later projects.
me = os.path.realpath(os.path.abspath(__file__))
me_dir = os.path.normcase(os.path.splitext(me)[0])
vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
if me_dir != vsr_dir:
print("Warning: build in %s is using versioneer.py from %s"
% (os.path.dirname(me), versioneer_py))
except NameError:
pass
return root
def get_config_from_root(root):
"""Read the project setup.cfg file to determine Versioneer config."""
# This might raise EnvironmentError (if setup.cfg is missing), or
# configparser.NoSectionError (if it lacks a [versioneer] section), or
# configparser.NoOptionError (if it lacks "VCS="). See the docstring at
# the top of versioneer.py for instructions on writing your setup.cfg .
setup_cfg = os.path.join(root, "setup.cfg")
parser = configparser.SafeConfigParser()
with open(setup_cfg, "r") as f:
parser.readfp(f)
VCS = parser.get("versioneer", "VCS") # mandatory
def get(parser, name):
if parser.has_option("versioneer", name):
return parser.get("versioneer", name)
return None
cfg = VersioneerConfig()
cfg.VCS = VCS
cfg.style = get(parser, "style") or ""
cfg.versionfile_source = get(parser, "versionfile_source")
cfg.versionfile_build = get(parser, "versionfile_build")
cfg.tag_prefix = get(parser, "tag_prefix")
if cfg.tag_prefix in ("''", '""'):
cfg.tag_prefix = ""
cfg.parentdir_prefix = get(parser, "parentdir_prefix")
cfg.verbose = get(parser, "verbose")
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
# these dictionaries contain VCS-specific tools
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
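# For example, later in this file
#     @register_vcs_handler("git", "keywords")
#     def git_versions_from_keywords(...): ...
# stores that function in HANDLERS["git"]["keywords"], where get_versions()
# looks it up by the VCS name configured in setup.cfg.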
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
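# Illustrative call (assumes a git binary on $PATH): ask git for the current
# commit id, hiding stderr noise if git fails:
#     out, rc = run_command(["git"], ["rev-parse", "HEAD"], hide_stderr=True)
# On success this yields ("<40-char sha>", 0); if no command could be run it
# yields (None, None), and on a non-zero exit it yields (None, returncode).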
LONG_VERSION_PY['git'] = '''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "%(STYLE)s"
cfg.tag_prefix = "%(TAG_PREFIX)s"
cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s"
cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% dispcmd)
print("stdout was %%s" %% stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %%s but none started with prefix %%s" %%
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs - tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %%s not under git control" %% root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%%s*" %% tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%%s'"
%% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%%s' doesn't start with prefix '%%s'"
print(fmt %% (full_tag, tag_prefix))
pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'"
%% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%%d" %% pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%%d" %% pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%%s" %% pieces["short"]
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%%s" %% pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
    Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%%s'" %% style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
'''
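# Note on the template above: the "%%" sequences inside LONG_VERSION_PY are
# escaped percent signs. They collapse to single "%" characters when
# get_cmdclass() below expands the template with
#     LONG % {"DOLLAR": "$", "STYLE": cfg.style, ...}
# and writes the result out as the project's _version.py.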
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def do_vcs_install(manifest_in, versionfile_source, ipy):
"""Git-specific installation logic for Versioneer.
For Git, this means creating/changing .gitattributes to mark _version.py
for export-subst keyword substitution.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
files = [manifest_in, versionfile_source]
if ipy:
files.append(ipy)
try:
me = __file__
if me.endswith(".pyc") or me.endswith(".pyo"):
me = os.path.splitext(me)[0] + ".py"
versioneer_file = os.path.relpath(me)
except NameError:
versioneer_file = "versioneer.py"
files.append(versioneer_file)
present = False
try:
f = open(".gitattributes", "r")
for line in f.readlines():
if line.strip().startswith(versionfile_source):
if "export-subst" in line.strip().split()[1:]:
present = True
f.close()
except EnvironmentError:
pass
if not present:
f = open(".gitattributes", "a+")
f.write("%s export-subst\n" % versionfile_source)
f.close()
files.append(".gitattributes")
run_command(GITS, ["add", "--"] + files)
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
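# Illustrative: a tarball that unpacks into "myproject-1.2.3/" combined with
# parentdir_prefix = "myproject-" yields {"version": "1.2.3", ...}; the loop
# above also tries up to two parent directories before giving up.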
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.18) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
import json
version_json = '''
%s
''' # END VERSION_JSON
def get_versions():
return json.loads(version_json)
"""
def versions_from_file(filename):
"""Try to determine the version from _version.py if present."""
try:
with open(filename) as f:
contents = f.read()
except EnvironmentError:
raise NotThisMethod("unable to read _version.py")
mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON",
contents, re.M | re.S)
if not mo:
mo = re.search(r"version_json = '''\r\n(.*)''' # END VERSION_JSON",
contents, re.M | re.S)
if not mo:
raise NotThisMethod("no version_json in _version.py")
return json.loads(mo.group(1))
def write_to_version_file(filename, versions):
"""Write the given version number to the given _version.py file."""
os.unlink(filename)
contents = json.dumps(versions, sort_keys=True,
indent=1, separators=(",", ": "))
with open(filename, "w") as f:
f.write(SHORT_VERSION_PY % contents)
print("set %s to '%s'" % (filename, versions["version"]))
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
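# Illustrative behaviour (pieces dicts assumed, not taken from a real repo):
#     render_pep440({"closest-tag": "0.11", "distance": 2,
#                    "short": "1076c97", "dirty": True})
#         -> "0.11+2.g1076c97.dirty"
#     render_pep440({"closest-tag": None, "distance": 7,
#                    "short": "abc1234", "dirty": False})
#         -> "0+untagged.7.gabc1234"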
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
    Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
class VersioneerBadRootError(Exception):
"""The project root directory is unknown or missing key files."""
def get_versions(verbose=False):
"""Get the project version from whatever source is available.
Returns dict with two keys: 'version' and 'full'.
"""
if "versioneer" in sys.modules:
# see the discussion in cmdclass.py:get_cmdclass()
del sys.modules["versioneer"]
root = get_root()
cfg = get_config_from_root(root)
assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg"
handlers = HANDLERS.get(cfg.VCS)
assert handlers, "unrecognized VCS '%s'" % cfg.VCS
verbose = verbose or cfg.verbose
assert cfg.versionfile_source is not None, \
"please set versioneer.versionfile_source"
assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"
versionfile_abs = os.path.join(root, cfg.versionfile_source)
# extract version from first of: _version.py, VCS command (e.g. 'git
# describe'), parentdir. This is meant to work for developers using a
# source checkout, for users of a tarball created by 'setup.py sdist',
# and for users of a tarball/zipball created by 'git archive' or github's
# download-from-tag feature or the equivalent in other VCSes.
get_keywords_f = handlers.get("get_keywords")
from_keywords_f = handlers.get("keywords")
if get_keywords_f and from_keywords_f:
try:
keywords = get_keywords_f(versionfile_abs)
ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
if verbose:
print("got version from expanded keyword %s" % ver)
return ver
except NotThisMethod:
pass
try:
ver = versions_from_file(versionfile_abs)
if verbose:
print("got version from file %s %s" % (versionfile_abs, ver))
return ver
except NotThisMethod:
pass
from_vcs_f = handlers.get("pieces_from_vcs")
if from_vcs_f:
try:
pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
ver = render(pieces, cfg.style)
if verbose:
print("got version from VCS %s" % ver)
return ver
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
if verbose:
print("got version from parentdir %s" % ver)
return ver
except NotThisMethod:
pass
if verbose:
print("unable to compute version")
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None, "error": "unable to compute version",
"date": None}
def get_version():
"""Get the short version string for this project."""
return get_versions()["version"]
def get_cmdclass():
"""Get the custom setuptools/distutils subclasses used by Versioneer."""
if "versioneer" in sys.modules:
del sys.modules["versioneer"]
# this fixes the "python setup.py develop" case (also 'install' and
# 'easy_install .'), in which subdependencies of the main project are
# built (using setup.py bdist_egg) in the same python process. Assume
# a main project A and a dependency B, which use different versions
# of Versioneer. A's setup.py imports A's Versioneer, leaving it in
# sys.modules by the time B's setup.py is executed, causing B to run
# with the wrong versioneer. Setuptools wraps the sub-dep builds in a
    # sandbox that restores sys.modules to its pre-build state, so the
# parent is protected against the child's "import versioneer". By
# removing ourselves from sys.modules here, before the child build
# happens, we protect the child from the parent's versioneer too.
# Also see https://github.com/warner/python-versioneer/issues/52
cmds = {}
# we add "version" to both distutils and setuptools
from distutils.core import Command
class cmd_version(Command):
description = "report generated version string"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
vers = get_versions(verbose=True)
print("Version: %s" % vers["version"])
print(" full-revisionid: %s" % vers.get("full-revisionid"))
print(" dirty: %s" % vers.get("dirty"))
print(" date: %s" % vers.get("date"))
if vers["error"]:
print(" error: %s" % vers["error"])
cmds["version"] = cmd_version
# we override "build_py" in both distutils and setuptools
#
# most invocation pathways end up running build_py:
# distutils/build -> build_py
# distutils/install -> distutils/build ->..
# setuptools/bdist_wheel -> distutils/install ->..
# setuptools/bdist_egg -> distutils/install_lib -> build_py
# setuptools/install -> bdist_egg ->..
# setuptools/develop -> ?
# pip install:
# copies source tree to a tempdir before running egg_info/etc
# if .git isn't copied too, 'git describe' will fail
# then does setup.py bdist_wheel, or sometimes setup.py install
# setup.py egg_info -> ?
# we override different "build_py" commands for both environments
if "setuptools" in sys.modules:
from setuptools.command.build_py import build_py as _build_py
else:
from distutils.command.build_py import build_py as _build_py
class cmd_build_py(_build_py):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
_build_py.run(self)
# now locate _version.py in the new build/ directory and replace
# it with an updated value
if cfg.versionfile_build:
target_versionfile = os.path.join(self.build_lib,
cfg.versionfile_build)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
cmds["build_py"] = cmd_build_py
if "setuptools" in sys.modules:
from setuptools.command.build_ext import build_ext as _build_ext
else:
from distutils.command.build_ext import build_ext as _build_ext
class cmd_build_ext(_build_ext):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
_build_ext.run(self)
if self.inplace:
# build_ext --inplace will only build modules in
# build/lib<..> dir with no _version.py to write to.
# As in place builds will already have a _version.py
# in the module dir, we do not need to write one.
return
# now locate _version.py in the new build/ directory and replace
# it with an updated value
target_versionfile = os.path.join(self.build_lib,
cfg.versionfile_source)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
cmds["build_ext"] = cmd_build_ext
if "cx_Freeze" in sys.modules: # cx_freeze enabled?
from cx_Freeze.dist import build_exe as _build_exe
# nczeczulin reports that py2exe won't like the pep440-style string
# as FILEVERSION, but it can be used for PRODUCTVERSION, e.g.
# setup(console=[{
# "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION
# "product_version": versioneer.get_version(),
# ...
class cmd_build_exe(_build_exe):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_build_exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG %
{"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
cmds["build_exe"] = cmd_build_exe
del cmds["build_py"]
if 'py2exe' in sys.modules: # py2exe enabled?
try:
from py2exe.distutils_buildexe import py2exe as _py2exe # py3
except ImportError:
from py2exe.build_exe import py2exe as _py2exe # py2
class cmd_py2exe(_py2exe):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_py2exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG %
{"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
cmds["py2exe"] = cmd_py2exe
# we override different "sdist" commands for both environments
if "setuptools" in sys.modules:
from setuptools.command.sdist import sdist as _sdist
else:
from distutils.command.sdist import sdist as _sdist
class cmd_sdist(_sdist):
def run(self):
versions = get_versions()
self._versioneer_generated_versions = versions
# unless we update this, the command will keep using the old
# version
self.distribution.metadata.version = versions["version"]
return _sdist.run(self)
def make_release_tree(self, base_dir, files):
root = get_root()
cfg = get_config_from_root(root)
_sdist.make_release_tree(self, base_dir, files)
# now locate _version.py in the new base_dir directory
# (remembering that it may be a hardlink) and replace it with an
# updated value
target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile,
self._versioneer_generated_versions)
cmds["sdist"] = cmd_sdist
return cmds
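# Example (sketch): the dict returned by get_cmdclass() plugs straight into
# setup(), as the CONFIG_ERROR text below also shows:
#
#     import versioneer
#     setup(version=versioneer.get_version(),
#           cmdclass=versioneer.get_cmdclass())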
CONFIG_ERROR = """
setup.cfg is missing the necessary Versioneer configuration. You need
a section like:
[versioneer]
VCS = git
style = pep440
versionfile_source = src/myproject/_version.py
versionfile_build = myproject/_version.py
tag_prefix =
parentdir_prefix = myproject-
You will also need to edit your setup.py to use the results:
import versioneer
setup(version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(), ...)
Please read the docstring in ./versioneer.py for configuration instructions,
edit setup.cfg, and re-run the installer or 'python versioneer.py setup'.
"""
SAMPLE_CONFIG = """
# See the docstring in versioneer.py for instructions. Note that you must
# re-run 'versioneer.py setup' after changing this section, and commit the
# resulting files.
[versioneer]
#VCS = git
#style = pep440
#versionfile_source =
#versionfile_build =
#tag_prefix =
#parentdir_prefix =
"""
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
def do_setup():
"""Main VCS-independent setup function for installing Versioneer."""
root = get_root()
try:
cfg = get_config_from_root(root)
except (EnvironmentError, configparser.NoSectionError,
configparser.NoOptionError) as e:
if isinstance(e, (EnvironmentError, configparser.NoSectionError)):
print("Adding sample versioneer config to setup.cfg",
file=sys.stderr)
with open(os.path.join(root, "setup.cfg"), "a") as f:
f.write(SAMPLE_CONFIG)
print(CONFIG_ERROR, file=sys.stderr)
return 1
print(" creating %s" % cfg.versionfile_source)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG % {"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
ipy = os.path.join(os.path.dirname(cfg.versionfile_source),
"__init__.py")
if os.path.exists(ipy):
try:
with open(ipy, "r") as f:
old = f.read()
except EnvironmentError:
old = ""
if INIT_PY_SNIPPET not in old:
print(" appending to %s" % ipy)
with open(ipy, "a") as f:
f.write(INIT_PY_SNIPPET)
else:
print(" %s unmodified" % ipy)
else:
print(" %s doesn't exist, ok" % ipy)
ipy = None
# Make sure both the top-level "versioneer.py" and versionfile_source
# (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
# they'll be copied into source distributions. Pip won't be able to
# install the package without this.
manifest_in = os.path.join(root, "MANIFEST.in")
simple_includes = set()
try:
with open(manifest_in, "r") as f:
for line in f:
if line.startswith("include "):
for include in line.split()[1:]:
simple_includes.add(include)
except EnvironmentError:
pass
# That doesn't cover everything MANIFEST.in can do
# (http://docs.python.org/2/distutils/sourcedist.html#commands), so
# it might give some false negatives. Appending redundant 'include'
# lines is safe, though.
if "versioneer.py" not in simple_includes:
print(" appending 'versioneer.py' to MANIFEST.in")
with open(manifest_in, "a") as f:
f.write("include versioneer.py\n")
else:
print(" 'versioneer.py' already in MANIFEST.in")
if cfg.versionfile_source not in simple_includes:
print(" appending versionfile_source ('%s') to MANIFEST.in" %
cfg.versionfile_source)
with open(manifest_in, "a") as f:
f.write("include %s\n" % cfg.versionfile_source)
else:
print(" versionfile_source already in MANIFEST.in")
# Make VCS-specific changes. For git, this means creating/changing
# .gitattributes to mark _version.py for export-subst keyword
# substitution.
do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
return 0
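# Example (sketch): do_setup() is normally driven from the command line; with
# the [versioneer] section of setup.cfg filled in, one would run
#
#     $ python versioneer.py setup
#
# which writes the configured _version.py, appends INIT_PY_SNIPPET to
# __init__.py, and extends MANIFEST.in, as the messages above report.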
def scan_setup_py():
"""Validate the contents of setup.py against Versioneer's expectations."""
found = set()
setters = False
errors = 0
with open("setup.py", "r") as f:
for line in f.readlines():
if "import versioneer" in line:
found.add("import")
if "versioneer.get_cmdclass()" in line:
found.add("cmdclass")
if "versioneer.get_version()" in line:
found.add("get_version")
if "versioneer.VCS" in line:
setters = True
if "versioneer.versionfile_source" in line:
setters = True
if len(found) != 3:
print("")
print("Your setup.py appears to be missing some important items")
print("(but I might be wrong). Please make sure it has something")
print("roughly like the following:")
print("")
print(" import versioneer")
print(" setup( version=versioneer.get_version(),")
print(" cmdclass=versioneer.get_cmdclass(), ...)")
print("")
errors += 1
if setters:
print("You should remove lines like 'versioneer.VCS = ' and")
print("'versioneer.versionfile_source = ' . This configuration")
print("now lives in setup.cfg, and should be removed from setup.py")
print("")
errors += 1
return errors
if __name__ == "__main__":
cmd = sys.argv[1]
if cmd == "setup":
errors = do_setup()
errors += scan_setup_py()
if errors:
sys.exit(1)
|
ParallelSSH/ssh2-python
|
versioneer.py
|
Python
|
lgpl-2.1
| 69,737
|
[
"Brian"
] |
2d9b44a43fc78c3218a471803ba5573d8ce4d3c77148111ee3f230355487bd7d
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests/Examples for dThetaXZ
TODO
====
"""
# Fix Python 2.x
try:
input = raw_input
except NameError:
pass
import os, sys
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec
from sloth.inst.dthetaxz import (
dThetaXZ,
mapCase2Num,
mapNum2Case,
getMeshMasked,
getDthetaDats,
writeScanDats,
)
from sloth.inst.dthetaxz_plot import plotEffScatt, plotScanThetaFile
#: TESTS
def test009():
"""effective scattering figure (updt: 2014-08-15)"""
mxx1, mzz1 = getMeshMasked(
mask="circular", r1p=500.0, cryst_x=50.0, cryst_z=50.0, csteps=500j
)
wrc = 1.25e-4
cases = ["Jn", "Js", "SphJn", "TorJs"]
casesLabs = ["1. Johann", "2. Johansson", "3. Spherical Jn", "4. Toroidal Js"]
angles = [35, 55, 75]
plotEffScatt(
mxx1,
mzz1,
wrc=wrc,
cases=cases,
angles=angles,
nlevels=30,
plotMask=True,
absWrc=False,
casesLabels=casesLabs,
xyFigSize=(8 * 150, 4.3 * 150),
figName="test009",
)
def test009b():
"""effective scattering figure (updt: 2014-08-21)"""
mxx1, mzz1 = getMeshMasked(
mask="circular", r1p=500.0, cryst_x=50.0, cryst_z=50.0, csteps=500j
)
wrc = 1.25e-4
cases = ["Jn", "Js", "SphJn", "TorJs", "JsFocus"]
casesLabs = [
"1. Johann",
"2. Johansson",
"3. Spherical Jn",
"4. Toroidal Js",
"5. Gen. Js focus",
]
angles = [35, 55, 75]
plotEffScatt(
mxx1,
mzz1,
wrc=wrc,
cases=cases,
casesLabels=casesLabs,
angles=angles,
nlevels=30,
plotMask=True,
absWrc=False,
xyFigSize=(8.3 * 150, 3.7 * 150),
figName="test009b",
fontSize=9,
colSpan=2,
xyTicks=0.1,
)
def test009c(retDats=False, showPlot=True):
"""effective scattering figure (updt: 2014-09-03)"""
mxx1, mzz1 = getMeshMasked(
mask="circular", r1p=500.0, cryst_x=50.0, cryst_z=50.0, csteps=500j
)
wrc = 1.25e-4
cases = ["Jn", "Js", "SphJn", "TorJs", "JsFocus"]
casesLabs = [
"1. Johann",
"2. Johansson",
"3. Spherical Jn",
"4. Toroidal Js",
"5. Gen. Js focus",
]
angles = [15, 45, 75]
if showPlot:
plotEffScatt(
mxx1,
mzz1,
wrc=wrc,
cases=cases,
casesLabels=casesLabs,
angles=angles,
nlevels=30,
plotMask=True,
absWrc=False,
xyFigSize=(8.3 * 150, 3.7 * 150),
figName="test009c",
fontSize=9,
colSpan=2,
xyTicks=0.1,
)
if retDats:
return getDthetaDats(mxx1, mzz1, wrc=wrc, cases=cases, angles=angles)
def test009d():
"""effective scattering figure (updt: 2015-02-12) """
wrc = 1.25e-4
cases = ["SphJn", "Js", "TorJs"]
casesLabs = ["1. Spherical", "2. Johansson", "3. Toroidal Js"]
angles = [35, 55, 75]
rd = 500.0 # bending radius
msks = ["circular", "rectangular"]
mxx1, mzz1 = getMeshMasked(
mask=msks[0], r1p=rd, cryst_x=50.0, cryst_z=50.0, csteps=500j
)
mxx2, mzz2 = getMeshMasked(
mask=msks[1], r1p=rd, cryst_x=50.0, cryst_z=12.5, csteps=500j
)
mzz3, mxx3 = getMeshMasked(
mask=msks[1], r1p=rd, cryst_x=50.0, cryst_z=17.5, csteps=500j
)
mxx4, mzz4 = getMeshMasked(
mask=msks[1], r1p=rd, cryst_x=50.0, cryst_z=25.0, csteps=500j
)
# all circular
plotEffScatt(
mxx1,
mzz1,
wrc=wrc,
cases=cases,
casesLabels=casesLabs,
angles=angles,
xlabel=r"x, sag. (R$_{1}^{\prime}$)",
ylabel=r"z, mer. (R$_{1}^{\prime}$)",
nlevels=30,
xyFigHalfRange=0.1,
plotMask=True,
plotVert=True,
absWrc=False,
xyFigSize=(6.0 * 150, 4.0 * 150),
xylab=(0.04, 0.96),
figName="{0}mm.{1}".format(int(rd), msks[0]),
fontSize=9,
colSpan=2,
xyTicks=0.1,
)
# js rect
lmxx = [mxx1, mxx2, mxx1]
lmzz = [mzz1, mzz2, mzz1]
plotEffScatt(
lmxx,
lmzz,
wrc=wrc,
cases=cases,
casesLabels=casesLabs,
angles=angles,
xlabel=r"x, sag. (R$_{1}^{\prime}$)",
ylabel=r"z, mer. (R$_{1}^{\prime}$)",
nlevels=30,
xyFigHalfRange=0.1,
plotMask=True,
plotVert=True,
absWrc=False,
xyFigSize=(6.0 * 150, 4.0 * 150),
xylab=(0.04, 0.96),
figName="{0}mm.{1}".format(int(rd), msks[1]),
fontSize=9,
colSpan=2,
xyTicks=0.1,
)
input("Press ENTER to close figures")
def test010():
"""multiple effective scattering figures (updt: 2014-06-29)"""
for rd in [1000.0, 500.0]:
for msk, cx, cz in zip(["circular", "rectangular"], [50.0, 40.0], [50.0, 12.5]):
mxx1, mzz1 = getMeshMasked(
mask=msk, r1p=rd, cryst_x=cx, cryst_z=cz, csteps=500j
)
plotEffScatt(
mxx1,
mzz1,
wrc=1e-4,
cases=["Johansson", "Spherical Jn", "Spherical Js", "Toroidal Js"],
angles=[35, 55, 75],
nlevels=30,
plotMask=True,
absWrc=False,
figName="{0}mm.{1}".format(int(rd), msk),
xyFigHalfRange=0.1,
xyFigSize=(8 * 150, 4.3 * 150),
)
def plotDats011(_d):
"""buggy"""
fig = plt.figure(num="plotDats011", figsize=(5, 5), dpi=150)
gs = gridspec.GridSpec(1, 2)
for ird, rd in enumerate(_d["rds"]):
gsplt = plt.subplot(gs[ird])
for msk in _d["msks"]:
if msk == "circular":
_ls = "-"
_mk = None
# _mk = 'o'
_ms = 2
mC = 1.0
else:
_ls = "--"
mC = 3.0
_mk = None
_ms = 2
lab = "{0}mm.{1}".format(int(rd), msk)
for cs, cl in zip(_d["cases"], _d["colors"]):
gsplt.plot(
_d[lab][cs]["thetaB"],
np.array(_d[lab][cs]["sa"]) * mC,
lw=2,
color=cl,
ls=_ls,
marker=_mk,
ms=_ms,
label=r"{0} $\times$ {1} {2}".format(int(mC), msk[:4], cs),
)
gsplt.set_ylim(0.0, 0.05)
gsplt.set_xlabel(r"Bragg angle $\theta_B$ (deg)")
gsplt.set_ylabel(r"Effective solid angle (sr)")
gsplt.set_title(r"Rect vs Circ at {0} mm bending".format(int(rd)))
gsplt.legend(loc="best")
plt.tight_layout()
plt.show()
return fig
def test011(retDats=True, plotDats=False):
"""angular study for analyser shapes: circular 50^2 vs rectangular 80x25"""
_d = {} # container
_d["rds"] = [1000.0, 500.0]
    # _d['cases'] = ['Johansson', 'Spherical Jn', 'Toroidal Js', 'Spherical Js', 'Js 45 deg focusing', 'Berreman']
    _d["cases"] = ["Johansson", "Spherical Jn", "Toroidal Js"]  # required below by getDthetaDats
_d["colors"] = ["blue", "green", "red", "orange"]
_d["angles"] = np.linspace(15, 85, 29)
_d["msks"] = ["circular", "rectangular"]
_d["cxs"] = [50.0, 40.0]
_d["czs"] = [50.0, 12.5]
_d["csteps"] = 500j
_d["wrc"] = 1.25e-4
for rd in _d["rds"]:
for msk, cx, cz in zip(_d["msks"], _d["cxs"], _d["czs"]):
mxx, mzz = getMeshMasked(
mask=msk, r1p=rd, cryst_x=cx, cryst_z=cz, csteps=_d["csteps"]
)
lab = "{0}mm.{1}".format(int(rd), msk)
print("{0}:".format(lab))
_d["label"] = lab
_d[lab] = getDthetaDats(
mxx, mzz, wrc=_d["wrc"], cases=_d["cases"], angles=_d["angles"]
)
#
if plotDats:
fig011 = plotDats011(_d)
if retDats:
return _d
def plotDats012(_d):
"""buggy"""
fig = plt.figure(num="plotDats012", figsize=(5, 5), dpi=150)
# gs = gridspec.GridSpec(1,2)
gs = []
gs.append(fig.add_subplot(211))
gs.append(fig.add_subplot(212))
cs = _d["cases"]
_ls = 2 # line size
_mk = None # marker style
_ms = 5 # marker size
for ird, rd in enumerate(_d["rds"]):
gsplt = plt.subplot(gs[ird])
for cz, cl in zip(_d["czs"], _d["colors"]):
lab = "{0}mm/{1}".format(int(rd), cz)
gsplt.plot(
_d[lab][cs]["thetaB"],
_d[lab][cs]["eres"],
lw=2,
color=cl,
ls=_ls,
marker=_mk,
ms=_ms,
label=r"{0}mm".format(cz * 2),
)
# gsplt.set_ylim(0.,0.05)
gsplt.set_xlabel(r"Bragg angle $\theta_B$ (deg)")
gsplt.set_ylabel(r"Energy resolution $\frac{\Delta E}{E}$")
gsplt.set_title(r"Js 80 mm height at {0} mm bending".format(int(rd)))
gsplt.legend(loc="best")
plt.tight_layout()
plt.show()
return fig
def test012(retDats=True):
""" js energy resolution vs rectangular crystal size width """
d = {} # container
d["fname"] = "dth_test012.spec"
d["rds"] = [1000.0, 500.0]
d["cases"] = ["Js"]
d["angles"] = np.linspace(35, 85, 21)
d["msks"] = "rectangular"
d["cxs"] = 40.0
d["czs"] = [2.5, 5.0, 7.5, 10.0, 12.5, 15.0]
d["csteps"] = 500j
d["wrc"] = 1.25e-4
for rd in d["rds"]:
# rectangular Js
for cz in d["czs"]:
mxx, mzz = getMeshMasked(
mask=d["msks"], r1p=rd, cryst_x=d["cxs"], cryst_z=cz, csteps=d["csteps"]
)
lab = "{0}/{1}mm/{2}".format(d["cases"][0], int(rd), cz * 2)
motpos = [
mapCase2Num(d["cases"][0]),
rd,
d["msks"],
d["cxs"],
cz,
d["wrc"],
d["csteps"],
]
print("{0}:".format(lab))
d[lab] = getDthetaDats(
mxx, mzz, wrc=d["wrc"], cases=d["cases"], angles=d["angles"]
)
writeScanDats(d[lab], d["fname"], scanLabel=lab, motpos=motpos)
# Spherical plate, Wittry and General point focus 80x50 mm^2 for comparison
for case in ["SphJn", "TorJs", "JsFocus"]:
cz = 25.0
mxx, mzz = getMeshMasked(
mask=d["msks"], r1p=rd, cryst_x=d["cxs"], cryst_z=cz, csteps=d["csteps"]
)
lab = "{0}/{1}mm/{2}".format(case, int(rd), cz * 2)
motpos = [
mapCase2Num(case),
rd,
d["msks"],
d["cxs"],
cz,
d["wrc"],
d["csteps"],
]
print("{0}:".format(lab))
d[lab] = getDthetaDats(
mxx, mzz, wrc=d["wrc"], cases=[case], angles=d["angles"]
)
writeScanDats(d[lab], d["fname"], scanLabel=lab, motpos=motpos)
#
if retDats:
return d
def test013(retDats=True):
"""energy resolution"""
d = {} # container
d["fname"] = "dth_test013.spec"
d["rds"] = [1000.0, 500.0]
d["cases"] = ["Js", "SphJn", "TorJs", "JsFocus"]
d["angles"] = np.linspace(35, 85, 21)
d["cxs"] = 50.0
d["csteps"] = 500j
d["wrc"] = 2e-4
for rd in d["rds"]:
for case in d["cases"]:
if case == "Js":
# for Js need to use an optimized mask in z
d["msks"] = "rectangular"
d["czs"] = 12.5
else:
d["msks"] = "circular"
d["czs"] = 50.0
mxx, mzz = getMeshMasked(
mask=d["msks"],
r1p=rd,
cryst_x=d["cxs"],
cryst_z=d["czs"],
csteps=d["csteps"],
)
lab = "{0}/{1}mm/{2}{3}".format(
case, int(rd), d["msks"][:4], int(d["czs"] * 2)
)
motpos = [
mapCase2Num(case),
rd,
d["msks"],
d["cxs"],
d["czs"],
d["wrc"],
d["csteps"],
]
print("{0}:".format(lab))
d[lab] = getDthetaDats(
mxx, mzz, wrc=d["wrc"], cases=[case], angles=d["angles"]
)
writeScanDats(d[lab], d["fname"], scanLabel=lab, motpos=motpos)
#
if retDats:
return d
if __name__ == "__main__":
# pass
### TESTS ###
# uncomment at your convenience
# utils
# from genericutils import ipythonAutoreload, getPyMcaMain
# ipythonAutoreload()
# m = getPyMcaMain()
# mxx1, mzz1 = test009(retDats=True)
# test009()
# d = test011(retDats=True, plotDats=False)
# d = test012(retDats=True)
# plotScanThetaFile('dth_test012.spec', str2rng('5, 7, 8, 13, 15, 16'), signal='eres', plotDeeShells=True, figName='fig1', showLegend=True)
# plotScanThetaFile('dth_test012.spec', str2rng('5, 7, 8, 13, 15, 16'), signal='eres', plotDeeShells=True, figName='figEres', showLegend=True, xlims=(34,86), ylims=(9E-6, 1.1E-2), figSize=(3.5,6))
# plotScanThetaFile('dth_test012.spec', str2rng('5, 7, 8, 13, 15, 16'), signal='sa', plotDeeShells=False, figName='figSA', showLegend=False, xlims=(34,86), ylims=None, figSize=(4.5,6), ylog=False, yscale=1)
# plotScanThetaFile('dth_test013.spec', str2rng('1:8'), signal='eres', plotDeeShells=True, figName='figEres', showLegend=True, xlims=(34,86), ylims=(9E-6, 1.1E-2), figSize=(4.5,6), ylog=True, yscale=1)
# plotScanThetaFile('dth_test013.spec', str2rng('1:8'), signal='eres', plotDeeShells=True, figName='figEres', showLegend=True, xlims=(34,86), ylims=(9E-6, 1.1E-2), figSize=(3,4), ylog=True, yscale=1)
#
# mxx1, mzz1 = test009c(retDats=True, showPlot=False)
test009d()
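# Example (sketch): executing this module directly runs whichever test is
# enabled above, e.g.
#
#     $ python dthetaxz_tests.py    # runs test009d() and opens the figures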
|
maurov/xraysloth
|
examples/dthetaxz_tests.py
|
Python
|
bsd-3-clause
| 14,078
|
[
"CRYSTAL"
] |
2dd96e0ca4a17bfc45071944fb4fdf0b69c330254e259f81ff81ddff96e20fc1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# The MIT License (MIT)
# Copyright (c) 2020 Daniel Schick
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
__all__ = ['Xray', 'XrayKin', 'XrayDyn', 'XrayDynMag']
__docformat__ = 'restructuredtext'
from .simulation import Simulation
from ..structures.layers import AmorphousLayer, UnitCell
from .. import u, Q_
from ..helpers import make_hash_md5, m_power_x, m_times_n, finderb
import numpy as np
import scipy.constants as constants
from time import time
from os import path
from tqdm.notebook import trange
r_0 = constants.physical_constants['classical electron radius'][0]
class Xray(Simulation):
r"""Xray
Base class for all X-ray scattering simulations.
Args:
S (Structure): sample to do simulations with.
force_recalc (boolean): force recalculation of results.
Keyword Args:
save_data (boolean): true to save simulation results.
cache_dir (str): path to cached data.
disp_messages (boolean): true to display messages from within the
simulations.
progress_bar (boolean): enable tqdm progress bar.
Attributes:
S (Structure): sample structure to calculate simulations on.
force_recalc (boolean): force recalculation of results.
save_data (boolean): true to save simulation results.
cache_dir (str): path to cached data.
disp_messages (boolean): true to display messages from within the
simulations.
progress_bar (boolean): enable tqdm progress bar.
energy (ndarray[float]): photon energies :math:`E` of scattering light
wl (ndarray[float]): wavelengths :math:`\lambda` of scattering light
k (ndarray[float]): wavenumber :math:`k` of scattering light
theta (ndarray[float]): incidence angles :math:`\theta` of scattering
light
qz (ndarray[float]): scattering vector :math:`q_z` of scattering light
polarizations (dict): polarization states and according names.
pol_in_state (int): incoming polarization state as defined in
polarizations dict.
pol_out_state (int): outgoing polarization state as defined in
polarizations dict.
pol_in (float): incoming polarization factor (can be a complex ndarray).
pol_out (float): outgoing polarization factor (can be a complex ndarray).
"""
def __init__(self, S, force_recalc, **kwargs):
super().__init__(S, force_recalc, **kwargs)
self._energy = np.array([])
self._wl = np.array([])
self._k = np.array([])
self._theta = np.zeros([1, 1])
self._qz = np.zeros([1, 1])
self.polarizations = {0: 'unpolarized',
1: 'circ +',
2: 'circ -',
3: 'sigma',
4: 'pi'}
self.pol_in_state = 3 # sigma
self.pol_out_state = 0 # no-analyzer
self.pol_in = None
self.pol_out = None
self.set_polarization(self.pol_in_state, self.pol_out_state)
def __str__(self, output=[]):
"""String representation of this class"""
output = [['energy', self.energy[0] if np.size(self.energy) == 1 else
'{:f} .. {:f}'.format(np.min(self.energy), np.max(self.energy))],
['wavelength', self.wl[0] if np.size(self.wl) == 1 else
'{:f} .. {:f}'.format(np.min(self.wl), np.max(self.wl))],
['wavenumber', self.k[0] if np.size(self.k) == 1 else
'{:f} .. {:f}'.format(np.min(self.k), np.max(self.k))],
['theta', self.theta[0] if np.size(self.theta) == 1 else
'{:f} .. {:f}'.format(np.min(self.theta), np.max(self.theta))],
['q_z', self.qz[0] if np.size(self.qz) == 1 else
'{:f} .. {:f}'.format(np.min(self.qz), np.max(self.qz))],
['incoming polarization', self.polarizations[self.pol_in_state]],
['analyzer polarization', self.polarizations[self.pol_out_state]],
] + output
return super().__str__(output)
def set_incoming_polarization(self, pol_in_state):
"""set_incoming_polarization
Must be overwritten by child classes.
Args:
pol_in_state (int): incoming polarization state id.
"""
raise NotImplementedError
def set_outgoing_polarization(self, pol_out_state):
"""set_outgoing_polarization
Must be overwritten by child classes.
Args:
pol_out_state (int): outgoing polarization state id.
"""
raise NotImplementedError
def set_polarization(self, pol_in_state, pol_out_state):
"""set_polarization
Sets the incoming and analyzer (outgoing) polarization.
Args:
pol_in_state (int): incoming polarization state id.
pol_out_state (int): outgoing polarization state id.
"""
self.set_incoming_polarization(pol_in_state)
self.set_outgoing_polarization(pol_out_state)
def get_hash(self, strain_vectors, **kwargs):
"""get_hash
        Calculates a unique hash from the photon energy :math:`E`, the
        :math:`q_z` range, the polarization states, and the ``strain_vectors``,
        as well as from the sample structure hash of all relevant X-ray
        parameters. Optionally, part of the strain_map is included.
Args:
strain_vectors (dict{ndarray[float]}): reduced strains per unique
layer.
**kwargs (ndarray[float]): spatio-temporal strain profile.
Returns:
hash (str): unique hash.
"""
param = [self.pol_in_state, self.pol_out_state, self._qz, self._energy, strain_vectors]
if 'strain_map' in kwargs:
strain_map = kwargs.get('strain_map')
if np.size(strain_map) > 1e6:
strain_map = strain_map.flatten()[0:1000000]
param.append(strain_map)
return self.S.get_hash(types='xray') + '_' + make_hash_md5(param)
def get_polarization_factor(self, theta):
r"""get_polarization_factor
Calculates the polarization factor :math:`P(\vartheta)` for a given
incident angle :math:`\vartheta` for the case of `s`-polarization
(pol = 0), or `p`-polarization (pol = 1), or unpolarized X-rays
(pol = 0.5):
.. math::
            P(\vartheta) = \sqrt{(1-\mbox{pol}) + \mbox{pol} \cdot \cos^2(2\vartheta)}
Args:
theta (ndarray[float]): incidence angle.
Returns:
P (ndarray[float]): polarization factor.
"""
return np.sqrt((1-self.pol_in) + self.pol_in*np.cos(2*theta)**2)
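    # Example (sketch): for sigma polarization pol_in = 0 and P = 1 at all
    # angles; for pi polarization pol_in = 1 and P vanishes at 2*theta = 90 deg:
    #
    #     theta = np.deg2rad(45)
    #     np.sqrt((1 - 0) + 0*np.cos(2*theta)**2)   # sigma -> 1.0
    #     np.sqrt((1 - 1) + 1*np.cos(2*theta)**2)   # pi    -> ~0.0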
def update_experiment(self, caller):
r"""update_experiment
Recalculate energy, wavelength, and wavevector as well as theta
and the scattering vector in case any of these has changed.
.. math::
\lambda & = \frac{hc}{E} \\
E & = \frac{hc}{\lambda} \\
k & = \frac{2\pi}{\lambda} \\
\vartheta & = \arcsin{\frac{\lambda q_z}{4\pi}} \\
q_z & = 2k \sin{\vartheta}
Args:
caller (str): name of calling method.
"""
from scipy import constants
if caller != 'energy':
if caller == 'wl': # calc energy from wavelength
self._energy = Q_((constants.h*constants.c)/self._wl, 'J').to('eV').magnitude
            elif caller == 'k':  # calc energy from wavevector
self._energy = \
Q_((constants.h*constants.c)/(2*np.pi/self._k), 'J').to('eV').magnitude
if caller != 'wl':
if caller == 'energy': # calc wavelength from energy
self._wl = (constants.h*constants.c)/self.energy.to('J').magnitude
elif caller == 'k': # calc wavelength from wavevector
self._wl = 2*np.pi/self._k
if caller != 'k':
if caller == 'energy': # calc wavevector from energy
self._k = 2*np.pi/self._wl
elif caller == 'wl': # calc wavevector from wavelength
self._k = 2*np.pi/self._wl
if caller != 'theta':
self._theta = np.arcsin(np.outer(self._wl, self._qz[0, :])/np.pi/4)
if caller != 'qz':
self._qz = np.outer(2*self._k, np.sin(self._theta[0, :]))
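    # Example (sketch; `sim` is a configured Xray subclass instance): setting
    # one quantity re-derives the others via update_experiment():
    #
    #     sim.energy = Q_(np.array([8047]), 'eV')   # Cu K-alpha
    #     sim.wl                                    # -> ~0.154 nm via lambda = h*c/E
    #     sim.k                                     # -> 2*pi/lambda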
@property
def energy(self):
return Q_(self._energy, u.eV)
@energy.setter
def energy(self, energy):
self._energy = np.array(energy.to('eV').magnitude, ndmin=1)
self.update_experiment('energy')
@property
def wl(self):
return Q_(self._wl, u.m).to('nm')
@wl.setter
def wl(self, wl):
self._wl = np.array(wl.to_base_units().magnitude, ndmin=1)
self.update_experiment('wl')
@property
def k(self):
return Q_(self._k, 1/u.m).to('1/nm')
@k.setter
def k(self, k):
self._k = np.array(k.to_base_units().magnitude, ndmin=1)
self.update_experiment('k')
@property
def theta(self):
return Q_(self._theta, u.rad).to('deg')
@theta.setter
def theta(self, theta):
self._theta = np.array(theta.to_base_units().magnitude, ndmin=1)
if self._theta.ndim < 2:
self._theta = np.tile(self._theta, (len(self._energy), 1))
self.update_experiment('theta')
@property
def qz(self):
return Q_(self._qz, 1/u.m).to('1/nm')
@qz.setter
def qz(self, qz):
self._qz = np.array(qz.to_base_units().magnitude, ndmin=1)
if self._qz.ndim < 2:
self._qz = np.tile(self._qz, (len(self._energy), 1))
self.update_experiment('qz')
class XrayKin(Xray):
r"""XrayKin
Kinetic X-ray scattering simulations.
Args:
S (Structure): sample to do simulations with.
force_recalc (boolean): force recalculation of results.
Keyword Args:
save_data (boolean): true to save simulation results.
cache_dir (str): path to cached data.
disp_messages (boolean): true to display messages from within the
simulations.
progress_bar (boolean): enable tqdm progress bar.
Attributes:
S (Structure): sample structure to calculate simulations on.
force_recalc (boolean): force recalculation of results.
save_data (boolean): true to save simulation results.
cache_dir (str): path to cached data.
disp_messages (boolean): true to display messages from within the
simulations.
progress_bar (boolean): enable tqdm progress bar.
energy (ndarray[float]): photon energies :math:`E` of scattering light
wl (ndarray[float]): wavelengths :math:`\lambda` of scattering light
k (ndarray[float]): wavenumber :math:`k` of scattering light
theta (ndarray[float]): incidence angles :math:`\theta` of scattering
light
qz (ndarray[float]): scattering vector :math:`q_z` of scattering light
polarizations (dict): polarization states and according names.
pol_in_state (int): incoming polarization state as defined in
polarizations dict.
pol_out_state (int): outgoing polarization state as defined in
polarizations dict.
pol_in (float): incoming polarization factor (can be a complex ndarray).
pol_out (float): outgoing polarization factor (can be a complex ndarray).
References:
.. [9] B. E. Warren (1990). *X-ray diffraction*.
New York: Dover Publications
"""
def __init__(self, S, force_recalc, **kwargs):
super().__init__(S, force_recalc, **kwargs)
def __str__(self):
"""String representation of this class"""
class_str = 'Kinematical X-Ray Diffraction simulation properties:\n\n'
class_str += super().__str__()
return class_str
def set_incoming_polarization(self, pol_in_state):
"""set_incoming_polarization
Sets the incoming polarization factor for sigma, pi, and unpolarized
polarization.
Args:
pol_in_state (int): incoming polarization state id.
"""
self.pol_in_state = pol_in_state
if (self.pol_in_state == 1): # circ +
            self.disp_message('incoming polarization {:s} not implemented'.format(
self.polarizations[self.pol_in_state]))
self.set_incoming_polarization(3)
return
elif (self.pol_in_state == 2): # circ-
            self.disp_message('incoming polarization {:s} not implemented'.format(
self.polarizations[self.pol_in_state]))
self.set_incoming_polarization(3)
return
elif (self.pol_in_state == 3): # sigma
self.pol_in = 0
elif (self.pol_in_state == 4): # pi
self.pol_in = 1
else: # unpolarized
self.pol_in_state = 0
self.pol_in = 0.5
        self.disp_message('incoming polarization set to: {:s}'.format(
self.polarizations[self.pol_in_state]))
def set_outgoing_polarization(self, pol_out_state):
"""set_outgoing_polarization
For kinematical X-ray simulation only "no analyzer polarization" is allowed.
Args:
pol_out_state (int): outgoing polarization state id.
"""
self.pol_out_state = pol_out_state
if self.pol_out_state == 0:
            self.disp_message('analyzer polarization set to: {:s}'.format(
self.polarizations[self.pol_out_state]))
else:
            self.disp_message('XrayKin only allows NO analyzer polarization')
self.set_outgoing_polarization(0)
@u.wraps(None, (None, 'eV', 'm**-1', None), strict=False)
def get_uc_atomic_form_factors(self, energy, qz, uc):
""" get_uc_atomic_form_factors
        Returns the energy- and angle-dependent atomic form factors
        :math:`f(q_z, E)` of all atoms in the unit cell as a vector.
Args:
energy (float, Quantity): photon energy.
qz (ndarray[float, Quantity]): scattering vectors.
uc (UnitCell): unit cell object.
Returns:
f (ndarray[complex]): unit cell atomic form factors.
"""
        if not np.isscalar(energy):
            raise TypeError('Only scalars or Quantities are allowed for the energy!')
f = np.zeros([uc.num_atoms, len(qz)], dtype=complex)
for i in range(uc.num_atoms):
f[i, :] = uc.atoms[i][0].get_cm_atomic_form_factor(energy, qz)
return f
@u.wraps(None, (None, 'eV', 'm**-1', None, None), strict=False)
def get_uc_structure_factor(self, energy, qz, uc, strain=0):
r"""get_uc_structure_factor
        Calculates the energy-, angle-, and strain-dependent structure factor
        :math:`S(E,q_z,\epsilon)` of the unit cell:
        .. math::
            S(E,q_z,\epsilon) = \sum_i^N f_i \, \exp(i q_z z_i(\epsilon))
Args:
energy (float, Quantity): photon energy.
qz (ndarray[float, Quantity]): scattering vectors.
uc (UnitCell): unit cell object.
strain (float, optional): strain of the unit cell 0 .. 1.
Defaults to 0.
Returns:
S (ndarray[complex]): unit cell structure factor.
"""
        if not np.isscalar(energy):
            raise TypeError('Only scalars or Quantities for the energy are allowed!')
if np.isscalar(qz):
qz = np.array([qz])
S = np.sum(self.get_uc_atomic_form_factors(energy, qz, uc)
* np.exp(1j * uc._c_axis
* np.outer(uc.get_atom_positions(strain), qz)), 0)
return S
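    # Example (sketch, hypothetical two-atom cell): with form factors f1, f2
    # at fractional positions 0 and 0.5 and c-axis c, the sum above reduces to
    #
    #     S(qz) = f1 + f2*np.exp(1j*c*0.5*qz)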
def homogeneous_reflectivity(self, strains=0):
r"""homogeneous_reflectivity
Calculates the reflectivity :math:`R = E_p^t\,(E_p^t)^*` of a
homogeneous sample structure as well as the reflected field
:math:`E_p^N` of all substructures.
Args:
strains (ndarray[float], optional): strains of each sub-structure
0 .. 1. Defaults to 0.
Returns:
(tuple):
- *R (ndarray[complex])* - homogeneous reflectivity.
- *A (ndarray[complex])* - reflected fields of sub-structures.
"""
        if np.isscalar(strains) and strains == 0:
strains = np.zeros([self.S.get_number_of_sub_structures(), 1])
t1 = time()
        self.disp_message('Calculating _homogeneous_reflectivity_ ...')
# get the reflected field of the structure for each energy
R = np.zeros_like(self._qz)
for i, energy in enumerate(self._energy):
qz = self._qz[i, :]
theta = self._theta[i, :]
Ept, A = self.homogeneous_reflected_field(self.S, energy, qz, theta, strains)
# calculate the real reflectivity from Ef
R[i, :] = np.real(Ept*np.conj(Ept))
        self.disp_message('Elapsed time for _homogeneous_reflectivity_: {:f} s'.format(time()-t1))
return R, A
@u.wraps((None, None), (None, None, 'eV', 'm**-1', 'rad', None), strict=False)
def homogeneous_reflected_field(self, S, energy, qz, theta, strains=0):
r"""homogeneous_reflected_field
Calculates the reflected field :math:`E_p^t` of the whole sample
structure as well as for each sub-structure (:math:`E_p^N`). The
reflected wave field :math:`E_p` from a single layer of unit cells at
the detector is calculated according to Ref. [9]_:
.. math::
E_p = \frac{i}{\varepsilon_0}\frac{e^2}{m_e c_0^2}
\frac{P(\vartheta) S(E,q_z,\epsilon)}{A q_z}
For the case of :math:`N` similar planes of unit cells one can write:
.. math::
E_p^N = \sum_{n=0}^{N-1} E_p \exp(i q_z z n )
where :math:`z` is the distance between the planes (c-axis). The above
equation can be simplified to:
.. math::
E_p^N = E_p \psi(q_z,z,N)
introducing the interference function
.. math::
\psi(q_z,z,N) & = \sum_{n=0}^{N-1} \exp(i q_z z n) \\
& = \frac{1- \exp(i q_z z N)}{1- \exp(i q_z z)}
The total reflected wave field of all :math:`i = 1\ldots M` homogeneous
layers (:math:`E_p^t`) is the phase-correct summation of all individual
:math:`E_p^{N,i}`:
.. math::
E_p^t = \sum_{i=1}^M E_p^{N,i} \exp(i q_z Z_i)
where :math:`Z_i = \sum_{j=1}^{i-1} N_j z_j` is the distance of the
:math:`i`-th layer from the surface.
Args:
S (Structure, UnitCell): structure or sub-structure to calculate on.
energy (float, Quantity): photon energy.
qz (ndarray[float, Quantity]): scattering vectors.
theta (ndarray[float, Quantity]): scattering incidence angle.
strains (ndarray[float], optional): strains of each sub-structure
0 .. 1. Defaults to 0.
Returns:
(tuple):
- *Ept (ndarray[complex])* - reflected field.
- *A (ndarray[complex])* - reflected fields of substructures.
"""
# if no strains are given we assume no strain (1)
if np.isscalar(strains) and strains == 0:
strains = np.zeros([self.S.get_number_of_sub_structures(), 1])
N = len(qz) # nb of qz
Ept = np.zeros([1, N]) # total reflected field
Z = 0 # total length of the substructure from the surface
A = list([0, 2]) # cell matrix of reflected fields EpN of substructures
        strainCounter = 0  # this is the index into the strain vector, if applied
# traverse substructures
for sub_structures in S.sub_structures:
if isinstance(sub_structures[0], UnitCell):
                # the substructure is a unit cell and we can calculate
# Ep directly
Ep = self.get_Ep(energy, qz, theta, sub_structures[0], strains[strainCounter])
z = sub_structures[0]._c_axis
strainCounter = strainCounter+1
elif isinstance(sub_structures[0], AmorphousLayer):
raise ValueError('The substructure cannot be an AmorphousLayer!')
else:
# the substructure is a structure, so we do a recursive
# call of this method
d = sub_structures[0].get_number_of_sub_structures()
Ep, temp = self.homogeneous_reflected_field(
sub_structures[0], energy, qz, theta,
strains[strainCounter:(strainCounter + d)])
z = sub_structures[0].get_length().magnitude
strainCounter = strainCounter + d
A.append([temp, [sub_structures[0].name + ' substructures']])
A.append([Ep, '{:d}x {:s}'.format(1, sub_structures[0].name)])
# calculate the interference function for N repetitions of
# the substructure with the length z
psi = self.get_interference_function(qz, z, sub_structures[1])
# calculate the reflected field for N repetitions of
# the substructure with the length z
EpN = Ep * psi
# remember the result
A.append([EpN, '{:d}x {:s}'.format(sub_structures[1], sub_structures[0].name)])
# add the reflected field of the current substructure
# phase-correct to the already calculated substructures
Ept = Ept+(EpN*np.exp(1j*qz*Z))
# update the total length $Z$ of the already calculated
# substructures
Z = Z + z*sub_structures[1]
# add static substrate to kinXRD
if S.substrate != []:
temp, temp2 = self.homogeneous_reflected_field(S.substrate, energy, qz, theta)
A.append([temp2, 'static substrate'])
Ept = Ept+(temp*np.exp(1j*qz*Z))
return Ept, A
@u.wraps(None, (None, 'm**-1', 'm', None), strict=False)
def get_interference_function(self, qz, z, N):
r"""get_interference_function
Calculates the interference function for :math:`N` repetitions of the
structure with the length :math:`z`:
.. math::
\psi(q_z,z,N) & = \sum_{n=0}^{N-1} \exp(i q_z z n) \\
& = \frac{1- \exp(i q_z z N)}{1- \exp(i q_z z)}
Args:
qz (ndarray[float, Quantity]): scattering vectors.
z (float): thickness/length of the structure.
N (int): repetitions of the structure.
Returns:
psi (ndarray[complex]): interference function.
"""
psi = (1-np.exp(1j*qz*z*N)) / (1 - np.exp(1j*qz*z))
return psi
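    # Example (sketch): close to the Bragg condition q_z*z -> 2*pi the modulus
    # of the interference function approaches N, e.g. for N = 100:
    #
    #     z = 0.5e-9
    #     qz = np.array([2*np.pi/z*(1 + 1e-6)])              # slightly off-Bragg
    #     psi = (1-np.exp(1j*qz*z*100)) / (1-np.exp(1j*qz*z))
    #     np.abs(psi)                                        # -> close to 100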
@u.wraps(None, (None, 'eV', 'm**-1', 'rad', None, None), strict=False)
def get_Ep(self, energy, qz, theta, uc, strain):
r"""get_Ep
Calculates the reflected field :math:`E_p` for one unit cell
with a given strain :math:`\epsilon`:
.. math::
E_p = \frac{i}{\varepsilon_0} \frac{e^2}{m_e c_0^2}
\frac{P S(E,q_z,\epsilon)}{A q_z}
with :math:`e` as electron charge, :math:`m_e` as electron
mass, :math:`c_0` as vacuum light velocity,
:math:`\varepsilon_0` as vacuum permittivity,
        :math:`P` as polarization factor and :math:`S(E,q_z,\epsilon)`
as energy-, angle-, and strain-dependent unit cell structure
factor.
Args:
energy (float, Quantity): photon energy.
qz (ndarray[float, Quantity]): scattering vectors.
theta (ndarray[float, Quantity]): scattering incidence angle.
uc (UnitCell): unit cell object.
strain (float, optional): strain of the unit cell 0 .. 1.
Defaults to 0.
Returns:
Ep (ndarray[complex]): reflected field.
"""
import scipy.constants as c
Ep = 1j/c.epsilon_0*c.elementary_charge**2/c.electron_mass/c.c**2 \
* (self.get_polarization_factor(theta)
* self.get_uc_structure_factor(energy, qz, uc, strain)
/ uc._area) / qz
return Ep
class XrayDyn(Xray):
r"""XrayDyn
Dynamical X-ray scattering simulations.
Args:
S (Structure): sample to do simulations with.
force_recalc (boolean): force recalculation of results.
Keyword Args:
save_data (boolean): true to save simulation results.
cache_dir (str): path to cached data.
disp_messages (boolean): true to display messages from within the
simulations.
progress_bar (boolean): enable tqdm progress bar.
Attributes:
S (Structure): sample structure to calculate simulations on.
force_recalc (boolean): force recalculation of results.
save_data (boolean): true to save simulation results.
cache_dir (str): path to cached data.
disp_messages (boolean): true to display messages from within the
simulations.
progress_bar (boolean): enable tqdm progress bar.
energy (ndarray[float]): photon energies :math:`E` of scattering light
wl (ndarray[float]): wavelengths :math:`\lambda` of scattering light
k (ndarray[float]): wavenumber :math:`k` of scattering light
theta (ndarray[float]): incidence angles :math:`\theta` of scattering
light
qz (ndarray[float]): scattering vector :math:`q_z` of scattering light
polarizations (dict): polarization states and according names.
pol_in_state (int): incoming polarization state as defined in
polarizations dict.
pol_out_state (int): outgoing polarization state as defined in
polarizations dict.
pol_in (float): incoming polarization factor (can be a complex ndarray).
pol_out (float): outgoing polarization factor (can be a complex ndarray).
last_atom_ref_trans_matrices (list): remember last result of
atom ref_trans_matrices to speed up calculation.
"""
def __init__(self, S, force_recalc, **kwargs):
super().__init__(S, force_recalc, **kwargs)
self.last_atom_ref_trans_matrices = {'atom_ids': [],
'hashes': [],
'H': []}
def __str__(self):
"""String representation of this class"""
class_str = 'Dynamical X-Ray Diffraction simulation properties:\n\n'
class_str += super().__str__()
return class_str
def set_incoming_polarization(self, pol_in_state):
"""set_incoming_polarization
Sets the incoming polarization factor for sigma, pi, and unpolarized
polarization.
Args:
pol_in_state (int): incoming polarization state id.
"""
self.pol_in_state = pol_in_state
if (self.pol_in_state == 1): # circ +
            self.disp_message('incoming polarization {:s} not implemented'.format(
self.polarizations[self.pol_in_state]))
self.set_incoming_polarization(3)
return
elif (self.pol_in_state == 2): # circ-
            self.disp_message('incoming polarization {:s} not implemented'.format(
self.polarizations[self.pol_in_state]))
self.set_incoming_polarization(3)
return
elif (self.pol_in_state == 3): # sigma
self.pol_in = 0
elif (self.pol_in_state == 4): # pi
self.pol_in = 1
else: # unpolarized
self.pol_in_state = 0
self.pol_in = 0.5
        self.disp_message('incoming polarization set to: {:s}'.format(
self.polarizations[self.pol_in_state]))
def set_outgoing_polarization(self, pol_out_state):
"""set_outgoing_polarization
For dynamical X-ray simulation only "no analyzer polarization" is allowed.
Args:
pol_out_state (int): outgoing polarization state id.
"""
self.pol_out_state = pol_out_state
if self.pol_out_state == 0:
            self.disp_message('analyzer polarization set to: {:s}'.format(
self.polarizations[self.pol_out_state]))
else:
            self.disp_message('XrayDyn only allows NO analyzer polarization')
self.set_outgoing_polarization(0)
def homogeneous_reflectivity(self, *args):
r"""homogeneous_reflectivity
Calculates the reflectivity :math:`R` of the whole sample structure
and the reflectivity-transmission matrices :math:`M_{RT}` for
        each substructure. The reflectivity of the :math:`2\times 2`
        matrices for each :math:`q_z` is calculated as follows:
        .. math:: R = \left|M_{RT}^t(0,1)/M_{RT}^t(1,1)\right|^2
Args:
*args (ndarray[float], optional): strains for each substructure.
Returns:
(tuple):
- *R (ndarray[float])* - homogeneous reflectivity.
- *A (ndarray[complex])* - reflectivity-transmission matrices of
sub-structures.
"""
# if no strains are given we assume no strain
if len(args) == 0:
strains = np.zeros([self.S.get_number_of_sub_structures(), 1])
else:
strains = args[0]
t1 = time()
        self.disp_message('Calculating _homogeneous_reflectivity_ ...')
# get the reflectivity-transmission matrix of the structure
RT, A = self.homogeneous_ref_trans_matrix(self.S, strains)
# calculate the real reflectivity from the RT matrix
R = self.calc_reflectivity_from_matrix(RT)
        self.disp_message('Elapsed time for _homogeneous_reflectivity_: {:f} s'.format(time()-t1))
return R, A
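    # Example (sketch; `dyn` is an XrayDyn instance built on a Structure):
    #
    #     dyn.energy = Q_(np.array([8047]), 'eV')            # Cu K-alpha
    #     dyn.qz = Q_(np.linspace(3.0, 3.5, 501), '1/nm')
    #     R, A = dyn.homogeneous_reflectivity()              # R: (energies, qz)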
def homogeneous_ref_trans_matrix(self, S, *args):
r"""homogeneous_ref_trans_matrix
Calculates the reflectivity-transmission matrices :math:`M_{RT}` of
the whole sample structure as well as for each sub-structure.
The reflectivity-transmission matrix of a single unit cell is
calculated from the reflection-transmission matrices :math:`H_i`
of each atom and the phase matrices between the atoms :math:`L_i`:
.. math:: M_{RT} = \prod_i H_i \ L_i
For :math:`N` similar layers of unit cells one can calculate the
:math:`N`-th power of the unit cell :math:`\left(M_{RT}\right)^N`.
The reflection-transmission matrix for the whole sample
:math:`M_{RT}^t` consisting of :math:`j = 1\ldots M`
sub-structures is then again:
        .. math:: M_{RT}^t = \prod_{j=1}^M \left(M_{RT,j}\right)^{N_j}
Args:
S (Structure, UnitCell): structure or sub-structure to calculate on.
*args (ndarray[float], optional): strains for each substructure.
Returns:
(tuple):
- *RT (ndarray[complex])* - reflectivity-transmission matrix.
- *A (ndarray[complex])* - reflectivity-transmission matrices of
sub-structures.
"""
# if no strains are given we assume no strain (1)
if len(args) == 0:
strains = np.zeros([S.get_number_of_sub_structures(), 1])
else:
strains = args[0]
# initialize
RT = np.tile(np.eye(2, 2)[np.newaxis, np.newaxis, :, :],
(np.size(self._qz, 0), np.size(self._qz, 1), 1, 1)) # ref_trans_matrix
A = [] # list of ref_trans_matrices of substructures
strainCounter = 0
# traverse substructures
for sub_structure in S.sub_structures:
if isinstance(sub_structure[0], UnitCell):
                # the sub_structure is a unitCell
# calculate the ref-trans matrices for N unitCells
temp = m_power_x(self.get_uc_ref_trans_matrix(
sub_structure[0], strains[strainCounter]),
sub_structure[1])
strainCounter += 1
# remember the result
A.append([temp, '{:d}x {:s}'.format(sub_structure[1], sub_structure[0].name)])
elif isinstance(sub_structure[0], AmorphousLayer):
raise ValueError('The substructure cannot be an AmorphousLayer!')
else:
                # it's a structure
# make a recursive call
temp, temp2 = self.homogeneous_ref_trans_matrix(
sub_structure[0],
strains[strainCounter:(strainCounter
+ sub_structure[0].get_number_of_sub_structures())])
A.append([temp2, sub_structure[0].name + ' substructures'])
strainCounter = strainCounter+sub_structure[0].get_number_of_sub_structures()
                A.append([temp, '{:d}x {:s}'.format(1, sub_structure[0].name)])
# calculate the ref-trans matrices for N sub structures
temp = m_power_x(temp, sub_structure[1])
A.append([temp, '{:d}x {:s}'.format(sub_structure[1], sub_structure[0].name)])
# multiply it to the output
RT = m_times_n(RT, temp)
# if a substrate is included add it at the end
if S.substrate != []:
temp, temp2 = self.homogeneous_ref_trans_matrix(S.substrate)
A.append([temp2, 'static substrate'])
RT = m_times_n(RT, temp)
return RT, A
def inhomogeneous_reflectivity(self, strain_map, strain_vectors, **kwargs):
"""inhomogeneous_reflectivity
Returns the reflectivity of an inhomogeneously strained sample
structure for a given ``strain_map`` in position and time, as well
as for a given set of possible strains for each unit cell in the
sample structure (``strain_vectors``).
        If no reflectivity is saved in the cache, it is calculated.
        Depending on the given ``calc_type``, the corresponding
        sub-routine for the reflectivity computation is called:
* ``parallel`` parallelization over the time steps utilizing
`Dask <https://dask.org/>`_
* ``distributed`` not implemented in Python, but should be possible
with `Dask <https://dask.org/>`_ as well
* ``sequential`` no parallelization at all
Args:
strain_map (ndarray[float]): spatio-temporal strain profile.
strain_vectors (list[ndarray[float]]): reduced strains per unique
layer.
**kwargs:
- *calc_type (str)* - type of calculation.
- *dask_client (Dask.Client)* - Dask client.
- *job (Dask.job)* - Dask job.
- *num_workers (int)* - Dask number of workers.
Returns:
R (ndarray[float]): inhomogeneous reflectivity.
"""
# create a hash of all simulation parameters
filename = 'inhomogeneous_reflectivity_dyn_' \
+ self.get_hash(strain_vectors, strain_map=strain_map) \
+ '.npz'
full_filename = path.abspath(path.join(self.cache_dir, filename))
# check if we find some corresponding data in the cache dir
if path.exists(full_filename) and not self.force_recalc:
# found something so load it
tmp = np.load(full_filename)
R = tmp['R']
self.disp_message('_inhomogeneous_reflectivity_ loaded from file:\n\t' + filename)
else:
t1 = time()
            self.disp_message('Calculating _inhomogeneous_reflectivity_ ...')
# parse the input arguments
if not isinstance(strain_map, np.ndarray):
raise TypeError('strain_map must be a numpy ndarray!')
if not isinstance(strain_vectors, list):
raise TypeError('strain_vectors must be a list!')
dask_client = kwargs.get('dask_client', [])
calc_type = kwargs.get('calc_type', 'sequential')
if calc_type not in ['parallel', 'sequential', 'distributed']:
raise TypeError('calc_type must be either _parallel_, '
'_sequential_, or _distributed_!')
job = kwargs.get('job')
num_workers = kwargs.get('num_workers', 1)
# All ref-trans matrices for all unique unitCells and for all
# possible strains, given by strainVectors, are calculated in
# advance.
RTM = self.get_all_ref_trans_matrices(strain_vectors)
# select the type of computation
if calc_type == 'parallel':
R = self.parallel_inhomogeneous_reflectivity(strain_map,
strain_vectors,
RTM,
dask_client)
elif calc_type == 'distributed':
R = self.distributed_inhomogeneous_reflectivity(strain_map,
strain_vectors,
job,
num_workers,
RTM)
else: # sequential
R = self.sequential_inhomogeneous_reflectivity(strain_map,
strain_vectors,
RTM)
self.disp_message('Elapsed time for _inhomogeneous_reflectivity_:'
' {:f} s'.format(time()-t1))
self.save(full_filename, {'R': R}, '_inhomogeneous_reflectivity_')
return R
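    # Example (sketch, hypothetical strain grid): with a strain_map of shape
    # (delays, unit cells), one reduced strain vector per unique layer suffices:
    #
    #     strain_vectors = [np.linspace(-0.01, 0.01, 100)
    #                       for _ in range(dyn.S.get_number_of_unique_layers())]
    #     R = dyn.inhomogeneous_reflectivity(strain_map, strain_vectors,
    #                                        calc_type='sequential')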
def sequential_inhomogeneous_reflectivity(self, strain_map, strain_vectors, RTM):
"""sequential_inhomogeneous_reflectivity
Returns the reflectivity of an inhomogeneously strained sample structure
for a given ``strain_map`` in position and time, as well as for a given
set of possible strains for each unit cell in the sample structure
(``strain_vectors``). The function calculates the results sequentially
without parallelization.
Args:
strain_map (ndarray[float]): spatio-temporal strain profile.
strain_vectors (list[ndarray[float]]): reduced strains per unique
layer.
RTM (list[ndarray[complex]]): reflection-transmission matrices for
all given strains per unique layer.
Returns:
R (ndarray[float]): inhomogeneous reflectivity.
"""
# initialize
M = np.size(strain_map, 0) # delay steps
R = np.zeros([M, np.size(self._qz, 0), np.size(self._qz, 1)])
if self.progress_bar:
iterator = trange(M, desc='Progress', leave=True)
else:
iterator = range(M)
# get the inhomogeneous reflectivity of the sample
# structure for each time step of the strain map
for i in iterator:
R[i, :, :] = self.calc_inhomogeneous_reflectivity(strain_map[i, :],
strain_vectors,
RTM)
return R
def parallel_inhomogeneous_reflectivity(self, strain_map, strain_vectors,
RTM, dask_client):
"""parallel_inhomogeneous_reflectivity
Returns the reflectivity of an inhomogeneously strained sample structure
for a given ``strain_map`` in position and time, as well as for a given
set of possible strains for each unit cell in the sample structure
(``strain_vectors``). The function parallelizes the calculation over the
time steps, since the results do not depend on each other.
Args:
strain_map (ndarray[float]): spatio-temporal strain profile.
strain_vectors (list[ndarray[float]]): reduced strains per unique
layer.
RTM (list[ndarray[complex]]): reflection-transmission matrices for
all given strains per unique layer.
dask_client (Dask.Client): Dask client.
Returns:
R (ndarray[float]): inhomogeneous reflectivity.
"""
if not dask_client:
raise ValueError('no dask client set')
from dask import delayed # to allow parallel computation
# initialize
res = []
M = np.size(strain_map, 0) # delay steps
N = np.size(self._qz, 0) # energy steps
K = np.size(self._qz, 1) # qz steps
R = np.zeros([M, N, K])
uc_indices, _, _ = self.S.get_layer_vectors()
# init unity matrix for matrix multiplication
RTU = np.tile(np.eye(2, 2)[np.newaxis, np.newaxis, :, :], (N, K, 1, 1))
        # make RTM available for all workers
remote_RTM = dask_client.scatter(RTM)
remote_RTU = dask_client.scatter(RTU)
remote_uc_indices = dask_client.scatter(uc_indices)
remote_strain_vectors = dask_client.scatter(strain_vectors)
# precalculate the substrate ref_trans_matrix if present
if self.S.substrate != []:
RTS, _ = self.homogeneous_ref_trans_matrix(self.S.substrate)
else:
RTS = RTU
# create dask.delayed tasks for all delay steps
for i in range(M):
RT = delayed(XrayDyn.calc_inhomogeneous_ref_trans_matrix)(
remote_uc_indices,
remote_RTU,
strain_map[i, :],
remote_strain_vectors,
remote_RTM)
RT = delayed(m_times_n)(RT, RTS)
Ri = delayed(XrayDyn.calc_reflectivity_from_matrix)(RT)
res.append(Ri)
# compute results
res = dask_client.compute(res, sync=True)
# reorder results to reflectivity matrix
for i in range(M):
R[i, :, :] = res[i]
return R
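    # Example (sketch): a local Dask client is enough to drive the parallel
    # code path above:
    #
    #     from dask.distributed import Client
    #     client = Client()   # local scheduler plus workers
    #     R = dyn.inhomogeneous_reflectivity(strain_map, strain_vectors,
    #                                        calc_type='parallel',
    #                                        dask_client=client)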
    def distributed_inhomogeneous_reflectivity(self, strain_map, strain_vectors, RTM,
                                               job, num_workers):
"""distributed_inhomogeneous_reflectivity
This is a stub. Not yet implemented in python.
Args:
strain_map (ndarray[float]): spatio-temporal strain profile.
strain_vectors (list[ndarray[float]]): reduced strains per unique
layer.
RTM (list[ndarray[complex]]): reflection-transmission matrices for
all given strains per unique layer.
job (Dask.job): Dask job.
num_workers (int): Dask number of workers.
Returns:
R (ndarray[float]): inhomogeneous reflectivity.
"""
raise NotImplementedError
def calc_inhomogeneous_reflectivity(self, strains, strain_vectors, RTM):
r"""calc_inhomogeneous_reflectivity
        Calculates the reflectivity of an inhomogeneous sample structure for
        given ``strain_vectors`` for a single time step. Similar to the
        homogeneous sample structure, the reflectivity of a unit cell is
calculated from the reflection-transmission matrices :math:`H_i` of
each atom and the phase matrices between the atoms :math:`L_i` in the
unit cell:
.. math:: M_{RT} = \prod_i H_i \ L_i
Since all layers are generally inhomogeneously strained we have to
traverse all individual unit cells (:math:`j = 1\ldots M`) in the
sample to calculate the total reflection-transmission matrix
:math:`M_{RT}^t`:
.. math:: M_{RT}^t = \prod_{j=1}^M M_{RT,j}
        The reflectivity of the :math:`2\times 2` matrices for each :math:`q_z`
        is calculated as follows:
        .. math:: R = \left|M_{RT}^t(0,1)/M_{RT}^t(1,1)\right|^2
Args:
            strains (ndarray[float]): spatial strain profile for a single
                time step.
strain_vectors (list[ndarray[float]]): reduced strains per unique
layer.
RTM (list[ndarray[complex]]): reflection-transmission matrices for
all given strains per unique layer.
Returns:
R (ndarray[float]): inhomogeneous reflectivity.
"""
        # initialize dimensions
N = np.shape(self._qz)[1] # number of q_z
M = np.shape(self._qz)[0] # number of energies
uc_indices, _, _ = self.S.get_layer_vectors()
# initialize ref_trans_matrix
RTU = np.tile(np.eye(2, 2)[np.newaxis, np.newaxis, :, :], (M, N, 1, 1))
RT = XrayDyn.calc_inhomogeneous_ref_trans_matrix(uc_indices,
RTU,
strains,
strain_vectors,
RTM)
# if a substrate is included add it at the end
if self.S.substrate != []:
RTS, _ = self.homogeneous_ref_trans_matrix(self.S.substrate)
RT = m_times_n(RT, RTS)
# calculate reflectivity from ref-trans matrix
R = self.calc_reflectivity_from_matrix(RT)
return R
@staticmethod
def calc_inhomogeneous_ref_trans_matrix(uc_indices, RT, strains,
strain_vectors, RTM):
r"""calc_inhomogeneous_ref_trans_matrix
Sub-function of :meth:`calc_inhomogeneous_reflectivity` and for
parallel computing (needs to be static) only for calculating the
total reflection-transmission matrix :math:`M_{RT}^t`:
.. math:: M_{RT}^t = \prod_{j=1}^M M_{RT,j}
Args:
uc_indices (ndarray[float]): unit cell indices.
RT (ndarray[complex]): reflection-transmission matrix.
strains (ndarray[float]): spatial strain profile for single time
step.
strain_vectors (list[ndarray[float]]): reduced strains per unique
layer.
RTM (list[ndarray[complex]]): reflection-transmission matrices for
all given strains per unique layer.
Returns:
RT (ndarray[complex]): reflection-transmission matrix.
"""
# traverse all unit cells in the sample structure
for i, uc_index in enumerate(uc_indices):
# Find the ref-trans matrix in the RTM cell array for the
# current unit_cell ID and applied strain. Use the
# ``finderb`` function to find the nearest strain value.
strain_index = finderb(strains[i], strain_vectors[int(uc_index)])[0]
temp = RTM[int(uc_index)][strain_index]
if temp is not None:
RT = m_times_n(RT, temp)
else:
raise ValueError('RTM not found')
return RT
def get_all_ref_trans_matrices(self, *args):
"""get_all_ref_trans_matrices
Returns a list of all reflection-transmission matrices for each
unique unit cell in the sample structure for a given set of applied
strains for each unique unit cell given by the ``strain_vectors``
input. If this data was saved on disk before, it is loaded, otherwise
it is calculated.
Args:
args (list[ndarray[float]], optional): reduced strains per unique
layer.
Returns:
RTM (list[ndarray[complex]]): reflection-transmission matrices for
all given strains per unique layer.
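Example:
A minimal usage sketch, assuming an instance ``xray_dyn`` of this
class and a sample with a single unique layer (hypothetical names):
>>> strain_vectors = [np.linspace(-0.01, 0.01, 100)]
>>> RTM = xray_dyn.get_all_ref_trans_matrices(strain_vectors)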
"""
if len(args) == 0:
strain_vectors = [np.array([1])]*self.S.get_number_of_unique_layers()
else:
strain_vectors = args[0]
# create a hash of all simulation parameters
filename = 'all_ref_trans_matrices_dyn_' \
+ self.get_hash(strain_vectors) + '.npz'
full_filename = path.abspath(path.join(self.cache_dir, filename))
# check if we find some corresponding data in the cache dir
if path.exists(full_filename) and not self.force_recalc:
# found something so load it
tmp = np.load(full_filename)
RTM = tmp['RTM']
self.disp_message('_all_ref_trans_matrices_dyn_ loaded from file:\n\t' + filename)
else:
# nothing found so calculate it and save it
RTM = self.calc_all_ref_trans_matrices(strain_vectors)
self.save(full_filename, {'RTM': RTM}, '_all_ref_trans_matrices_dyn_')
return RTM
def calc_all_ref_trans_matrices(self, *args):
"""calc_all_ref_trans_matrices
Calculates a list of all reflection-transmission matrices for each
unique unit cell in the sample structure for a given set of applied
strains to each unique unit cell given by the ``strain_vectors`` input.
Args:
args (list[ndarray[float]], optional): reduced strains per unique
layer.
Returns:
RTM (list[ndarray[complex]]): reflection-transmission matrices for
all given strains per unique layer.
"""
t1 = time()
self.disp_message('Calculate all _ref_trans_matrices_ ...')
# initialize
uc_ids, uc_handles = self.S.get_unique_layers()
# if no strain_vectors are given we just do it for no strain (1)
if len(args) == 0:
strain_vectors = [np.array([1])]*len(uc_ids)
else:
strain_vectors = args[0]
# check if there are strains for each unique unitCell
if len(strain_vectors) != len(uc_ids):
    raise TypeError('The strain vector does not have the same size '
                    'as the number of unique unit cells')
# initialize ref_trans_matrices
RTM = []
# traverse all unique unit_cells
for i, uc in enumerate(uc_handles):
if not isinstance(uc, UnitCell):
    raise ValueError('All layers must be UnitCells!')
temp = []
# traverse all strains in the strain_vector for this unique
# unit_cell
for strain in strain_vectors[i]:
temp.append(self.get_uc_ref_trans_matrix(uc, strain))
RTM.append(temp)
self.disp_message('Elapsed time for _ref_trans_matrices_: {:f} s'.format(time()-t1))
return RTM
def get_uc_ref_trans_matrix(self, uc, *args):
r"""get_uc_ref_trans_matrix
Returns the reflection-transmission matrix of a unit cell:
.. math:: M_{RT} = \prod_i H_i \ L_i
where :math:`H_i` and :math:`L_i` are the atomic reflection-
transmission matrix and the phase matrix for the atomic distances,
respectively.
Args:
uc (UnitCell): unit cell object.
args (float, optional): strain of unit cell.
Returns:
RTM (ndarray[complex]): reflection-transmission matrix of the
unit cell for the given strain.
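Example:
A minimal sketch for a unit cell strained by 1%, assuming an
instance ``xray_dyn`` and a ``uc`` object (hypothetical names):
>>> RT_uc = xray_dyn.get_uc_ref_trans_matrix(uc, 0.01)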
"""
if len(args) == 0:
strain = 0 # set the default strain to 0
else:
strain = args[0]
M = len(self._energy) # number of energies
N = np.shape(self._qz)[1] # number of q_z
K = uc.num_atoms # number of atoms
# initialize matrices
RTM = np.tile(np.eye(2, 2)[np.newaxis, np.newaxis, :, :], (M, N, 1, 1))
# traverse all atoms of the unit cell
for i in range(K):
# Calculate the relative distance between the atoms.
# The relative position is calculated by the function handle
# stored as the second element in the atoms list. This
# function returns a relative position depending on the
# applied strain.
if i == (K-1):  # it's the last atom
del_dist = (strain+1)-uc.atoms[i][1](strain)
else:
del_dist = uc.atoms[i+1][1](strain)-uc.atoms[i][1](strain)
# get the reflection-transmission matrix and phase matrix
# from all atoms in the unit cell and multiply them
# together
RTM = m_times_n(RTM,
self.get_atom_ref_trans_matrix(uc.atoms[i][0],
uc._area,
uc._deb_wal_fac))
RTM = m_times_n(RTM,
self.get_atom_phase_matrix(del_dist*uc._c_axis))
return RTM
def get_atom_ref_trans_matrix(self, atom, area, deb_wal_fac):
r"""get_atom_ref_trans_matrix
Calculates the reflection-transmission matrix of an atom from dynamical
x-ray theory:
.. math::
H = \frac{1}{\tau} \begin{bmatrix}
\left(\tau^2 - \rho^2\right) & \rho \\
-\rho & 1
\end{bmatrix}
Args:
atom (Atom, AtomMixed): atom or mixed atom
area (float): area of the unit cell [m²]
deb_wal_fac (float): Debye-Waller factor for unit cell
Returns:
H (ndarray[complex]): reflection-transmission matrix
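Example:
A minimal sketch, assuming an instance ``xray_dyn`` and an
``atom`` object (hypothetical names), an area of 1e-19 m² and no
Debye-Waller damping:
>>> H = xray_dyn.get_atom_ref_trans_matrix(atom, 1e-19, 0)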
"""
# check for already calculated data
_hash = make_hash_md5([self._energy, self._qz, self.pol_in_state, self.pol_out_state,
area, deb_wal_fac])
try:
index = self.last_atom_ref_trans_matrices['atom_ids'].index(atom.id)
except ValueError:
index = -1
if (index >= 0) and (_hash == self.last_atom_ref_trans_matrices['hashes'][index]):
# These are the same X-ray parameters as last time so we
# can use the same matrix again for this atom
H = self.last_atom_ref_trans_matrices['H'][index]
else:
# These are new parameters so we have to calculate.
# Get the reflection-transmission-factors
rho = self.get_atom_reflection_factor(atom, area, deb_wal_fac)
tau = self.get_atom_transmission_factor(atom, area, deb_wal_fac)
# calculate the reflection-transmission matrix
H = np.zeros([np.shape(self._qz)[0], np.shape(self._qz)[1], 2, 2], dtype=np.cfloat)
H[:, :, 0, 0] = (1/tau)*(tau**2-rho**2)
H[:, :, 0, 1] = (1/tau)*(rho)
H[:, :, 1, 0] = (1/tau)*(-rho)
H[:, :, 1, 1] = (1/tau)
# remember this matrix for next use with the same
# parameters for this atom
if index >= 0:
self.last_atom_ref_trans_matrices['atom_ids'][index] = atom.id
self.last_atom_ref_trans_matrices['hashes'][index] = _hash
self.last_atom_ref_trans_matrices['H'][index] = H
else:
self.last_atom_ref_trans_matrices['atom_ids'].append(atom.id)
self.last_atom_ref_trans_matrices['hashes'].append(_hash)
self.last_atom_ref_trans_matrices['H'].append(H)
return H
def get_atom_reflection_factor(self, atom, area, deb_wal_fac):
r"""get_atom_reflection_factor
Calculates the reflection factor from dynamical x-ray theory:
.. math:: \rho = \frac{-i 4 \pi \ r_e \ f(E,q_z) \ P(\theta)
\exp(-M)}{q_z \ A}
- :math:`r_e` is the electron radius
- :math:`f(E,q_z)` is the energy and angle dispersive atomic
form factor
- :math:`P(\theta)` is the polarization factor
- :math:`A` is the area in :math:`x-y` plane on which the atom
is placed
- :math:`M = 0.5(\mbox{dbf} \ q_z)^2` where
:math:`\mbox{dbf}^2 = \langle u^2\rangle` is the average
thermal vibration of the atoms - Debye-Waller factor
Args:
atom (Atom, AtomMixed): atom or mixed atom
area (float): area of the unit cell [m²]
deb_wal_fac (float): Debye-Waller factor for unit cell
Returns:
rho (complex): reflection factor
"""
rho = (-4j*np.pi*r_0
* atom.get_cm_atomic_form_factor(self._energy, self._qz)
* self.get_polarization_factor(self._theta)
* np.exp(-0.5*(deb_wal_fac*self._qz)**2))/(self._qz*area)
return rho
def get_atom_transmission_factor(self, atom, area, deb_wal_fac):
r"""get_atom_transmission_factor
Calculates the transmission factor from dynamical x-ray theory:
.. math:: \tau = 1 - \frac{i 4 \pi r_e f(E,0) \exp(-M)}{q_z A}
- :math:`r_e` is the electron radius
- :math:`f(E,0)` is the energy dispersive atomic form factor
(no angle correction)
- :math:`A` is the area in :math:`x-y` plane on which the atom
is placed
- :math:`M = 0.5(\mbox{dbf} \ q_z)^2` where
:math:`\mbox{dbf}^2 = \langle u^2\rangle` is the average
thermal vibration of the atoms - Debye-Waller factor
Args:
atom (Atom, AtomMixed): atom or mixed atom
area (float): area of the unit cell [m²]
deb_wal_fac (float): Debye-Waller factor for unit cell
Returns:
tau (complex): transmission factor
"""
tau = 1 - (4j*np.pi*r_0
* atom.get_cm_atomic_form_factor(self._energy, np.zeros_like(self._qz))
* np.exp(-0.5*(deb_wal_fac*self._qz)**2))/(self._qz*area)
return tau
def get_atom_phase_matrix(self, distance):
r"""get_atom_phase_matrix
Calculates the phase matrix from dynamical x-ray theory:
.. math::
L = \begin{bmatrix}
\exp(i \phi) & 0 \\
0 & \exp(-i \phi)
\end{bmatrix}
Args:
distance (float): distance between atomic planes
Returns:
L (ndarray[complex]): phase matrix
"""
phi = self.get_atom_phase_factor(distance)
L = np.zeros([np.shape(self._qz)[0], np.shape(self._qz)[1], 2, 2], dtype=np.cfloat)
L[:, :, 0, 0] = np.exp(1j*phi)
L[:, :, 1, 1] = np.exp(-1j*phi)
return L
def get_atom_phase_factor(self, distance):
r"""get_atom_phase_factor
Calculates the phase factor :math:`\phi` for a distance :math:`d`
from dynamical x-ray theory:
.. math:: \phi = \frac{d \ q_z}{2}
Args:
distance (float): distance between atomic planes
Returns:
phi (float): phase factor
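Example:
A minimal sketch for a plane distance of 0.3 nm, assuming an
instance ``xray_dyn`` with ``qz`` already set (hypothetical name):
>>> phi = xray_dyn.get_atom_phase_factor(3e-10)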
"""
phi = distance * self._qz/2
return phi
@staticmethod
def calc_reflectivity_from_matrix(M):
r"""calc_reflectivity_from_matrix
Calculates the reflectivity from an :math:`2\times2` matrix of
transmission and reflectivity factors:
.. math:: R = \left|M(0,1)/M(1,1)\right|^2
Args:
M (ndarray[complex]): reflection-transmission matrix
Returns:
R (ndarray[float]): reflectivity
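Example:
A minimal sketch on a single matrix embedded in the expected
``[N_E, N_qz, 2, 2]`` shape:
>>> M = np.tile(np.eye(2, dtype=np.cfloat), (1, 1, 1, 1))
>>> M[0, 0, 0, 1] = 0.1 + 0.2j
>>> float(np.round(XrayDyn.calc_reflectivity_from_matrix(M)[0, 0], 6))
0.05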
"""
return np.abs(M[:, :, 0, 1]/M[:, :, 1, 1])**2
class XrayDynMag(Xray):
r"""XrayDynMag
Dynamical magnetic X-ray scattering simulations.
Adapted from Elzo et al. [10]_ and initially realized in `Project Dyna
<http://dyna.neel.cnrs.fr>`_.
Original copyright notice:
*Copyright Institut Neel, CNRS, Grenoble, France*
**Project Collaborators:**
- Stéphane Grenier, stephane.grenier@neel.cnrs.fr
- Marta Elzo (PhD, 2009-2012)
- Nicolas Jaouen Sextants beamline, Synchrotron Soleil,
nicolas.jaouen@synchrotron-soleil.fr
- Emmanuelle Jal (PhD, 2010-2013) now at `LCPMR CNRS, Paris
<https://lcpmr.cnrs.fr/content/emmanuelle-jal>`_
- Jean-Marc Tonnerre, jean-marc.tonnerre@neel.cnrs.fr
- Ingrid Hallsteinsen - Padraic Shaffer’s group - Berkeley Nat. Lab.
**Questions to:**
- Stéphane Grenier, stephane.grenier@neel.cnrs.fr
Args:
S (Structure): sample to do simulations with.
force_recalc (boolean): force recalculation of results.
Keyword Args:
save_data (boolean): true to save simulation results.
cache_dir (str): path to cached data.
disp_messages (boolean): true to display messages from within the
simulations.
progress_bar (boolean): enable tqdm progress bar.
Attributes:
S (Structure): sample structure to calculate simulations on.
force_recalc (boolean): force recalculation of results.
save_data (boolean): true to save simulation results.
cache_dir (str): path to cached data.
disp_messages (boolean): true to display messages from within the
simulations.
progress_bar (boolean): enable tqdm progress bar.
energy (ndarray[float]): photon energies :math:`E` of scattering light
wl (ndarray[float]): wavelengths :math:`\lambda` of scattering light
k (ndarray[float]): wavenumber :math:`k` of scattering light
theta (ndarray[float]): incidence angles :math:`\theta` of scattering
light
qz (ndarray[float]): scattering vector :math:`q_z` of scattering light
polarizations (dict): polarization states and according names.
pol_in_state (int): incoming polarization state as defined in
polarizations dict.
pol_out_state (int): outgoing polarization state as defined in
polarizations dict.
pol_in (float): incoming polarization factor (can be a complex ndarray).
pol_out (float): outgoing polarization factor (can be a complex ndarray).
last_atom_ref_trans_matrices (list): remember last result of
atom ref_trans_matrices to speed up calculation.
References:
.. [10] M. Elzo, E. Jal, O. Bunau, S. Grenier, Y. Joly, A. Y.
Ramos, H. C. N. Tolentino, J. M. Tonnerre & N. Jaouen, *X-ray
resonant magnetic reflectivity of stratified magnetic structures:
Eigenwave formalism and application to a W/Fe/W trilayer*,
`J. Magn. Magn. Mater. 324, 105 (2012).
<http://www.doi.org/10.1016/j.jmmm.2011.07.019>`_
"""
def __init__(self, S, force_recalc, **kwargs):
super().__init__(S, force_recalc, **kwargs)
self.last_atom_ref_trans_matrices = {'atom_ids': [],
'hashes': [],
'A': [],
'A_phi': [],
'P': [],
'P_phi': [],
'A_inv': [],
'A_inv_phi': [],
'k_z': []}
def __str__(self):
"""String representation of this class"""
class_str = 'Dynamical Magnetic X-Ray Diffraction simulation properties:\n\n'
class_str += super().__str__()
return class_str
def get_hash(self, **kwargs):
"""get_hash
Calculates a unique hash given by the energy :math:`E`, :math:`q_z`
range, polarization states as well as the sample structure hash for
relevant x-ray and magnetic parameters. Optionally, parts of the
``strain_map`` and ``magnetization_map`` are used.
Args:
**kwargs (ndarray[float]): spatio-temporal strain and magnetization
profile.
Returns:
hash (str): unique hash.
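Example:
A minimal sketch, assuming an instance ``dyn_mag`` of this class
and a matching ``strain_map`` (hypothetical names):
>>> h = dyn_mag.get_hash(strain_map=strain_map)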
"""
param = [self.pol_in_state, self.pol_out_state, self._qz, self._energy]
if 'strain_map' in kwargs:
strain_map = kwargs.get('strain_map')
if np.size(strain_map) > 1e6:
strain_map = strain_map.flatten()[0:1000000]
param.append(strain_map)
if 'magnetization_map' in kwargs:
magnetization_map = kwargs.get('magnetization_map')
if np.size(magnetization_map) > 1e6:
magnetization_map = magnetization_map.flatten()[0:1000000]
param.append(magnetization_map)
return self.S.get_hash(types=['xray', 'magnetic']) + '_' + make_hash_md5(param)
def set_incoming_polarization(self, pol_in_state):
"""set_incoming_polarization
Sets the incoming polarization factor for circular +, circular -, sigma,
pi, and unpolarized polarization.
Args:
pol_in_state (int): incoming polarization state id.
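Example:
A minimal sketch, assuming an instance ``dyn_mag`` of this class
(hypothetical name):
>>> dyn_mag.set_incoming_polarization(3)  # sigma polarization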
"""
self.pol_in_state = pol_in_state
if (self.pol_in_state == 1): # circ +
self.pol_in = np.array([-np.sqrt(.5), -1j*np.sqrt(.5)], dtype=np.cfloat)
elif (self.pol_in_state == 2): # circ -
self.pol_in = np.array([np.sqrt(.5), -1j*np.sqrt(.5)], dtype=np.cfloat)
elif (self.pol_in_state == 3): # sigma
self.pol_in = np.array([1, 0], dtype=np.cfloat)
elif (self.pol_in_state == 4): # pi
self.pol_in = np.array([0, 1], dtype=np.cfloat)
else: # unpolarized
self.pol_in_state = 0 # catch any number and set state to 0
self.pol_in = np.array([np.sqrt(.5), np.sqrt(.5)], dtype=np.cfloat)
self.disp_message('incoming polarizations set to: {:s}'.format(
self.polarizations[self.pol_in_state]))
def set_outgoing_polarization(self, pol_out_state):
"""set_outgoing_polarization
Sets the outgoing polarization factor for circular +, circular -, sigma,
pi, and unpolarized polarization.
Args:
pol_out_state (int): outgoing polarization state id.
"""
self.pol_out_state = pol_out_state
if (self.pol_out_state == 1): # circ +
self.pol_out = np.array([-np.sqrt(.5), 1j*np.sqrt(.5)], dtype=np.cfloat)
elif (self.pol_out_state == 2): # circ -
self.pol_out = np.array([np.sqrt(.5), 1j*np.sqrt(.5)], dtype=np.cfloat)
elif (self.pol_out_state == 3): # sigma
self.pol_out = np.array([1, 0], dtype=np.cfloat)
elif (self.pol_out_state == 4): # pi
self.pol_out = np.array([0, 1], dtype=np.cfloat)
else: # no analyzer
self.pol_out_state = 0 # catch any number and set state to 0
self.pol_out = np.array([], dtype=np.cfloat)
self.disp_message('analyzer polarizations set to: {:s}'.format(
self.polarizations[self.pol_out_state]))
def homogeneous_reflectivity(self, *args):
r"""homogeneous_reflectivity
Calculates the reflectivity :math:`R` of the whole sample structure
allowing only for homogeneous strain and magnetization.
The reflection-transmission matrices
.. math:: RT = A_f^{-1} \prod_m \left( A_m P_m A_m^{-1} \right) A_0
are calculated for every substructure :math:`m` before post-processing
the incoming and analyzer polarizations and calculating the actual
reflectivities as a function of energy and :math:`q_z`.
Args:
args (ndarray[float], optional): strains and magnetization for each
sub-structure.
Returns:
(tuple):
- *R (ndarray[float])* - homogeneous reflectivity.
- *R_phi (ndarray[float])* - homogeneous reflectivity for opposite
magnetization.
- *T (ndarray[float])* - homogeneous transmissivity.
- *T_phi (ndarray[float])* - homogeneous transmissivity for opposite
magnetization.
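Example:
A minimal sketch without strain and magnetization, assuming an
instance ``dyn_mag`` of this class (hypothetical name):
>>> R, R_phi, T, T_phi = dyn_mag.homogeneous_reflectivity()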
"""
t1 = time()
self.disp_message('Calculating _homogeneous_reflectivity_ ...')
# vacuum boundary
A0, A0_phi, _, _, _, _, k_z_0 = self.get_atom_boundary_phase_matrix([], 0, 0)
# calc the reflectivity-transmission matrix of the structure
# and the inverse of the last boundary matrix
RT, RT_phi, last_A, last_A_phi, last_A_inv, last_A_inv_phi, last_k_z = \
self.calc_homogeneous_matrix(self.S, A0, A0_phi, k_z_0, *args)
# if a substrate is included add it at the end
if self.S.substrate != []:
RT_sub, RT_sub_phi, last_A, last_A_phi, last_A_inv, last_A_inv_phi, _ = \
self.calc_homogeneous_matrix(
self.S.substrate, last_A, last_A_phi, last_k_z)
RT = m_times_n(RT_sub, RT)
RT_phi = m_times_n(RT_sub_phi, RT_phi)
# multiply the result of the structure with the boundary matrix
# of vacuum (initial layer) and the final layer
RT = m_times_n(last_A_inv, m_times_n(last_A, RT))
RT_phi = m_times_n(last_A_inv_phi, m_times_n(last_A_phi, RT_phi))
# calc the actual reflectivity and transmissivity from the matrix
R, T = XrayDynMag.calc_reflectivity_transmissivity_from_matrix(
RT, self.pol_in, self.pol_out)
R_phi, T_phi = XrayDynMag.calc_reflectivity_transmissivity_from_matrix(
RT_phi, self.pol_in, self.pol_out)
self.disp_message('Elapsed time for _homogeneous_reflectivity_: {:f} s'.format(time()-t1))
return R, R_phi, T, T_phi
def calc_homogeneous_matrix(self, S, last_A, last_A_phi, last_k_z, *args):
r"""calc_homogeneous_matrix
Calculates the product of all reflection-transmission matrices of the
sample structure
.. math:: RT = \prod_m \left(P_m A_m^{-1} A_{m-1} \right)
If the sub-structure :math:`m` consists of :math:`N` unit cells
the matrix power rule is applied:
.. math:: RT_m = \left( P_{UC} A_{UC}^{-1} A_{UC} \right)^N
Interface roughness is also included via a Gaussian width.
Args:
S (Structure, UnitCell, AmorphousLayer): structure, sub-structure,
unit cell or amorphous layer to calculate on.
last_A (ndarray[complex]): last atom boundary matrix.
last_A_phi (ndarray[complex]): last atom boundary matrix for opposite
magnetization.
last_k_z (ndarray[float]): last internal wave vector
args (ndarray[float], optional): strains and magnetization for each
sub-structure.
Returns:
(tuple):
- *RT (ndarray[complex])* - reflection-transmission matrix.
- *RT_phi (ndarray[complex])* - reflection-transmission matrix for
opposite magnetization.
- *A (ndarray[complex])* - atom boundary matrix.
- *A_phi (ndarray[complex])* - atom boundary matrix for opposite
magnetization.
- *A_inv (ndarray[complex])* - inverted atom boundary matrix.
- *A_inv_phi (ndarray[complex])* - inverted atom boundary matrix for
opposite magnetization.
- *k_z (ndarray[float])* - internal wave vector.
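Example:
A minimal sketch, propagating from the vacuum boundary matrices as
computed in :meth:`homogeneous_reflectivity` (``dyn_mag``, ``A0``,
``A0_phi``, and ``k_z_0`` are assumed to exist):
>>> RT, RT_phi, A, A_phi, A_inv, A_inv_phi, k_z = \
...     dyn_mag.calc_homogeneous_matrix(dyn_mag.S, A0, A0_phi, k_z_0)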
"""
# if no strains are given we assume no strain (0)
if len(args) == 0:
strains = np.zeros([S.get_number_of_sub_structures(), 1])
else:
strains = args[0]
if len(args) < 2:
# create zero (non-magnetic) magnetizations
magnetizations = np.zeros([S.get_number_of_sub_structures(), 1])
else:
magnetizations = args[1]
layer_counter = 0
# traverse substructures
for i, sub_structure in enumerate(S.sub_structures):
layer = sub_structure[0]
repetitions = sub_structure[1]
if isinstance(layer, UnitCell):
# the sub_structure is a unit cell
# calculate the ref-trans matrices for N unitCells
RT_uc, RT_uc_phi, A, A_phi, A_inv, A_inv_phi, k_z = \
self.calc_uc_boundary_phase_matrix(
layer, last_A, last_A_phi, last_k_z, strains[layer_counter],
magnetizations[layer_counter])
temp = RT_uc
temp_phi = RT_uc_phi
if repetitions > 1:
# use m_power_x for more than one repetition
temp2, temp2_phi, A, A_phi, A_inv, A_inv_phi, k_z = \
self.calc_uc_boundary_phase_matrix(
layer, A, A_phi, k_z, strains[layer_counter],
magnetizations[layer_counter])
temp2 = m_power_x(temp2, repetitions-1)
temp2_phi = m_power_x(temp2_phi, repetitions-1)
temp = m_times_n(temp2, temp)
temp_phi = m_times_n(temp2_phi, temp_phi)
layer_counter += 1
elif isinstance(layer, AmorphousLayer):
# the sub_structure is an amorphous layer
# calculate the ref-trans matrices for N layers
# density scales inversely and thickness linearly with strain;
# force_recalc=False is passed explicitly so that the
# magnetization ends up in *args, as in calc_inhomogeneous_matrix
A, A_phi, P, P_phi, A_inv, A_inv_phi, k_z = \
    self.get_atom_boundary_phase_matrix(layer.atom,
                                        layer._density/(
                                            strains[layer_counter]+1),
                                        layer._thickness*(
                                            strains[layer_counter]+1),
                                        False,
                                        magnetizations[layer_counter])
roughness = layer._roughness
F = m_times_n(A_inv, last_A)
F_phi = m_times_n(A_inv_phi, last_A_phi)
if roughness > 0:
W = XrayDynMag.calc_roughness_matrix(roughness, k_z, last_k_z)
F = F * W
F_phi = F_phi * W
RT_amorph = m_times_n(P, F)
RT_amorph_phi = m_times_n(P_phi, F_phi)
temp = RT_amorph
temp_phi = RT_amorph_phi
if repetitions > 1:
# use m_power_x for more than one repetition
F = m_times_n(A_inv, A)
F_phi = m_times_n(A_inv_phi, A_phi)
RT_amorph = m_times_n(P, F)
RT_amorph_phi = m_times_n(P_phi, F_phi)
temp = m_times_n(m_power_x(RT_amorph, repetitions-1), temp)
temp_phi = m_times_n(m_power_x(RT_amorph_phi, repetitions-1), temp_phi)
layer_counter += 1
else:
# it's a structure, so make a recursive call
temp, temp_phi, A, A_phi, A_inv, A_inv_phi, k_z = self.calc_homogeneous_matrix(
layer, last_A, last_A_phi, last_k_z,
strains[layer_counter:(
layer_counter
+ layer.get_number_of_sub_structures()
)],
magnetizations[layer_counter:(
layer_counter
+ layer.get_number_of_sub_structures()
)])
# calculate the ref-trans matrices for N sub structures
if repetitions > 1:
# use m_power_x for more than one repetition
temp2, temp2_phi, A, A_phi, A_inv, A_inv_phi, k_z = \
self.calc_homogeneous_matrix(
layer, A, A_phi, k_z,
strains[layer_counter:(layer_counter
+ layer.get_number_of_sub_structures())],
magnetizations[layer_counter:(layer_counter
+ layer.get_number_of_sub_structures())])
temp = m_times_n(m_power_x(temp2, repetitions-1), temp)
temp_phi = m_times_n(m_power_x(temp2_phi, repetitions-1), temp_phi)
layer_counter = layer_counter+layer.get_number_of_sub_structures()
# multiply it to the output
if i == 0:
RT = temp
RT_phi = temp_phi
else:
RT = m_times_n(temp, RT)
RT_phi = m_times_n(temp_phi, RT_phi)
# update the last A and k_z
last_A = A
last_A_phi = A_phi
last_k_z = k_z
return RT, RT_phi, A, A_phi, A_inv, A_inv_phi, k_z
def inhomogeneous_reflectivity(self, strain_map=np.array([]),
magnetization_map=np.array([]), **kwargs):
"""inhomogeneous_reflectivity
Returns the reflectivity and transmissivity of an inhomogeneously
strained and magnetized sample structure for a given ``strain_map``
and ``magnetization_map`` in space and time for each unit cell or
amorphous layer in the sample structure. If no reflectivity is
saved in the cache it is calculated. Depending on the given
``calc_type`` the corresponding sub-routines for the reflectivity
computation are called:
* ``parallel`` parallelization over the time steps utilizing
`Dask <https://dask.org/>`_
* ``distributed`` not implemented in Python, but should be possible
with `Dask <https://dask.org/>`_ as well
* ``sequential`` no parallelization at all
Args:
strain_map (ndarray[float], optional): spatio-temporal strain
profile.
magnetization_map (ndarray[float], optional): spatio-temporal
magnetization profile.
**kwargs:
- *calc_type (str)* - type of calculation.
- *dask_client (Dask.Client)* - Dask client.
- *job (Dask.job)* - Dask job.
- *num_workers (int)* - Dask number of workers.
Returns:
(tuple):
- *R (ndarray[float])* - inhomogeneous reflectivity.
- *R_phi (ndarray[float])* - inhomogeneous reflectivity for opposite
magnetization.
- *T (ndarray[float])* - inhomogeneous transmissivity.
- *T_phi (ndarray[float])* - inhomogeneous transmissivity for opposite
magnetization.
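Example:
A minimal sketch of the ``parallel`` mode, assuming a local Dask
cluster and pre-computed ``strain_map`` and ``magnetization_map``
(all names are hypothetical):
>>> from dask.distributed import Client
>>> client = Client()
>>> R, R_phi, T, T_phi = dyn_mag.inhomogeneous_reflectivity(
...     strain_map, magnetization_map,
...     calc_type='parallel', dask_client=client)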
"""
# create a hash of all simulation parameters
filename = 'inhomogeneous_reflectivity_dynMag_' \
+ self.get_hash(strain_map=strain_map, magnetization_map=magnetization_map) \
+ '.npz'
full_filename = path.abspath(path.join(self.cache_dir, filename))
# check if we find some corresponding data in the cache dir
if path.exists(full_filename) and not self.force_recalc:
# found something so load it
tmp = np.load(full_filename)
R = tmp['R']
R_phi = tmp['R_phi']
T = tmp['T']
T_phi = tmp['T_phi']
self.disp_message('_inhomogeneous_reflectivity_ loaded from file:\n\t' + filename)
else:
t1 = time()
self.disp_message('Calculating _inhomogeneous_reflectivity_ ...')
# parse the input arguments
if not isinstance(strain_map, np.ndarray):
raise TypeError('strain_map must be a numpy ndarray!')
if not isinstance(magnetization_map, np.ndarray):
raise TypeError('magnetization_map must be a numpy ndarray!')
dask_client = kwargs.get('dask_client', [])
calc_type = kwargs.get('calc_type', 'sequential')
if calc_type not in ['parallel', 'sequential', 'distributed']:
raise TypeError('calc_type must be either _parallel_, '
'_sequential_, or _distributed_!')
job = kwargs.get('job')
num_workers = kwargs.get('num_workers', 1)
M = np.size(strain_map, 0)
N = np.size(magnetization_map, 0)
if (M == 0) and (N > 0):
strain_map = np.zeros([np.size(magnetization_map, 0),
np.size(magnetization_map, 1)])
elif (M > 0) and (N == 0):
magnetization_map = np.zeros_like(strain_map)
elif (M == 0) and (N == 0):
raise ValueError('At least a strain_map or magnetization_map must be given!')
else:
if M != N:
raise ValueError('The strain_map and magnetization_map must '
                 'have the same number of delay steps!')
# select the type of computation
if calc_type == 'parallel':
R, R_phi, T, T_phi = self.parallel_inhomogeneous_reflectivity(
strain_map, magnetization_map, dask_client)
elif calc_type == 'distributed':
R, R_phi, T, T_phi = self.distributed_inhomogeneous_reflectivity(
strain_map, magnetization_map, job, num_workers)
else: # sequential
R, R_phi, T, T_phi = self.sequential_inhomogeneous_reflectivity(
strain_map, magnetization_map)
self.disp_message('Elapsed time for _inhomogeneous_reflectivity_:'
' {:f} s'.format(time()-t1))
self.save(full_filename, {'R': R, 'R_phi': R_phi, 'T': T, 'T_phi': T_phi},
'_inhomogeneous_reflectivity_')
return R, R_phi, T, T_phi
def sequential_inhomogeneous_reflectivity(self, strain_map, magnetization_map):
"""sequential_inhomogeneous_reflectivity
Returns the reflectivity and transmission of an inhomogeneously strained
sample structure for a given ``strain_map`` and ``magnetization_map`` in
space and time. The function calculates the results sequentially
over all time steps without parallelization.
Args:
strain_map (ndarray[float]): spatio-temporal strain profile.
magnetization_map (ndarray[float]): spatio-temporal magnetization
profile.
Returns:
(tuple):
- *R (ndarray[float])* - inhomogeneous reflectivity.
- *R_phi (ndarray[float])* - inhomogeneous reflectivity for opposite
magnetization.
- *T (ndarray[float])* - inhomogeneous transmission.
- *T_phi (ndarray[float])* - inhomogeneous transmission for opposite
magnetization.
"""
# initialize
M = np.size(strain_map, 0) # delay steps
R = np.zeros([M, np.size(self._qz, 0), np.size(self._qz, 1)])
R_phi = np.zeros_like(R)
T = np.zeros_like(R)
T_phi = np.zeros_like(R)
if self.progress_bar:
iterator = trange(M, desc='Progress', leave=True)
else:
iterator = range(M)
for i in iterator:
# get the inhomogeneous reflectivity of the sample
# structure for each time step of the strain map
# vacuum boundary
A0, A0_phi, _, _, _, _, k_z_0 = self.get_atom_boundary_phase_matrix([], 0, 0)
RT, RT_phi, last_A, last_A_phi, last_A_inv, last_A_inv_phi, last_k_z = \
self.calc_inhomogeneous_matrix(
A0, A0_phi, k_z_0, strain_map[i, :], magnetization_map[i, :])
# if a substrate is included add it at the end
if self.S.substrate != []:
RT_sub, RT_sub_phi, last_A, last_A_phi, last_A_inv, last_A_inv_phi, _ = \
self.calc_homogeneous_matrix(
self.S.substrate, last_A, last_A_phi, last_k_z)
RT = m_times_n(RT_sub, RT)
RT_phi = m_times_n(RT_sub_phi, RT_phi)
# multiply vacuum and last layer
RT = m_times_n(last_A_inv, m_times_n(last_A, RT))
RT_phi = m_times_n(last_A_inv_phi, m_times_n(last_A_phi, RT_phi))
R[i, :, :], T[i, :, :] = XrayDynMag.calc_reflectivity_transmissivity_from_matrix(
RT, self.pol_in, self.pol_out)
R_phi[i, :, :], T_phi[i, :, :] = \
XrayDynMag.calc_reflectivity_transmissivity_from_matrix(
RT_phi, self.pol_in, self.pol_out)
return R, R_phi, T, T_phi
def parallel_inhomogeneous_reflectivity(self, strain_map, magnetization_map, dask_client):
"""parallel_inhomogeneous_reflectivity
Returns the reflectivity and transmission of an inhomogeneously strained
sample structure for a given ``strain_map`` and ``magnetization_map`` in
space and time. The function tries to parallelize the calculation over the
time steps, since the results do not depend on each other.
Args:
strain_map (ndarray[float]): spatio-temporal strain profile.
magnetization_map (ndarray[float]): spatio-temporal magnetization
profile.
dask_client (Dask.Client): Dask client.
Returns:
(tuple):
- *R (ndarray[float])* - inhomogeneous reflectivity.
- *R_phi (ndarray[float])* - inhomogeneous reflectivity for opposite
magnetization.
- *T (ndarray[float])* - inhomogeneous transmission.
- *T_phi (ndarray[float])* - inhomogeneous transmission for opposite
magnetization.
"""
if not dask_client:
raise ValueError('no dask client set')
from dask import delayed # to allow parallel computation
# initialize
res = []
M = np.size(strain_map, 0) # delay steps
N = np.size(self._qz, 0) # energy steps
K = np.size(self._qz, 1) # qz steps
R = np.zeros([M, N, K])
R_phi = np.zeros_like(R)
T = np.zeros_like(R)
T_phi = np.zeros_like(R)
# vacuum boundary
A0, A0_phi, _, _, _, _, k_z_0 = self.get_atom_boundary_phase_matrix([], 0, 0)
remote_A0 = dask_client.scatter(A0)
remote_A0_phi = dask_client.scatter(A0_phi)
remote_k_z_0 = dask_client.scatter(k_z_0)
remote_pol_in = dask_client.scatter(self.pol_in)
remote_pol_out = dask_client.scatter(self.pol_out)
if self.S.substrate != []:
remote_substrate = dask_client.scatter(self.S.substrate)
# create dask.delayed tasks for all delay steps
for i in range(M):
t = delayed(self.calc_inhomogeneous_matrix)(remote_A0,
remote_A0_phi,
remote_k_z_0,
strain_map[i, :],
magnetization_map[i, :])
RT = t[0]
RT_phi = t[1]
last_A = t[2]
last_A_phi = t[3]
last_A_inv = t[4]
last_A_inv_phi = t[5]
last_k_z = t[6]
if self.S.substrate != []:
t2 = delayed(self.calc_homogeneous_matrix)(
remote_substrate, last_A, last_A_phi, last_k_z)
RT_sub = t2[0]
RT_sub_phi = t2[1]
last_A = t2[2]
last_A_phi = t2[3]
last_A_inv = t2[4]
last_A_inv_phi = t2[5]
RT = delayed(m_times_n)(RT_sub, RT)
RT_phi = delayed(m_times_n)(RT_sub_phi, RT_phi)
# multiply vacuum and last layer
temp = delayed(m_times_n)(last_A, RT)
temp_phi = delayed(m_times_n)(last_A_phi, RT_phi)
RT = delayed(m_times_n)(last_A_inv, temp)
RT_phi = delayed(m_times_n)(last_A_inv_phi, temp_phi)
RTi = delayed(XrayDynMag.calc_reflectivity_transmissivity_from_matrix)(
RT, remote_pol_in, remote_pol_out)
RTi_phi = delayed(XrayDynMag.calc_reflectivity_transmissivity_from_matrix)(
RT_phi, remote_pol_in, remote_pol_out)
res.append(RTi[0])
res.append(RTi_phi[0])
res.append(RTi[1])
res.append(RTi_phi[1])
# compute results
res = dask_client.compute(res, sync=True)
# reorder results to reflectivity matrix
for i in range(M):
R[i, :, :] = res[4*i]
R_phi[i, :, :] = res[4*i + 1]
T[i, :, :] = res[4*i + 2]
T_phi[i, :, :] = res[4*i + 3]
return R, R_phi, T, T_phi
def distributed_inhomogeneous_reflectivity(self, strain_map, magnetization_map,
                                           job, num_workers):
"""distributed_inhomogeneous_reflectivity
This is a stub and is not yet implemented in Python.
Args:
strain_map (ndarray[float]): spatio-temporal strain profile.
magnetization_map (ndarray[float]): spatio-temporal magnetization
profile.
job (Dask.job): Dask job.
num_workers (int): Dask number of workers.
Returns:
(tuple):
- *R (ndarray[float])* - inhomogeneous reflectivity.
- *R_phi (ndarray[float])* - inhomogeneous reflectivity for opposite
magnetization.
"""
raise NotImplementedError
def calc_inhomogeneous_matrix(self, last_A, last_A_phi, last_k_z, strains, magnetizations):
r"""calc_inhomogeneous_matrix
Calculates the product of all reflection-transmission matrices of the
sample structure for every atomic layer.
.. math:: RT = \prod_m \left( P_m A_m^{-1} A_{m-1} \right)
Args:
last_A (ndarray[complex]): last atom boundary matrix.
last_A_phi (ndarray[complex]): last atom boundary matrix for opposite
magnetization.
last_k_z (ndarray[float]): last internal wave vector
strains (ndarray[float]): spatial strain profile for single time
step.
magnetizations (ndarray[float]): spatial magnetization profile for
single time step.
Returns:
(tuple):
- *RT (ndarray[complex])* - reflection-transmission matrix.
- *RT_phi (ndarray[complex])* - reflection-transmission matrix for
opposite magnetization.
- *A (ndarray[complex])* - atom boundary matrix.
- *A_phi (ndarray[complex])* - atom boundary matrix for opposite
magnetization.
- *A_inv (ndarray[complex])* - inverted atom boundary matrix.
- *A_inv_phi (ndarray[complex])* - inverted atom boundary matrix for
opposite magnetization.
- *k_z (ndarray[float])* - internal wave vector.
"""
L = self.S.get_number_of_layers()  # number of layers
_, _, layer_handles = self.S.get_layer_vectors()
# for inhomogeneous results we do not store results and force a re-calc
force_recalc = True
for i in range(L):
layer = layer_handles[i]
if isinstance(layer, UnitCell):
RT_layer, RT_layer_phi, A, A_phi, A_inv, A_inv_phi, k_z = \
self.calc_uc_boundary_phase_matrix(
layer, last_A, last_A_phi, last_k_z, strains[i],
magnetizations[i], force_recalc)
elif isinstance(layer, AmorphousLayer):
A, A_phi, P, P_phi, A_inv, A_inv_phi, k_z = \
self.get_atom_boundary_phase_matrix(
layer.atom, layer._density/(strains[i]+1), layer._thickness*(strains[i]+1),
force_recalc, magnetizations[i])
roughness = layer._roughness
F = m_times_n(A_inv, last_A)
F_phi = m_times_n(A_inv_phi, last_A_phi)
if roughness > 0:
W = XrayDynMag.calc_roughness_matrix(roughness, k_z, last_k_z)
F = F * W
F_phi = F_phi * W
RT_layer = m_times_n(P, F)
RT_layer_phi = m_times_n(P_phi, F_phi)
else:
raise ValueError('All layers must be either AmorphousLayers or UnitCells!')
if i == 0:
RT = RT_layer
RT_phi = RT_layer_phi
else:
RT = m_times_n(RT_layer, RT)
RT_phi = m_times_n(RT_layer_phi, RT_phi)
# update the last A and k_z
last_A = A
last_A_phi = A_phi
last_k_z = k_z
return RT, RT_phi, A, A_phi, A_inv, A_inv_phi, k_z
def calc_uc_boundary_phase_matrix(self, uc, last_A, last_A_phi, last_k_z, strain,
magnetization, force_recalc=False):
r"""calc_uc_boundary_phase_matrix
Calculates the product of all reflection-transmission matrices of
a single unit cell for a given strain:
.. math:: RT = \prod_m \left( P_m A_m^{-1} A_{m-1}\right)
and returns also the last matrices :math:`A, A^{-1}, k_z`.
Args:
uc (UnitCell): unit cell
last_A (ndarray[complex]): last atom boundary matrix.
last_A_phi (ndarray[complex]): last atom boundary matrix for opposite
magnetization.
last_k_z (ndarray[float]): last internal wave vector
strain (float): strain of unit cell for a single time
step.
magnetization (ndarray[float]): magnetization of unit cell for
a single time step.
force_recalc (boolean, optional): force recalculation of boundary
phase matrix if True. Defaults to False.
Returns:
(tuple):
- *RT (ndarray[complex])* - reflection-transmission matrix.
- *RT_phi (ndarray[complex])* - reflection-transmission matrix for
opposite magnetization.
- *A (ndarray[complex])* - atom boundary matrix.
- *A_phi (ndarray[complex])* - atom boundary matrix for opposite
magnetization.
- *A_inv (ndarray[complex])* - inverted atom boundary matrix.
- *A_inv_phi (ndarray[complex])* - inverted atom boundary matrix for
opposite magnetization.
- *k_z (ndarray[float])* - internal wave vector.
"""
K = uc.num_atoms  # number of atoms
for j in range(K):
if j == (K-1):  # it's the last atom
del_dist = (strain+1)-uc.atoms[j][1](strain)
else:
del_dist = uc.atoms[j+1][1](strain)-uc.atoms[j][1](strain)
distance = del_dist*uc._c_axis
try:
# calculate density
if distance == 0:
density = 0
else:
density = uc.atoms[j][0]._mass/(uc._area*distance)
except AttributeError:
density = 0
A, A_phi, P, P_phi, A_inv, A_inv_phi, k_z = \
self.get_atom_boundary_phase_matrix(uc.atoms[j][0], density, distance,
force_recalc, magnetization)
F = m_times_n(A_inv, last_A)
F_phi = m_times_n(A_inv_phi, last_A_phi)
if (j == 0) and (uc._roughness > 0):
# it is the first layer so care for the roughness
W = XrayDynMag.calc_roughness_matrix(uc._roughness, k_z, last_k_z)
F = F * W
F_phi = F_phi * W
temp = m_times_n(P, F)
temp_phi = m_times_n(P_phi, F_phi)
if j == 0:
RT = temp
RT_phi = temp_phi
else:
RT = m_times_n(temp, RT)
RT_phi = m_times_n(temp_phi, RT_phi)
# update last A and k_z
last_A = A
last_A_phi = A_phi
last_k_z = k_z
return RT, RT_phi, A, A_phi, A_inv, A_inv_phi, k_z
def get_atom_boundary_phase_matrix(self, atom, density, distance,
force_recalc=False, *args):
"""get_atom_boundary_phase_matrix
Returns the boundary and phase matrices of an atom from Elzo
formalism [10]_. The results for a given atom, energy, :math:`q_z`,
polarization, and magnetization are stored to RAM to avoid recalculation.
Args:
atom (Atom, AtomMixed): atom or mixed atom.
density (float): density around the atom [kg/m³].
distance (float): distance to the next atomic layer [m].
force_recalc (boolean, optional): force recalculation of boundary
phase matrix if True. Defaults to False.
args (ndarray[float]): magnetization vector.
Returns:
(tuple):
- *A (ndarray[complex])* - atom boundary matrix.
- *A_phi (ndarray[complex])* - atom boundary matrix for opposite
magnetization.
- *P (ndarray[complex])* - atom phase matrix.
- *P_phi (ndarray[complex])* - atom phase matrix for opposite
magnetization.
- *A_inv (ndarray[complex])* - inverted atom boundary matrix.
- *A_inv_phi (ndarray[complex])* - inverted atom boundary matrix for
opposite magnetization.
- *k_z (ndarray[float])* - internal wave vector.
"""
try:
index = self.last_atom_ref_trans_matrices['atom_ids'].index(atom.id)
except ValueError:
index = -1
except AttributeError:
# it's vacuum
A, A_phi, P, P_phi, A_inv, A_inv_phi, k_z = \
self.calc_atom_boundary_phase_matrix(atom, density, distance, *args)
return A, A_phi, P, P_phi, A_inv, A_inv_phi, k_z
if force_recalc:
# just calculate and do not remember the results to save
# computational time
A, A_phi, P, P_phi, A_inv, A_inv_phi, k_z = \
self.calc_atom_boundary_phase_matrix(atom, density, distance, *args)
else:
# check for already calculated data
_hash = make_hash_md5([self._energy, self._qz, self.pol_in, self.pol_out,
density, distance,
atom.mag_amplitude,
atom.mag_gamma,
atom.mag_phi,
*args])
if (index >= 0) and (_hash == self.last_atom_ref_trans_matrices['hashes'][index]):
# These are the same X-ray parameters as last time so we
# can use the same matrix again for this atom
A = self.last_atom_ref_trans_matrices['A'][index]
A_phi = self.last_atom_ref_trans_matrices['A_phi'][index]
P = self.last_atom_ref_trans_matrices['P'][index]
P_phi = self.last_atom_ref_trans_matrices['P_phi'][index]
A_inv = self.last_atom_ref_trans_matrices['A_inv'][index]
A_inv_phi = self.last_atom_ref_trans_matrices['A_inv_phi'][index]
k_z = self.last_atom_ref_trans_matrices['k_z'][index]
else:
# These are new parameters so we have to calculate.
# Get the reflection-transmission-factors
A, A_phi, P, P_phi, A_inv, A_inv_phi, k_z = \
self.calc_atom_boundary_phase_matrix(atom, density, distance, *args)
# remember this matrix for next use with the same
# parameters for this atom
if index >= 0:
self.last_atom_ref_trans_matrices['atom_ids'][index] = atom.id
self.last_atom_ref_trans_matrices['hashes'][index] = _hash
self.last_atom_ref_trans_matrices['A'][index] = A
self.last_atom_ref_trans_matrices['A_phi'][index] = A_phi
self.last_atom_ref_trans_matrices['P'][index] = P
self.last_atom_ref_trans_matrices['P_phi'][index] = P_phi
self.last_atom_ref_trans_matrices['A_inv'][index] = A_inv
self.last_atom_ref_trans_matrices['A_inv_phi'][index] = A_inv_phi
self.last_atom_ref_trans_matrices['k_z'][index] = k_z
else:
self.last_atom_ref_trans_matrices['atom_ids'].append(atom.id)
self.last_atom_ref_trans_matrices['hashes'].append(_hash)
self.last_atom_ref_trans_matrices['A'].append(A)
self.last_atom_ref_trans_matrices['A_phi'].append(A_phi)
self.last_atom_ref_trans_matrices['P'].append(P)
self.last_atom_ref_trans_matrices['P_phi'].append(P_phi)
self.last_atom_ref_trans_matrices['A_inv'].append(A_inv)
self.last_atom_ref_trans_matrices['A_inv_phi'].append(A_inv_phi)
self.last_atom_ref_trans_matrices['k_z'].append(k_z)
return A, A_phi, P, P_phi, A_inv, A_inv_phi, k_z
def calc_atom_boundary_phase_matrix(self, atom, density, distance, *args):
"""calc_atom_boundary_phase_matrix
Calculates the boundary and phase matrices of an atom from Elzo
formalism [10]_.
Args:
atom (Atom, AtomMixed): atom or mixed atom.
density (float): density around the atom [kg/m³].
distance (float): distance to the next atomic layer [m].
args (ndarray[float]): magnetization vector.
Returns:
(tuple):
- *A (ndarray[complex])* - atom boundary matrix.
- *A_phi (ndarray[complex])* - atom boundary matrix for opposite
magnetization.
- *P (ndarray[complex])* - atom phase matrix.
- *P_phi (ndarray[complex])* - atom phase matrix for opposite
magnetization.
- *A_inv (ndarray[complex])* - inverted atom boundary matrix.
- *A_inv_phi (ndarray[complex])* - inverted atom boundary matrix for
opposite magnetization.
- *k_z (ndarray[float])* - internal wave vector.
"""
try:
magnetization = args[0]
mag_amplitude = magnetization[0]
mag_phi = magnetization[1]
mag_gamma = magnetization[2]
except IndexError:
# here we catch missing or incomplete magnetization vectors and
# fall back to the atom's own magnetic properties
try:
mag_amplitude = atom.mag_amplitude
except AttributeError:
mag_amplitude = 0
try:
mag_phi = atom._mag_phi
except AttributeError:
mag_phi = 0
try:
mag_gamma = atom._mag_gamma
except AttributeError:
mag_gamma = 0
M = len(self._energy) # number of energies
N = np.shape(self._qz)[1] # number of q_z
U = [np.sin(mag_phi) * np.cos(mag_gamma),
     np.sin(mag_phi) * np.sin(mag_gamma),
     np.cos(mag_phi)]
eps = np.zeros([M, N, 3, 3], dtype=np.cfloat)
A = np.zeros([M, N, 4, 4], dtype=np.cfloat)
A_phi = np.zeros_like(A, dtype=np.cfloat)
P = np.zeros_like(A, dtype=np.cfloat)
P_phi = np.zeros_like(A, dtype=np.cfloat)
try:
molar_density = density/1000/atom.mass_number_a
except AttributeError:
molar_density = 0
energy = self._energy
factor = 830.9471/energy**2  # energy-dependent prefactor of the dielectric tensor (Elzo formalism [10])
theta = self._theta
try:
cf = atom.get_atomic_form_factor(energy)
except AttributeError:
cf = np.zeros_like(energy, dtype=np.cfloat)
try:
mf = atom.get_magnetic_form_factor(energy)
except AttributeError:
mf = np.zeros_like(energy, dtype=np.cfloat)
mag = factor * molar_density * mag_amplitude * mf
mag = np.tile(mag[:, np.newaxis], [1, N])
eps0 = 1 - factor*molar_density*cf
eps0 = np.tile(eps0[:, np.newaxis], [1, N])
eps[:, :, 0, 0] = eps0
eps[:, :, 0, 1] = -1j * U[2] * mag
eps[:, :, 0, 2] = 1j * U[1] * mag
eps[:, :, 1, 0] = -eps[:, :, 0, 1]
eps[:, :, 1, 1] = eps0
eps[:, :, 1, 2] = -1j * U[0] * mag
eps[:, :, 2, 0] = -eps[:, :, 0, 2]
eps[:, :, 2, 1] = -eps[:, :, 1, 2]
eps[:, :, 2, 2] = eps0
alpha_y = np.divide(np.cos(theta), np.sqrt(eps[:, :, 0, 0]))
alpha_z = np.sqrt(1 - alpha_y**2)
# reshape self._k for elementwise multiplication
k = np.reshape(np.repeat(self._k, N), (M, N))
k_z = k * (np.sqrt(eps[:, :, 0, 0]) * alpha_z)
n_right_down = np.sqrt(eps[:, :, 0, 0] - 1j * eps[:, :, 0, 2] * alpha_y
- 1j * eps[:, :, 0, 1] * alpha_z)
n_left_down = np.sqrt(eps[:, :, 0, 0] + 1j * eps[:, :, 0, 2] * alpha_y
+ 1j * eps[:, :, 0, 1] * alpha_z)
n_right_up = np.sqrt(eps[:, :, 0, 0] - 1j * eps[:, :, 0, 2] * alpha_y
+ 1j * eps[:, :, 0, 1] * alpha_z)
n_left_up = np.sqrt(eps[:, :, 0, 0] + 1j * eps[:, :, 0, 2] * alpha_y
- 1j * eps[:, :, 0, 1] * alpha_z)
alpha_y_right_down = np.cos(theta)/n_right_down
alpha_z_right_down = np.sqrt(1-alpha_y_right_down**2)
alpha_y_left_down = np.cos(theta)/n_left_down
alpha_z_left_down = np.sqrt(1-alpha_y_left_down**2)
alpha_y_right_up = np.cos(theta)/n_right_up
alpha_z_right_up = np.sqrt(1-alpha_y_right_up**2)
alpha_y_left_up = np.cos(theta)/n_left_up
alpha_z_left_up = np.sqrt(1-alpha_y_left_up**2)
A[:, :, 0, 0] = (-1 - 1j * eps[:, :, 0, 1] * alpha_z_right_down
- 1j * eps[:, :, 0, 2] * alpha_y_right_down)
A[:, :, 0, 1] = (1 - 1j * eps[:, :, 0, 1] * alpha_z_left_down
- 1j * eps[:, :, 0, 2] * alpha_y_left_down)
A[:, :, 0, 2] = (-1 + 1j * eps[:, :, 0, 1] * alpha_z_right_up
- 1j * eps[:, :, 0, 2] * alpha_y_right_up)
A[:, :, 0, 3] = (1 + 1j * eps[:, :, 0, 1] * alpha_z_left_up
- 1j * eps[:, :, 0, 2] * alpha_y_left_up)
A[:, :, 1, 0] = (1j * alpha_z_right_down - eps[:, :, 0, 1]
- 1j * eps[:, :, 1, 2] * alpha_y_right_down)
A[:, :, 1, 1] = (1j * alpha_z_left_down + eps[:, :, 0, 1]
- 1j * eps[:, :, 1, 2] * alpha_y_left_down)
A[:, :, 1, 2] = (-1j * alpha_z_right_up - eps[:, :, 0, 1]
- 1j * eps[:, :, 1, 2] * alpha_y_right_up)
A[:, :, 1, 3] = (-1j * alpha_z_left_up + eps[:, :, 0, 1]
- 1j * eps[:, :, 1, 2] * alpha_y_left_up)
A[:, :, 2, 0] = -1j * n_right_down * A[:, :, 0, 0]
A[:, :, 2, 1] = 1j * n_left_down * A[:, :, 0, 1]
A[:, :, 2, 2] = -1j * n_right_up * A[:, :, 0, 2]
A[:, :, 2, 3] = 1j * n_left_up * A[:, :, 0, 3]
A[:, :, 3, 0] = - alpha_z_right_down * n_right_down * A[:, :, 0, 0]
A[:, :, 3, 1] = - alpha_z_left_down * n_left_down * A[:, :, 0, 1]
A[:, :, 3, 2] = alpha_z_right_up * n_right_up * A[:, :, 0, 2]
A[:, :, 3, 3] = alpha_z_left_up * n_left_up * A[:, :, 0, 3]
A_phi[:, :, 0, 0] = (-1 + 1j * eps[:, :, 0, 1] * alpha_z_left_down
+ 1j * eps[:, :, 0, 2] * alpha_y_left_down)
A_phi[:, :, 0, 1] = (1 + 1j * eps[:, :, 0, 1] * alpha_z_right_down
+ 1j * eps[:, :, 0, 2] * alpha_y_right_down)
A_phi[:, :, 0, 2] = (-1 - 1j * eps[:, :, 0, 1] * alpha_z_left_up
+ 1j * eps[:, :, 0, 2] * alpha_y_left_up)
A_phi[:, :, 0, 3] = (1 - 1j * eps[:, :, 0, 1] * alpha_z_right_up
+ 1j * eps[:, :, 0, 2] * alpha_y_right_up)
A_phi[:, :, 1, 0] = (1j * alpha_z_left_down + eps[:, :, 0, 1]
+ 1j * eps[:, :, 1, 2] * alpha_y_left_down)
A_phi[:, :, 1, 1] = (1j * alpha_z_right_down - eps[:, :, 0, 1]
+ 1j * eps[:, :, 1, 2] * alpha_y_right_down)
A_phi[:, :, 1, 2] = (-1j * alpha_z_left_up + eps[:, :, 0, 1]
+ 1j * eps[:, :, 1, 2] * alpha_y_left_up)
A_phi[:, :, 1, 3] = (-1j * alpha_z_right_up - eps[:, :, 0, 1]
+ 1j * eps[:, :, 1, 2] * alpha_y_right_up)
A_phi[:, :, 2, 0] = 1j * n_left_down * A_phi[:, :, 0, 0]
A_phi[:, :, 2, 1] = -1j * n_right_down * A_phi[:, :, 0, 1]
A_phi[:, :, 2, 2] = 1j * n_left_up * A_phi[:, :, 0, 2]
A_phi[:, :, 2, 3] = -1j * n_right_up * A_phi[:, :, 0, 3]
A_phi[:, :, 3, 0] = - alpha_z_left_down * n_left_down * A_phi[:, :, 0, 0]
A_phi[:, :, 3, 1] = - alpha_z_right_down * n_right_down * A_phi[:, :, 0, 1]
A_phi[:, :, 3, 2] = alpha_z_left_up * n_left_up * A_phi[:, :, 0, 2]
A_phi[:, :, 3, 3] = alpha_z_right_up * n_right_up * A_phi[:, :, 0, 3]
A[:, :, :, :] = np.divide(
A[:, :, :, :],
np.sqrt(2) * eps[:, :, 0, 0][:, :, np.newaxis, np.newaxis])
A_phi[:, :, :, :] = np.divide(
A_phi[:, :, :, :],
np.sqrt(2) * eps[:, :, 0, 0][:, :, np.newaxis, np.newaxis])
A_inv = np.linalg.inv(A)
A_inv_phi = np.linalg.inv(A_phi)
phase = self._k * distance
phase = phase[:, np.newaxis]
P[:, :, 0, 0] = np.exp(1j * phase * n_right_down * alpha_z_right_down)
P[:, :, 1, 1] = np.exp(1j * phase * n_left_down * alpha_z_left_down)
P[:, :, 2, 2] = np.exp(-1j * phase * n_right_up * alpha_z_right_up)
P[:, :, 3, 3] = np.exp(-1j * phase * n_left_up * alpha_z_left_up)
P_phi[:, :, 0, 0] = P[:, :, 1, 1]
P_phi[:, :, 1, 1] = P[:, :, 0, 0]
P_phi[:, :, 2, 2] = P[:, :, 3, 3]
P_phi[:, :, 3, 3] = P[:, :, 2, 2]
return A, A_phi, P, P_phi, A_inv, A_inv_phi, k_z
@staticmethod
def calc_reflectivity_transmissivity_from_matrix(RT, pol_in, pol_out):
"""calc_reflectivity_transmissivity_from_matrix
Calculates the actual reflectivity and transmissivity from the
reflectivity-transmission matrix for a given incoming and analyzer
polarization from Elzo formalism [10]_.
Args:
RT (ndarray[complex]): reflection-transmission matrix.
pol_in (ndarray[complex]): incoming polarization factor.
pol_out (ndarray[complex]): outgoing polarization factor.
Returns:
(tuple):
- *R (ndarray[float])* - reflectivity.
- *T (ndarray[float])* - transmissivity.
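Example:
A minimal sketch for sigma polarization without an analyzer,
assuming an ``RT`` matrix of shape ``[N_E, N_qz, 4, 4]``:
>>> pol_in = np.array([1, 0], dtype=np.cfloat)
>>> pol_out = np.array([], dtype=np.cfloat)
>>> R, T = XrayDynMag.calc_reflectivity_transmissivity_from_matrix(
...     RT, pol_in, pol_out)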
"""
Ref = np.tile(np.eye(2, 2, dtype=np.cfloat)[np.newaxis, np.newaxis, :, :],
(np.size(RT, 0), np.size(RT, 1), 1, 1))
Trans = np.tile(np.eye(2, 2, dtype=np.cfloat)[np.newaxis, np.newaxis, :, :],
(np.size(RT, 0), np.size(RT, 1), 1, 1))
d = np.divide(1, RT[:, :, 3, 3] * RT[:, :, 2, 2] - RT[:, :, 3, 2] * RT[:, :, 2, 3])
Ref[:, :, 0, 0] = (-RT[:, :, 3, 3] * RT[:, :, 2, 0] + RT[:, :, 2, 3] * RT[:, :, 3, 0]) * d
Ref[:, :, 0, 1] = (-RT[:, :, 3, 3] * RT[:, :, 2, 1] + RT[:, :, 2, 3] * RT[:, :, 3, 1]) * d
Ref[:, :, 1, 0] = (RT[:, :, 3, 2] * RT[:, :, 2, 0] - RT[:, :, 2, 2] * RT[:, :, 3, 0]) * d
Ref[:, :, 1, 1] = (RT[:, :, 3, 2] * RT[:, :, 2, 1] - RT[:, :, 2, 2] * RT[:, :, 3, 1]) * d
Trans[:, :, 0, 0] = (RT[:, :, 0, 0] + RT[:, :, 0, 2] * Ref[:, :, 0, 0]
+ RT[:, :, 0, 3] * Ref[:, :, 1, 0])
Trans[:, :, 0, 1] = (RT[:, :, 0, 1] + RT[:, :, 0, 2] * Ref[:, :, 0, 1]
+ RT[:, :, 0, 3] * Ref[:, :, 1, 1])
Trans[:, :, 1, 0] = (RT[:, :, 1, 0] + RT[:, :, 1, 2] * Ref[:, :, 0, 0]
+ RT[:, :, 1, 3] * Ref[:, :, 1, 0])
Trans[:, :, 1, 1] = (RT[:, :, 1, 1] + RT[:, :, 1, 2] * Ref[:, :, 0, 1]
+ RT[:, :, 1, 3] * Ref[:, :, 1, 1])
Ref = np.matmul(np.matmul(np.array([[-1, 1], [-1j, -1j]]), Ref),
np.array([[-1, 1j], [1, 1j]])*0.5)
Trans = np.matmul(np.matmul(np.array([[-1, 1], [-1j, -1j]]), Trans),
np.array([[-1, 1j], [1, 1j]])*0.5)
if pol_out.size == 0:
# no analyzer polarization
R = np.real(np.matmul(np.square(np.absolute(np.matmul(Ref, pol_in))),
np.array([1, 1], dtype=np.cfloat)))
T = np.real(np.matmul(np.square(np.absolute(np.matmul(Trans, pol_in))),
np.array([1, 1], dtype=np.cfloat)))
else:
R = np.real(np.square(np.absolute(np.matmul(np.matmul(Ref, pol_in), pol_out))))
T = np.real(np.square(np.absolute(np.matmul(np.matmul(Trans, pol_in), pol_out))))
return R, T
@staticmethod
def calc_kerr_effect_from_matrix(RT):
"""calc_kerr_effect_from_matrix
Calculates the Kerr rotation and ellipticity for sigma and pi
incident polarization from the reflectivity-transmission
matrix independent of the given incoming and analyzer polarization
from Elzo formalism [10]_.
Args:
RT (ndarray[complex]): reflection-transmission matrix.
Returns:
K (ndarray[float]): Kerr rotation and ellipticity.
"""
raise NotImplementedError
@staticmethod
def calc_roughness_matrix(roughness, k_z, last_k_z):
"""calc_roughness_matrix
Calculates the roughness matrix for an interface with a gaussian
roughness for the Elzo formalism [10]_.
Args:
roughness (float): gaussian roughness of the interface [m].
k_z (ndarray[float)]: internal wave vector.
last_k_z (ndarray[float)]: last internal wave vector.
Returns:
W (ndarray[float]): roughness matrix.
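Example:
A minimal sketch for a 0.5 nm rough interface, assuming internal
wave vectors ``k_z`` and ``last_k_z`` of equal shape:
>>> W = XrayDynMag.calc_roughness_matrix(5e-10, k_z, last_k_z)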
"""
W = np.zeros([k_z.shape[0], k_z.shape[1], 4, 4], dtype=np.cfloat)
rugosp = np.exp(-((k_z + last_k_z)**2) * roughness**2 / 2)
rugosn = np.exp(-((-k_z + last_k_z)**2) * roughness**2 / 2)
W[:, :, 0, 0] = rugosn
W[:, :, 0, 1] = rugosn
W[:, :, 0, 2] = rugosp
W[:, :, 0, 3] = rugosp
W[:, :, 1, 0] = rugosn
W[:, :, 1, 1] = rugosn
W[:, :, 1, 2] = rugosp
W[:, :, 1, 3] = rugosp
W[:, :, 2, 0] = rugosp
W[:, :, 2, 1] = rugosp
W[:, :, 2, 2] = rugosn
W[:, :, 2, 3] = rugosn
W[:, :, 3, 0] = rugosp
W[:, :, 3, 1] = rugosp
W[:, :, 3, 2] = rugosn
W[:, :, 3, 3] = rugosn
return W
|
dschick/udkm1Dsim
|
udkm1Dsim/simulations/xrays.py
|
Python
|
mit
| 115,918
|
[
"Gaussian"
] |
51e67da4ed314e66004cafd96429d17323ac2aee43e90bc80568489ff9f8b39f
|
#!/bin/env python
# try importing the modules that do not come with Python by default
# check if it is installed by importing the modules
try:
from google.cloud import texttospeech
import PySimpleGUI as sg
import babel
#print('\nAll modules installed successfully, have fun! d^o^b')
except Exception as e:
# something is wrong with the imports; try installing them
import os
# set the proxy
#os.environ['HTTPS_PROXY'] = r'http://ep.threatpulse.net:80'
# install from the requirements.txt file
os.system('pip install -U -r requirements.txt')
# the modules should be fine now...
try:
from google.cloud import texttospeech
import PySimpleGUI as sg
import babel
except Exception as e:
print('\nNot good, failed to import dependencies!')
print(e)
import sys
sys.exit(1)
# if it got here then everything seems fine
import os
import random
import time
import webbrowser
import sys, traceback
# these need to be installed manually
from google.cloud import texttospeech
import PySimpleGUI as sg
import babel
# This is the main class for the project; it contains both GUI code and API calls.
# It uses the PySimpleGUI module, itself a wrapper library that speeds up GUI development,
# and it makes calls to the Google Cloud API to fetch a list of voices that can be used to synthesize text.
class google_tts():
def __init__(self, debug=False):
# set OS ENV var for the Google authentication token
if os.environ.get('GOOGLE_APPLICATION_CREDENTIALS') is None:
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = r'C:\secure\auth.json'
# set the proxy
#os.environ['HTTPS_PROXY'] = r'http://ep.threatpulse.net:80'
self.debug = debug
self.selected_options = {}
self.selected_options['language_locale'] = ''
self.selected_options['voice_type'] = ''
self.selected_options['voice_option'] = ''
self.default_text = 'Google Cloud Text-to-Speech enables developers to synthesize natural-sounding speech with 32 voices, ' + \
'available in multiple languages and variants. It applies DeepMind’s groundbreaking research in WaveNet and ' + \
'Google’s powerful neural networks to deliver the highest fidelity possible. As an easy-to-use API, ' + \
'you can create lifelike interactions with your users, across many applications and devices.'
self.default_output = os.path.join(os.getcwd(),'output.mp3')
def debug_print(self, *args):
if self.debug:
try:
print(''.join(args), flush=True)
except:
pass
def synthesize(self, values):
locale_code = self.convert_lang_to_locale(values['language_locale'])
voice_type = values['voice_type']
voice_option = values['voice_option'].split('-')
voice_name='{}-{}-{}'.format(locale_code,voice_type,voice_option[0])
if values['input_type_text']:
    input_text = texttospeech.types.SynthesisInput(text=values['input'])
elif values['input_type_ssml']:
    input_text = texttospeech.types.SynthesisInput(ssml=values['input'])
for gender in texttospeech.enums.SsmlVoiceGender:
if gender.name == voice_option[1]:
ssml_gender = gender
break
self.debug_print('language_locale:', locale_code)
self.debug_print('voice_type:', voice_type)
self.debug_print('voice_option:', voice_option)
self.debug_print('voice_name:', voice_name)
self.debug_print('ssml_gender:', ssml_gender)
voice = texttospeech.types.VoiceSelectionParams(
#language_code='en-GB',
language_code=locale_code,
#name='en-GB-Wavenet-B',
name=voice_name,
#ssml_gender=texttospeech.enums.SsmlVoiceGender.MALE)
ssml_gender=ssml_gender)
audio_config = texttospeech.types.AudioConfig(
audio_encoding=texttospeech.enums.AudioEncoding.MP3,
speaking_rate=(values['speed']/100.0),
pitch=values['pitch'])
response = self.client.synthesize_speech(input_text, voice, audio_config)
# The response's audio_content is binary.
with open(values['output'], 'wb') as out:
out.write(response.audio_content)
sg.PopupQuick('Audio content written to file "{}"'.format(values['output']), no_titlebar=True, button_type=sg.POPUP_BUTTONS_NO_BUTTONS)
def set_form_layout(self):
self.layout = [
[sg.Text('Google Cloud Text-to-Speech', size=(38, 1), justification='center', font=('Helvetica', 25), relief=sg.RELIEF_RIDGE)],
[sg.Frame(layout=[
[sg.Radio('text', 'input_type', key='input_type_text', default=True),
sg.Radio('ssml', 'input_type', key='input_type_ssml')]
], title='Input type', tooltip='Choose input type'),
sg.Text(' ' * 110),
sg.Button('Google API', key='API', size=(12,2))],
[sg.Multiline(default_text=self.default_text, key='input', size=(100, 15), do_not_clear=True),
],
[sg.Text('Language / locale', size=(25, 1)),
sg.Text(' ' * 17),
sg.Text('Voice type', size=(15, 1)),
sg.Text(' ' * 20),
sg.Text('Voice option / gender', size=(18, 1))],
[sg.InputCombo(self.languages, key='language_locale',size=(25, 1),change_submits=True, readonly=True),
sg.Text(' ' * 20),
sg.InputCombo(['--choose locale--'], key='voice_type', size=(15, 1), change_submits=True, readonly=True),
sg.Text(' ' * 20),
sg.InputCombo(['--choose voice type--'], key='voice_option', size=(15, 1), readonly=True)],
[sg.Frame(layout=[[sg.Slider(range=(25, 400), key='speed', orientation='h', size=(29, 20), default_value=100)]], title='Speed'),
sg.Frame(layout=[[sg.Slider(range=(-20, 20), key='pitch', orientation='h', size=(29, 20), default_value=0)]], title='Pitch')],
[sg.Text('_' * 102)],
[sg.Text('Choose a location and filename to save the output mp3 as:', size=(50, 1))],
[sg.InputText(self.default_output, key='output',size=(91, 1), do_not_clear=True),
sg.FileSaveAs(target='output', file_types=(('MP3 Files', '*.mp3'),))],
[sg.Button('Synthesize', tooltip='Click to synthesize', size=(18,2)),
sg.Button('Play', tooltip='Click to play', size=(18,2)),
sg.Button('Open', tooltip='Click to open output location', size=(18,2)),
sg.Text(' ' * 10),
sg.Button('Reset', tooltip='Click to reset values', size=(10,2)),
sg.Exit(size=(10, 2))]
]
def unpack_api_data(self):
# dict to keep track of all languages and locales from API
self.locales = {}
# list to only keep track of unique languages for the form
self.languages = []
# create a tree of the languages and the options used for the GUI,
# i.e. a list of keys (locales) that link to the available voice types, genders and options for each voice type
self.voice_list = []
# API call to get full list of supported voices
self.api_voices = self.client.list_voices()
# loop through each voice and identify the following:
# - locale code
# - language (used only for display; links back to the locale code)
# - voice type (each language offers voice types, each with various gender options)
for voice in self.api_voices.voices:
# grab the voice's name (e.g. en-GB-Standard-A) and append the gender, e.g. Male => en-GB-Standard-A-Male
voice_formatted = '{}-{}'.format(voice.name, texttospeech.enums.SsmlVoiceGender(voice.ssml_gender).name)
self.debug_print(voice_formatted)
self.voice_list.append(voice_formatted)
# language_codes is a list, but in practice it only ever contains one entry
language_code = voice.language_codes[0]
# convert language code to babel friendly code. Example: 'en-GB' => 'en_GB'
nice_name = ""
try:
babel_locale_code = babel.Locale.parse(language_code.replace('-','_'))
nice_name = babel_locale_code.get_display_name('en')
except Exception:
nice_name = language_code
# check if the locale already exists in the dict, or else add it in its original form e.g. en-GB
if language_code not in self.locales:
# store the key in its original form (not babel's), mapped to a display-friendly
# language name, e.g. 'en-GB' -> 'English (United Kingdom)'
self.locales[language_code] = nice_name
# add it to languages as well, used in the GUI
if self.locales[language_code] not in self.languages:
self.languages.append(self.locales[language_code])
# update the default text with the correct number of voices
self.default_text = 'Google Cloud Text-to-Speech enables developers to synthesize natural-sounding speech with {} voices, '.format(len(self.voice_list)) + \
'available in multiple languages and variants. It applies DeepMind’s groundbreaking research in WaveNet and ' + \
'Google’s powerful neural networks to deliver the highest fidelity possible. As an easy-to-use API, ' + \
'you can create lifelike interactions with your users, across many applications and devices.'
# sort language list
self.languages.sort()
def convert_lang_to_locale(self,language_code):
locale_code = ''
for key in self.locales:
if language_code == self.locales[key]:
self.debug_print('Language / locale: {} = {}'.format(language_code, key))
locale_code = key
break
return locale_code
def get_voice_types(self, language_code):
# convert chosen language to locale code
locale_code = ''
voice_types = []
self.selected_options['language_locale'] = self.convert_lang_to_locale(language_code)
if self.selected_options['language_locale'] != '':
# we have a locale code
# now get a list of voice types
for voice in self.voice_list:
if self.selected_options['language_locale'] in voice:
# strip the locale code and voice option from the list
# en-GB-Standard-A-Male => Standard
voice_split = voice.split('-')
voice_type = voice_split[2]
if voice_type not in voice_types:
voice_types.append(voice_type)
else:
return ['--choose locale--']
self.selected_options['voice_type'] = voice_types[0]
return voice_types
def get_voice_options(self, voice_type=''):
if voice_type == '':
voice_type = self.selected_options['voice_type']
else:
self.selected_options['voice_type'] = voice_type
voice_options = []
for voice in self.voice_list:
if self.selected_options['language_locale'] in voice:
if self.selected_options['voice_type'] in voice:
# strip the locale code and voice type from the list
# en-GB-Standard-A-Male => A-Male
voice_split = voice.split('-')
voice_option = '{}-{}'.format(voice_split[3], voice_split[4])
if voice_option not in voice_options:
voice_options.append(voice_option)
self.selected_options['voice_option'] = voice_options[0]
return voice_options
def set_defaults_options_on_form(self, window):
if self.set_defaults:
# reset the input text to the default
window.FindElement('input').Update(value=self.default_text)
# set the input back to text
window.FindElement('input_type_text').Update(value=True)
# set selected language / locale to British English
self.selected_options['language_locale'] = 'English (United Kingdom)'
# select the item in the dropdown to the chosen language
window.FindElement('language_locale').Update(set_to_index=self.languages.index(self.selected_options['language_locale']))
# retrieve the list of voice types for the chosen language / locale
voice_types = self.get_voice_types(self.selected_options['language_locale'])
voice_types.sort()
# update the voice types drop down with the new list
window.FindElement('voice_type').Update(values=voice_types)
# set the selected voice type to the Wavenet type
self.selected_options['voice_type'] = 'Wavenet'
# select the item in the dropdown to the chosen voice type
window.FindElement('voice_type').Update(set_to_index=voice_types.index(self.selected_options['voice_type']))
# retrieve the voice options for the chosen language / locale and the chosen voice type
voice_options = self.get_voice_options()
voice_options.sort()
# update the drop-down with the new list
window.FindElement('voice_option').Update(values=voice_options)
# set the selected voice option to the male 'B' voice
self.selected_options['voice_option'] = 'B-MALE'
# select the item in the dropdown to the chosen voice option
window.FindElement('voice_option').Update(set_to_index=voice_options.index(self.selected_options['voice_option']))
# set the speed slider
window.FindElement('speed').Update(value=100)
# set the pitch slider
window.FindElement('pitch').Update(value=0)
# set the output location
window.FindElement('output').Update(self.default_output)
self.set_defaults = False
def main(self):
# a random look and feel could be applied here; the fixed CDK colour scheme below is used instead
# sg.ChangeLookAndFeel(random.choice(self.colours))
# CDK colour scheme
colours = ['#82C600', '#509E2F', '#FFFFFF', '#000000', '#939598']
sg.SetOptions(background_color=colours[0],
text_element_background_color=colours[0],
element_background_color=colours[0],
scrollbar_color=colours[1],
input_elements_background_color=colours[2],
text_color=colours[3],
button_color=('white', colours[1]))
# inform the user that the data is being retrieved from Google
sg.PopupQuick('Retrieving data from Google', no_titlebar=True, button_type=sg.POPUP_BUTTONS_NO_BUTTONS)
# create the Google Cloud Text-to-Speech API client
self.client = texttospeech.TextToSpeechClient()
# retrieve and unpack the data from the client
self.unpack_api_data()
# design and open our window and show all the options to the user
self.set_form_layout()
window = sg.Window('Google Cloud Text-to-Speech', no_titlebar=False, default_element_size=(40, 1), grab_anywhere=False).Layout(self.layout)
self.set_defaults = True
try:
# enter an indefinite loop to keep the form open so the user can interact with it; we then check the events below
while True:
event, values = window.Read(timeout=100)
self.set_defaults_options_on_form(window)
# check which button was clicked
if event == 'Exit':
break
elif event == 'API':
webbrowser.open('https://cloud.google.com/text-to-speech/')
elif event == 'language_locale':
self.debug_print(event, values)
voice_types = self.get_voice_types(values['language_locale'])
window.FindElement('voice_type').Update(values=voice_types)
voice_options = self.get_voice_options()
window.FindElement('voice_option').Update(values=voice_options)
elif event == 'voice_type':
self.debug_print(event, values)
voice_options = self.get_voice_options(values['voice_type'])
window.FindElement('voice_option').Update(values=voice_options)
elif event == 'SaveAs':
if not values['output'].endswith('.mp3'):
output_file = values['output'] + '.mp3'
# ensure the displayed output path keeps its .mp3 extension
window.FindElement('output').Update(value=output_file)
elif event == 'Reset':
self.set_defaults = True
self.set_defaults_options_on_form(window)
elif event == 'Open':
try:
# open the directory that contains the output file
output_dir = os.path.dirname(values['output'])
self.debug_print('opening output location:', output_dir)
webbrowser.open(output_dir)
except Exception:
sg.Popup('Unable to open the output location: "{}"'.format(values['output']))
elif event == 'Play':
try:
if os.path.exists(values['output']):
webbrowser.open(values['output'])
else:
sg.Popup('You need to first create the file at location: \n"{}"'.format(values['output']))
except Exception as e:
sg.Popup('An error occurred trying to play the file at location: "{}"\n{}'.format(values['output'], e))
elif event == 'Synthesize':
try:
self.synthesize(values)
except Exception as e:
sg.Popup('Unable to synthesize input: "{}"'.format(e))
if event == sg.TIMEOUT_KEY:
# fix up the output path on every timeout tick
update = False
output_file = values['output']
if not output_file.endswith('.mp3'):
output_file += '.mp3'
update = True
# on Windows the file path comes back with forward slashes;
# normalise to the OS path separator (on *nix it stays a forward slash)
if '/' in output_file:
output_file = output_file.replace('/', os.path.sep)
update = True
if update:
window.FindElement('output').Update(value=output_file)
elif event is not None and event != sg.TIMEOUT_KEY:
self.debug_print(event, values)
# if for some reason there is nothing on the form
if values is None:
break
except Exception as e:
sg.PopupError('Unexpected error occurred: "{}"'.format(e), no_titlebar=True)
traceback.print_exc(file=sys.stdout)
finally:
window.Close()
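# A minimal, self-contained sketch (defined but never called) of the
# read-with-timeout event-loop pattern that main() above relies on. It assumes
# only the PySimpleGUI calls already used in this file (sg.Window().Layout(),
# window.Read(timeout=...)); the layout and key names are illustrative.
def _event_loop_sketch():
    layout = [[sg.Text('demo'), sg.Button('Quit')]]
    window = sg.Window('sketch').Layout(layout)
    while True:
        # Read returns (event, values); with a timeout it also returns every
        # 100 ms with event == sg.TIMEOUT_KEY, giving a slot for periodic fix-ups
        event, values = window.Read(timeout=100)
        if event is None or event == 'Quit':
            break  # window closed or Quit pressed
        if event == sg.TIMEOUT_KEY:
            continue  # periodic housekeeping would go here
    window.Close()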
if __name__ == '__main__':
# create instance of the google tts form
tts_google = google_tts(debug=True)
# call its main function
tts_google.main()
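# A minimal sketch of the bare google-cloud-texttospeech (pre-1.0
# "types"/"enums") calls that synthesize() above wraps. The voice name and
# output path below are illustrative assumptions, not values taken from the
# form; credentials are picked up from the environment as usual.
def _synthesize_sketch(text='Hello world', out_path='sketch.mp3'):
    client = texttospeech.TextToSpeechClient()
    synthesis_input = texttospeech.types.SynthesisInput(text=text)
    voice = texttospeech.types.VoiceSelectionParams(
        language_code='en-GB',
        name='en-GB-Wavenet-B',
        ssml_gender=texttospeech.enums.SsmlVoiceGender.MALE)
    audio_config = texttospeech.types.AudioConfig(
        audio_encoding=texttospeech.enums.AudioEncoding.MP3)
    response = client.synthesize_speech(synthesis_input, voice, audio_config)
    # audio_content is the raw MP3 payload
    with open(out_path, 'wb') as out:
        out.write(response.audio_content)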
|
Pyroseza/Random
|
Google TTS/tts_form.py
|
Python
|
mit
| 19,932
|
[
"CDK"
] |
804bdd31a93b0212a2fb0025c5fe6fd69a6a183a5197f4090eec9622600622f1
|
# (C) British Crown Copyright 2010 - 2016, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Integration tests for grib2 file loading.
This code used to be part of 'tests/test_grib_load.py', but these integration-
style tests have been split out of there: they now work with either the
internal 'iris.fileformats.grib' module *or* the newer, external
'iris_grib' package.
The remainder of the old 'tests/test_grib_load.py' is now renamed as
'tests/test_grib_load_translations.py'. Those tests are implementation-
specific, and target the deprecated internal module 'iris.fileformats.grib'.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris tests first so that some things can be initialised before
# importing anything else
import iris.tests as tests
import iris
import iris.exceptions
from iris.tests import mock
import iris.tests.stock
import iris.util
from unittest import skipIf
if tests.GRIB_AVAILABLE:
try:
import iris_grib
except ImportError:
from iris.fileformats import grib as iris_internal_grib_module
else:
iris_internal_grib_module = None
# Skip out some tests that currently fail with 'iris_grib'.
# TODO: either fix these problems, or remove the tests.
skip_irisgrib_fails = skipIf(iris_internal_grib_module is None,
'Test(s) are not currently usable with '
'"iris_grib".')
@tests.skip_data
@tests.skip_grib
class TestBasicLoad(tests.GraphicsTest):
def test_load_rotated(self):
cubes = iris.load(tests.get_data_path(('GRIB', 'rotated_uk',
"uk_wrongparam.grib1")))
self.assertCML(cubes, ("grib_load", "rotated.cml"))
def test_load_time_bound(self):
cubes = iris.load(tests.get_data_path(('GRIB', "time_processed",
"time_bound.grib1")))
self.assertCML(cubes, ("grib_load", "time_bound_grib1.cml"))
def test_load_time_processed(self):
cubes = iris.load(tests.get_data_path(('GRIB', "time_processed",
"time_bound.grib2")))
self.assertCML(cubes, ("grib_load", "time_bound_grib2.cml"))
def test_load_3_layer(self):
cubes = iris.load(tests.get_data_path(('GRIB', "3_layer_viz",
"3_layer.grib2")))
cubes = iris.cube.CubeList([cubes[1], cubes[0], cubes[2]])
self.assertCML(cubes, ("grib_load", "3_layer.cml"))
def test_load_masked(self):
gribfile = tests.get_data_path(
('GRIB', 'missing_values', 'missing_values.grib2'))
cubes = iris.load(gribfile)
self.assertCML(cubes, ('grib_load', 'missing_values_grib2.cml'))
@skip_irisgrib_fails
def test_y_fastest(self):
cubes = iris.load(tests.get_data_path(("GRIB", "y_fastest",
"y_fast.grib2")))
self.assertCML(cubes, ("grib_load", "y_fastest.cml"))
def test_polar_stereo_grib1(self):
cube = iris.load_cube(tests.get_data_path(
("GRIB", "polar_stereo", "ST4.2013052210.01h")))
self.assertCML(cube, ("grib_load", "polar_stereo_grib1.cml"))
def test_polar_stereo_grib2(self):
cube = iris.load_cube(tests.get_data_path(
("GRIB", "polar_stereo",
"CMC_glb_TMP_ISBL_1015_ps30km_2013052000_P006.grib2")))
self.assertCML(cube, ("grib_load", "polar_stereo_grib2.cml"))
def test_lambert_grib1(self):
cube = iris.load_cube(tests.get_data_path(
("GRIB", "lambert", "lambert.grib1")))
self.assertCML(cube, ("grib_load", "lambert_grib1.cml"))
def test_lambert_grib2(self):
cube = iris.load_cube(tests.get_data_path(
("GRIB", "lambert", "lambert.grib2")))
self.assertCML(cube, ("grib_load", "lambert_grib2.cml"))
def test_regular_gg_grib1(self):
cube = iris.load_cube(tests.get_data_path(
('GRIB', 'gaussian', 'regular_gg.grib1')))
self.assertCML(cube, ('grib_load', 'regular_gg_grib1.cml'))
def test_regular_gg_grib2(self):
cube = iris.load_cube(tests.get_data_path(
('GRIB', 'gaussian', 'regular_gg.grib2')))
self.assertCML(cube, ('grib_load', 'regular_gg_grib2.cml'))
def test_reduced_ll(self):
cube = iris.load_cube(tests.get_data_path(
("GRIB", "reduced", "reduced_ll.grib1")))
self.assertCML(cube, ("grib_load", "reduced_ll_grib1.cml"))
def test_reduced_gg(self):
cube = iris.load_cube(tests.get_data_path(
("GRIB", "reduced", "reduced_gg.grib2")))
self.assertCML(cube, ("grib_load", "reduced_gg_grib2.cml"))
@skip_irisgrib_fails
def test_reduced_missing(self):
cube = iris.load_cube(tests.get_data_path(
("GRIB", "reduced", "reduced_ll_missing.grib1")))
self.assertCML(cube, ("grib_load", "reduced_ll_missing_grib1.cml"))
@tests.skip_data
@tests.skip_grib
class TestIjDirections(tests.GraphicsTest):
@staticmethod
def _old_compat_load(name):
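# load the named GRIB file and return only the first cube, wrapped in a list
# to match the older single-cube CML expectations of these tests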
cube = iris.load(tests.get_data_path(('GRIB', 'ij_directions',
name)))[0]
return [cube]
def test_ij_directions_ipos_jpos(self):
cubes = self._old_compat_load("ipos_jpos.grib2")
self.assertCML(cubes, ("grib_load", "ipos_jpos.cml"))
def test_ij_directions_ipos_jneg(self):
cubes = self._old_compat_load("ipos_jneg.grib2")
self.assertCML(cubes, ("grib_load", "ipos_jneg.cml"))
def test_ij_directions_ineg_jneg(self):
cubes = self._old_compat_load("ineg_jneg.grib2")
self.assertCML(cubes, ("grib_load", "ineg_jneg.cml"))
def test_ij_directions_ineg_jpos(self):
cubes = self._old_compat_load("ineg_jpos.grib2")
self.assertCML(cubes, ("grib_load", "ineg_jpos.cml"))
@tests.skip_data
@tests.skip_grib
class TestShapeOfEarth(tests.GraphicsTest):
@staticmethod
def _old_compat_load(name):
cube = iris.load(tests.get_data_path(('GRIB', 'shape_of_earth',
name)))[0]
return cube
def test_shape_of_earth_basic(self):
# pre-defined sphere
cube = self._old_compat_load("0.grib2")
self.assertCML(cube, ("grib_load", "earth_shape_0.cml"))
def test_shape_of_earth_custom_1(self):
# custom sphere
cube = self._old_compat_load("1.grib2")
self.assertCML(cube, ("grib_load", "earth_shape_1.cml"))
@skip_irisgrib_fails
def test_shape_of_earth_IAU65(self):
# IAU65 oblate sphere
cube = self._old_compat_load("2.grib2")
self.assertCML(cube, ("grib_load", "earth_shape_2.cml"))
def test_shape_of_earth_custom_3(self):
# custom oblate spheroid (km)
cube = self._old_compat_load("3.grib2")
self.assertCML(cube, ("grib_load", "earth_shape_3.cml"))
@skip_irisgrib_fails
def test_shape_of_earth_IAG_GRS80(self):
# IAG-GRS80 oblate spheroid
cube = self._old_compat_load("4.grib2")
self.assertCML(cube, ("grib_load", "earth_shape_4.cml"))
@skip_irisgrib_fails
def test_shape_of_earth_WGS84(self):
# WGS84
cube = self._old_compat_load("5.grib2")
self.assertCML(cube, ("grib_load", "earth_shape_5.cml"))
def test_shape_of_earth_pre_6(self):
# pre-defined sphere
cube = self._old_compat_load("6.grib2")
self.assertCML(cube, ("grib_load", "earth_shape_6.cml"))
def test_shape_of_earth_custom_7(self):
# custom oblate spheroid (m)
cube = self._old_compat_load("7.grib2")
self.assertCML(cube, ("grib_load", "earth_shape_7.cml"))
def test_shape_of_earth_grib1(self):
# grib1 - same as grib2 shape 6, above
cube = self._old_compat_load("global.grib1")
self.assertCML(cube, ("grib_load", "earth_shape_grib1.cml"))
if __name__ == "__main__":
tests.main()
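# A minimal sketch of the "prefer the external package, fall back to the
# internal module" import pattern used near the top of this file. The module
# names are the real ones referenced above; the helper itself is illustrative
# and not used by the tests.
def _load_grib_backend():
    try:
        import iris_grib
        return iris_grib, None  # external package available
    except ImportError:
        from iris.fileformats import grib as internal_grib
        return None, internal_grib  # fall back to the deprecated internal module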
|
jswanljung/iris
|
lib/iris/tests/integration/test_grib_load.py
|
Python
|
lgpl-3.0
| 8,778
|
[
"Gaussian"
] |
648bf8416e1842e2471541e9597013fe825a36834e86db05648ef1a77d47f37d
|
# Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Upgrader for Python scripts from 1.* TensorFlow to 2.0 TensorFlow."""
import ast
import copy
import functools
import sys
import pasta
import six
from tensorflow.tools.compatibility import all_renames_v2
from tensorflow.tools.compatibility import ast_edits
from tensorflow.tools.compatibility import module_deprecations_v2
from tensorflow.tools.compatibility import reorders_v2
# These pylint warnings are a mistake.
# pylint: disable=g-explicit-bool-comparison,g-bool-id-comparison
class UnaliasedTFImport(ast_edits.AnalysisResult):
def __init__(self):
self.log_level = ast_edits.ERROR
self.log_message = ("The tf_upgrade_v2 script detected an unaliased "
"`import tensorflow`. The script can only run when "
"importing with `import tensorflow as tf`.")
class VersionedTFImport(ast_edits.AnalysisResult):
def __init__(self, version):
self.log_level = ast_edits.INFO
self.log_message = ("Not upgrading symbols because `tensorflow." +
six.ensure_str(version) +
"` was directly imported as `tf`.")
compat_v1_import = VersionedTFImport("compat.v1")
compat_v2_import = VersionedTFImport("compat.v2")
class TFAPIImportAnalysisSpec(ast_edits.APIAnalysisSpec):
def __init__(self):
self.symbols_to_detect = {}
self.imports_to_detect = {
("tensorflow", None): UnaliasedTFImport(),
("tensorflow.compat.v1", "tf"): compat_v1_import,
("tensorflow.compat.v2", "tf"): compat_v2_import,
}
class CompatV1ImportReplacer(ast.NodeVisitor):
"""AST Visitor that replaces `import tensorflow.compat.v1 as tf`.
Converts `import tensorflow.compat.v1 as tf` to `import tensorflow as tf`.
"""
def visit_Import(self, node): # pylint: disable=invalid-name
"""Handle visiting an import node in the AST.
Args:
node: Current Node
"""
for import_alias in node.names:
# Detect based on full import name and alias
if (import_alias.name == "tensorflow.compat.v1" and
import_alias.asname == "tf"):
import_alias.name = "tensorflow"
self.generic_visit(node)
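# A minimal, runnable sketch (illustrative only) of how the visitor above
# rewrites an import in place: parse a snippet with the stdlib `ast` module,
# visit it, and read back the mutated alias. The snippet text is an assumed
# example; the real upgrader drives this through pasta/ast_edits instead.
def _compat_v1_rewrite_sketch():
    tree = ast.parse("import tensorflow.compat.v1 as tf")
    CompatV1ImportReplacer().visit(tree)
    # the alias name is now plain "tensorflow"; the asname "tf" is unchanged
    return tree.body[0].names[0].name  # -> "tensorflow"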
class TFAPIChangeSpec(ast_edits.NoUpdateSpec):
"""List of maps that describe what changed in the API."""
def __init__(self, import_rename=False, upgrade_compat_v1_import=False):
self.upgrade_compat_v1_import = upgrade_compat_v1_import
# Maps from a function name to a dictionary that describes how to
# map from an old argument keyword to the new argument keyword.
# If the new argument is None, it will be removed.
# Only keyword args are handled, so make sure to also put any function in
# function_reorders to ensure that all args are made into keywords first.
self.function_keyword_renames = {
# TODO(b/129398290)
# "tf.string_split": {
# "delimiter": "sep",
# },
"tf.test.assert_equal_graph_def": {
"checkpoint_v2": None,
"hash_table_shared_name": None,
},
"tf.autograph.to_code": {
"arg_types": None,
"arg_values": None,
"indentation": None,
},
"tf.autograph.to_graph": {
"arg_types": None,
"arg_values": None,
},
"tf.nn.embedding_lookup": {
"validate_indices": None,
},
"tf.image.sample_distorted_bounding_box": {
"seed2": None,
},
"tf.gradients": {
"colocate_gradients_with_ops": None,
},
"tf.hessians": {
"colocate_gradients_with_ops": None,
},
"*.minimize": {
"colocate_gradients_with_ops": None,
},
"*.compute_gradients": {
"colocate_gradients_with_ops": None,
},
"tf.cond": {
"strict": None,
"fn1": "true_fn",
"fn2": "false_fn"
},
"tf.argmin": {
"dimension": "axis",
},
"tf.argmax": {
"dimension": "axis",
},
"tf.arg_min": {
"dimension": "axis",
},
"tf.arg_max": {
"dimension": "axis",
},
"tf.math.argmin": {
"dimension": "axis",
},
"tf.math.argmax": {
"dimension": "axis",
},
"tf.image.crop_and_resize": {
"box_ind": "box_indices",
},
"tf.extract_image_patches": {
"ksizes": "sizes",
},
"tf.image.extract_image_patches": {
"ksizes": "sizes",
},
"tf.image.resize": {
"align_corners": None,
},
"tf.image.resize_images": {
"align_corners": None,
},
"tf.expand_dims": {
"dim": "axis",
},
"tf.batch_to_space": {
"block_size": "block_shape",
},
"tf.space_to_batch": {
"block_size": "block_shape",
},
"tf.nn.space_to_batch": {
"block_size": "block_shape",
},
"tf.constant": {
"verify_shape": "verify_shape_is_now_always_true",
},
"tf.convert_to_tensor": {
"preferred_dtype": "dtype_hint"
},
"tf.nn.softmax_cross_entropy_with_logits": {
"dim": "axis",
"_sentinel": None,
},
"tf.nn.softmax_cross_entropy_with_logits_v2": {
"dim": "axis"
},
"tf.linalg.l2_normalize": {
"dim": "axis",
},
"tf.linalg.norm": {
"keep_dims": "keepdims",
},
"tf.norm": {
"keep_dims": "keepdims",
},
"tf.load_file_system_library": {
"library_filename": "library_location",
},
"tf.count_nonzero": {
"input_tensor": "input",
"keep_dims": "keepdims",
"reduction_indices": "axis",
},
"tf.math.count_nonzero": {
"input_tensor": "input",
"keep_dims": "keepdims",
"reduction_indices": "axis",
},
"tf.nn.erosion2d": {
"kernel": "filters",
"rates": "dilations",
},
"tf.math.l2_normalize": {
"dim": "axis",
},
"tf.math.log_softmax": {
"dim": "axis",
},
"tf.math.softmax": {
"dim": "axis"
},
"tf.nn.l2_normalize": {
"dim": "axis",
},
"tf.nn.log_softmax": {
"dim": "axis",
},
"tf.nn.moments": {
"keep_dims": "keepdims",
},
"tf.nn.pool": {
"dilation_rate": "dilations"
},
"tf.nn.separable_conv2d": {
"rate": "dilations"
},
"tf.nn.depthwise_conv2d": {
"rate": "dilations"
},
"tf.nn.softmax": {
"dim": "axis"
},
"tf.nn.sufficient_statistics": {
"keep_dims": "keepdims"
},
"tf.debugging.assert_all_finite": {
"t": "x",
"msg": "message",
},
"tf.sparse.add": {
"thresh": "threshold",
},
"tf.sparse_add": {
"thresh": "threshold",
},
"tf.sparse.concat": {
"concat_dim": "axis",
"expand_nonconcat_dim": "expand_nonconcat_dims",
},
"tf.sparse_concat": {
"concat_dim": "axis",
"expand_nonconcat_dim": "expand_nonconcat_dims",
},
"tf.sparse.split": {
"split_dim": "axis",
},
"tf.sparse_split": {
"split_dim": "axis",
},
"tf.sparse.reduce_max": {
"reduction_axes": "axis",
"keep_dims": "keepdims",
},
"tf.sparse_reduce_max": {
"reduction_axes": "axis",
"keep_dims": "keepdims",
},
"tf.sparse.reduce_sum": {
"reduction_axes": "axis",
"keep_dims": "keepdims",
},
"tf.sparse_reduce_sum": {
"reduction_axes": "axis",
"keep_dims": "keepdims",
},
"tf.nn.max_pool_with_argmax": {
"Targmax": "output_dtype",
},
"tf.nn.max_pool": {
"value": "input"
},
"tf.nn.avg_pool": {
"value": "input"
},
"tf.nn.avg_pool2d": {
"value": "input"
},
"tf.multinomial": {
"output_dtype": "dtype",
},
"tf.random.multinomial": {
"output_dtype": "dtype",
},
"tf.reverse_sequence": {
"seq_dim": "seq_axis",
"batch_dim": "batch_axis",
},
"tf.nn.batch_norm_with_global_normalization": {
"t": "input",
"m": "mean",
"v": "variance",
},
"tf.nn.dilation2d": {
"filter": "filters",
"rates": "dilations",
},
"tf.nn.conv3d": {
"filter": "filters"
},
"tf.zeros_like": {
"tensor": "input",
},
"tf.ones_like": {
"tensor": "input",
},
"tf.nn.conv2d_transpose": {
"value": "input",
"filter": "filters",
},
"tf.nn.conv3d_transpose": {
"value": "input",
"filter": "filters",
},
"tf.nn.convolution": {
"filter": "filters",
"dilation_rate": "dilations",
},
"tf.gfile.Exists": {
"filename": "path",
},
"tf.gfile.Remove": {
"filename": "path",
},
"tf.gfile.Stat": {
"filename": "path",
},
"tf.gfile.Glob": {
"filename": "pattern",
},
"tf.gfile.MkDir": {
"dirname": "path",
},
"tf.gfile.MakeDirs": {
"dirname": "path",
},
"tf.gfile.DeleteRecursively": {
"dirname": "path",
},
"tf.gfile.IsDirectory": {
"dirname": "path",
},
"tf.gfile.ListDirectory": {
"dirname": "path",
},
"tf.gfile.Copy": {
"oldpath": "src",
"newpath": "dst",
},
"tf.gfile.Rename": {
"oldname": "src",
"newname": "dst",
},
"tf.gfile.Walk": {
"in_order": "topdown",
},
"tf.random.stateless_multinomial": {
"output_dtype": "dtype",
},
"tf.string_to_number": {
"string_tensor": "input",
},
"tf.strings.to_number": {
"string_tensor": "input",
},
"tf.string_to_hash_bucket": {
"string_tensor": "input",
},
"tf.strings.to_hash_bucket": {
"string_tensor": "input",
},
"tf.reduce_all": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_all": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_any": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_any": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_min": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_min": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_max": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_max": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_sum": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_sum": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_mean": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_mean": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_prod": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_prod": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_logsumexp": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_logsumexp": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_join": {
"keep_dims": "keepdims",
"reduction_indices": "axis"
},
"tf.strings.reduce_join": {
"keep_dims": "keepdims",
"reduction_indices": "axis"
},
"tf.squeeze": {
"squeeze_dims": "axis",
},
"tf.nn.weighted_moments": {
"keep_dims": "keepdims"
},
"tf.nn.conv1d": {
"value": "input",
"use_cudnn_on_gpu": None,
},
"tf.nn.conv2d": {
"filter": "filters",
"use_cudnn_on_gpu": None,
},
"tf.nn.conv2d_backprop_input": {
"use_cudnn_on_gpu": None,
"input_sizes": "output_shape",
"out_backprop": "input",
"filter": "filters",
},
"tf.contrib.summary.audio": {
"tensor": "data",
"family": None,
},
"tf.contrib.summary.create_file_writer": {
"name": None,
},
"tf.contrib.summary.generic": {
"name": "tag",
"tensor": "data",
"family": None,
},
"tf.contrib.summary.histogram": {
"tensor": "data",
"family": None,
},
"tf.contrib.summary.image": {
"tensor": "data",
"bad_color": None,
"max_images": "max_outputs",
"family": None,
},
"tf.contrib.summary.scalar": {
"tensor": "data",
"family": None,
},
"tf.nn.weighted_cross_entropy_with_logits": {
"targets": "labels",
},
"tf.decode_raw": {
"bytes": "input_bytes",
},
"tf.io.decode_raw": {
"bytes": "input_bytes",
},
"tf.contrib.framework.load_variable": {
"checkpoint_dir": "ckpt_dir_or_file",
}
}
all_renames_v2.add_contrib_direct_import_support(
self.function_keyword_renames)
# Mapping from function to the new name of the function
# Add additional renames not in renames_v2.py to all_renames_v2.py.
self.symbol_renames = all_renames_v2.symbol_renames
self.import_rename = import_rename
if self.import_rename:
self.import_renames = {
"tensorflow":
ast_edits.ImportRename(
"tensorflow.compat.v2",
excluded_prefixes=[
"tensorflow.contrib", "tensorflow.flags",
"tensorflow.compat.v1", "tensorflow.compat.v2",
"tensorflow.google"
],
)
}
else:
self.import_renames = {}
# Variables that should be changed to functions.
self.change_to_function = {}
# pylint: disable=line-too-long
# This list should just contain names of functions that had
# their arguments reordered. After adding a function name to the list
# run the following to update reorders_v2.py:
# bazel build tensorflow/tools/compatibility/update:generate_v2_reorders_map
# bazel-bin/tensorflow/tools/compatibility/update/generate_v2_reorders_map
# pylint: enable=line-too-long
self.reordered_function_names = {
"tf.io.serialize_sparse",
"tf.io.serialize_many_sparse",
"tf.argmax",
"tf.argmin",
"tf.batch_to_space",
"tf.cond",
"tf.nn.space_to_batch",
"tf.boolean_mask",
"tf.convert_to_tensor",
"tf.nn.conv1d",
"tf.nn.conv2d",
"tf.nn.conv2d_backprop_input",
"tf.nn.ctc_beam_search_decoder",
"tf.nn.moments",
"tf.nn.convolution",
"tf.nn.crelu",
"tf.nn.weighted_moments",
"tf.nn.pool",
"tf.nn.separable_conv2d",
"tf.nn.depthwise_conv2d",
"tf.multinomial",
"tf.random.multinomial",
"tf.pad",
"tf.quantize_v2",
"tf.feature_column.categorical_column_with_vocabulary_file",
"tf.shape",
"tf.size",
# TODO(b/129398290)
# "tf.string_split",
"tf.random.poisson",
"tf.sparse.add",
"tf.sparse_add",
"tf.sparse.concat",
"tf.sparse_concat",
"tf.sparse.segment_mean",
"tf.sparse.segment_sqrt_n",
"tf.sparse.segment_sum",
"tf.sparse_matmul",
"tf.sparse.reduce_max",
"tf.sparse_reduce_max",
"tf.io.decode_csv",
"tf.strings.length",
"tf.strings.reduce_join",
"tf.strings.substr",
"tf.substr",
"tf.transpose",
"tf.tuple",
"tf.parse_example",
"tf.parse_single_example",
"tf.io.parse_example",
"tf.io.parse_single_example",
"tf.while_loop",
"tf.reduce_all",
"tf.math.reduce_all",
"tf.reduce_any",
"tf.math.reduce_any",
"tf.reduce_min",
"tf.math.reduce_min",
"tf.reduce_max",
"tf.math.reduce_max",
"tf.reduce_sum",
"tf.math.reduce_sum",
"tf.reduce_mean",
"tf.math.reduce_mean",
"tf.reduce_prod",
"tf.math.reduce_prod",
"tf.reduce_logsumexp",
"tf.math.reduce_logsumexp",
"tf.reduce_join",
"tf.confusion_matrix",
"tf.math.confusion_matrix",
"tf.math.in_top_k",
"tf.nn.depth_to_space",
"tf.nn.embedding_lookup",
"tf.nn.embedding_lookup_sparse",
"tf.nn.in_top_k",
"tf.nn.space_to_depth",
"tf.test.assert_equal_graph_def",
"tf.linalg.norm",
"tf.norm",
"tf.reverse_sequence",
"tf.sparse_split",
# tf.nn.softmax_cross_entropy_with_logits *must* be called with
# keyword arguments. Add keyword arguments in the rare case when they
# are not specified.
"tf.nn.softmax_cross_entropy_with_logits",
"tf.nn.fractional_avg_pool",
"tf.nn.fractional_max_pool",
"tf.image.sample_distorted_bounding_box",
"tf.gradients",
"tf.hessians",
"tf.nn.max_pool",
"tf.nn.avg_pool",
"tf.estimator.LinearClassifier",
"tf.estimator.LinearRegressor",
"tf.estimator.DNNLinearCombinedClassifier",
"tf.estimator.DNNLinearCombinedRegressor",
"tf.estimator.DNNRegressor",
"tf.estimator.DNNClassifier",
"tf.estimator.BaselineClassifier",
"tf.estimator.BaselineRegressor",
"tf.initializers.uniform_unit_scaling",
"tf.uniform_unit_scaling_initializer",
"tf.train.sdca_fprint",
"tf.train.sdca_optimizer",
"tf.train.sdca_shrink_l1",
"tf.data.experimental.TensorStructure",
"tf.data.experimental.SparseTensorStructure",
"tf.data.experimental.RaggedTensorStructure",
"tf.data.experimental.TensorArrayStructure",
}
# Manual mapping of function names to be reordered to their list of argument
# names, in order. Only use this if argument names cannot be autodetected,
# e.g. if the functions are in contrib.
self.manual_function_reorders = {
"tf.contrib.summary.audio": [
"name", "tensor", "sample_rate", "max_outputs", "family", "step"],
"tf.contrib.summary.create_file_writer": [
"logdir", "max_queue", "flush_millis", "filename_suffix", "name"],
"tf.contrib.summary.generic": [
"name", "tensor", "metadata", "family", "step"],
"tf.contrib.summary.histogram": [
"name", "tensor", "family", "step"],
"tf.contrib.summary.image": [
"name", "tensor", "bad_color", "max_images", "family", "step"],
"tf.contrib.summary.scalar": [
"name", "tensor", "family", "step"],
}
# Functions that were reordered should be changed to the new keyword args
# for safety, if positional arguments are used. If you have reversed the
# positional arguments yourself, this could do the wrong thing.
self.function_reorders = dict(reorders_v2.reorders)
self.function_reorders.update(self.manual_function_reorders)
decay_function_comment = (
ast_edits.INFO,
"To use learning rate decay schedules with TensorFlow 2.0, switch to "
"the schedules in `tf.keras.optimizers.schedules`.\n"
)
assert_return_type_comment = (
ast_edits.INFO,
"<function name> has been changed to return None, the "
"data argument has been removed, and arguments have been reordered."
"\nThe calls have been converted to compat.v1 for safety (even though "
" they may already have been correct)."
)
assert_rank_comment = (
ast_edits.INFO,
"<function name> has been changed to return None, and"
" the data and summarize arguments have been removed."
"\nThe calls have been converted to compat.v1 for safety (even though "
" they may already have been correct)."
)
contrib_layers_layer_norm_comment = (
ast_edits.WARNING,
"(Manual edit required) `tf.contrib.layers.layer_norm` has been "
"deprecated, and its implementation has been integrated with "
"`tf.keras.layers.LayerNormalization` in TensorFlow 2.0. "
"Note that, the default value of `epsilon` is changed to `1e-3` in the "
"new API from `1e-12`, and this may introduce numerical differences. "
"Please check the new API and use that instead."
)
contrib_estimator_head_comment = (
ast_edits.WARNING,
"(Manual edit required) `tf.contrib.estimator.*_head` has been "
"deprecated, and its implementation has been integrated with "
"`tf.estimator.*Head` in TensorFlow 2.0. "
"Please check the new API and use that instead."
)
initializers_no_dtype_comment = (
ast_edits.INFO, "Initializers no longer have the "
"dtype argument in the constructor or partition_info argument in the "
"__call__ method.\nThe calls have been converted to compat.v1 for "
"safety (even though they may already have been correct).")
metrics_comment = (
ast_edits.INFO,
"tf.metrics have been replaced with object oriented versions in"
" TF 2.0 and after. The metric function calls have been converted to "
"compat.v1 for backward compatibility. Please update these calls to "
"the TF 2.0 versions.")
losses_comment = (
ast_edits.INFO,
"tf.losses have been replaced with object oriented versions in"
" TF 2.0 and after. The loss function calls have been converted to "
"compat.v1 for backward compatibility. Please update these calls to "
"the TF 2.0 versions.")
# This could be done with a _rename_if_arg_not_found_transformer
deprecate_partition_strategy_comment = (
ast_edits.WARNING,
"`partition_strategy` has been removed from <function name>. "
"The 'div' strategy will be used by default.")
# make change instead
uniform_unit_scaling_initializer_comment = (
ast_edits.ERROR,
"uniform_unit_scaling_initializer has been removed. Please use"
" tf.initializers.variance_scaling instead with distribution=uniform "
"to get equivalent behaviour.")
# Make change instead (issue warning about strip_...)
export_saved_model_renamed = (
ast_edits.ERROR,
"(Manual edit required) Please rename the method export_savedmodel() "
"to export_saved_model(). Two things to note:\n\t(1) The argument "
"strip_default_attributes has been removed. The function will always "
"strip the default attributes from ops. If this breaks your code, "
"please switch to tf.compat.v1.estimator.Estimator.\n\t(2) This change "
"only effects core estimator. If you are using "
"tf.contrib.learn.Estimator, please switch to using core estimator.")
summary_api_comment = (
ast_edits.INFO,
"The TF 1.x summary API cannot be automatically migrated to TF 2.0, so "
"symbols have been converted to tf.compat.v1.summary.* and must be "
"migrated manually. Typical usage will only require changes to the "
"summary writing logic, not to individual calls like scalar(). "
"For examples of the new summary API, see the Effective TF 2.0 "
"migration document or check the TF 2.0 TensorBoard tutorials.")
contrib_summary_comment = (
ast_edits.WARNING,
"tf.contrib.summary.* functions have been migrated best-effort to "
"tf.compat.v2.summary.* equivalents where possible, but the resulting "
"code is not guaranteed to work, so please check carefully. For more "
"information about the new summary API, see the Effective TF 2.0 "
"migration document or check the updated TensorBoard tutorials.")
contrib_summary_family_arg_comment = (
ast_edits.WARNING,
"<function name> replacement does not accept a 'family' argument; "
"instead regular name scoping should be used. This call site specifies "
"a family argument that has been removed on conversion, so the emitted "
"tag names may be incorrect without manual editing.")
contrib_create_file_writer_comment = (
ast_edits.WARNING,
"tf.contrib.summary.create_file_writer() has been ported to the new "
"tf.compat.v2.summary.create_file_writer(), which no longer re-uses "
"existing event files for the same logdir; instead it always opens a "
"new writer/file. The python writer objects must be re-used explicitly "
"if the reusing behavior is desired.")
contrib_summary_record_every_n_comment = (
ast_edits.ERROR,
"(Manual edit required) "
"tf.contrib.summary.record_summaries_every_n_global_steps(n, step) "
"should be replaced by a call to tf.compat.v2.summary.record_if() with "
"the argument `lambda: tf.math.equal(0, global_step % n)` (or in graph "
"mode, the lambda body can be used directly). If no global step was "
"passed, instead use tf.compat.v1.train.get_or_create_global_step().")
contrib_summary_graph_comment = (
ast_edits.ERROR,
"(Manual edit required) tf.contrib.summary.graph() has no direct "
"equivalent in TF 2.0 because manual graph construction has been "
"superseded by use of tf.function. To log tf.function execution graphs "
"to the summary writer, use the new tf.compat.v2.summary.trace_* "
"functions instead.")
contrib_summary_import_event_comment = (
ast_edits.ERROR,
"(Manual edit required) tf.contrib.summary.import_event() has no "
"direct equivalent in TF 2.0. For a similar experimental feature, try "
"tf.compat.v2.summary.experimental.write_raw_pb() which also accepts "
"serialized summary protocol buffer input, but for tf.Summary "
"protobufs rather than tf.Events.")
keras_default_save_format_comment = (
ast_edits.WARNING,
"(This warning is only applicable if the code saves a tf.Keras model) "
"Keras model.save now saves to the Tensorflow SavedModel format by "
"default, instead of HDF5. To continue saving to HDF5, add the "
"argument save_format='h5' to the save() function.")
distribute_strategy_api_changes = (
"If you're using the strategy with a "
"custom training loop, note the following changes in methods: "
"make_dataset_iterator->experimental_distribute_dataset, "
"experimental_make_numpy_iterator->experimental_make_numpy_dataset, "
"extended.call_for_each_replica->run, "
"reduce requires an axis argument, "
"unwrap->experimental_local_results "
"experimental_initialize and experimental_finalize no longer needed ")
contrib_mirrored_strategy_warning = (
ast_edits.ERROR,
"(Manual edit required) tf.contrib.distribute.MirroredStrategy has "
"been migrated to tf.distribute.MirroredStrategy. Things to note: "
"Constructor arguments have changed. If you are using "
"MirroredStrategy with Keras training framework, the input provided to "
"`model.fit` will be assumed to have global batch size and split "
"across the replicas. " + distribute_strategy_api_changes)
core_mirrored_strategy_warning = (
ast_edits.WARNING,
"(Manual edit may be required) tf.distribute.MirroredStrategy API has "
"changed. " + distribute_strategy_api_changes)
contrib_one_device_strategy_warning = (
ast_edits.ERROR,
"(Manual edit required) tf.contrib.distribute.OneDeviceStrategy has "
"been migrated to tf.distribute.OneDeviceStrategy. " +
distribute_strategy_api_changes)
contrib_tpu_strategy_warning = (
ast_edits.ERROR,
"(Manual edit required) tf.contrib.distribute.TPUStrategy has "
"been migrated to tf.distribute.TPUStrategy. Note the "
"slight changes in constructor. " + distribute_strategy_api_changes)
contrib_collective_strategy_warning = (
ast_edits.ERROR,
"(Manual edit required) "
"tf.contrib.distribute.CollectiveAllReduceStrategy has "
"been migrated to "
"tf.distribute.experimental.MultiWorkerMirroredStrategy. Note the "
"changes in constructor. " + distribute_strategy_api_changes)
contrib_ps_strategy_warning = (
ast_edits.ERROR, "(Manual edit required) "
"tf.contrib.distribute.ParameterServerStrategy has "
"been migrated to "
"tf.compat.v1.distribute.experimental.ParameterServerStrategy (multi "
"machine) and tf.distribute.experimental.CentralStorageStrategy (one "
"machine). Note the changes in constructors. " +
distribute_strategy_api_changes)
keras_experimental_export_comment = (
ast_edits.WARNING,
"tf.keras.experimental.export_saved_model and "
"tf.keras.experimental.load_from_saved_model have been deprecated."
"Please use model.save(path, save_format='tf') "
"(or alternatively tf.keras.models.save_model), and "
"tf.keras.models.load_model(path) instead.")
# Function warnings. <function name> placeholder inside warnings will be
# replaced by function name.
# You can use *. to add items which do not check the FQN, and apply to e.g.,
# methods.
self.function_warnings = {
"*.export_savedmodel":
export_saved_model_renamed,
"*.save":
keras_default_save_format_comment,
"tf.assert_equal":
assert_return_type_comment,
"tf.assert_none_equal":
assert_return_type_comment,
"tf.assert_negative":
assert_return_type_comment,
"tf.assert_positive":
assert_return_type_comment,
"tf.assert_non_negative":
assert_return_type_comment,
"tf.assert_non_positive":
assert_return_type_comment,
"tf.assert_near":
assert_return_type_comment,
"tf.assert_less":
assert_return_type_comment,
"tf.assert_less_equal":
assert_return_type_comment,
"tf.assert_greater":
assert_return_type_comment,
"tf.assert_greater_equal":
assert_return_type_comment,
"tf.assert_integer":
assert_return_type_comment,
"tf.assert_type":
assert_return_type_comment,
"tf.assert_scalar":
assert_return_type_comment,
"tf.assert_rank":
assert_rank_comment,
"tf.assert_rank_at_least":
assert_rank_comment,
"tf.assert_rank_in":
assert_rank_comment,
"tf.contrib.layers.layer_norm":
contrib_layers_layer_norm_comment,
"tf.contrib.estimator.binary_classification_head":
contrib_estimator_head_comment,
"tf.contrib.estimator.logistic_regression_head":
contrib_estimator_head_comment,
"tf.contrib.estimator.multi_class_head":
contrib_estimator_head_comment,
"tf.contrib.estimator.multi_head":
contrib_estimator_head_comment,
"tf.contrib.estimator.multi_label_head":
contrib_estimator_head_comment,
"tf.contrib.estimator.poisson_regression_head":
contrib_estimator_head_comment,
"tf.contrib.estimator.regression_head":
contrib_estimator_head_comment,
"tf.contrib.saved_model.load_keras_model":
keras_experimental_export_comment,
"tf.contrib.saved_model.save_keras_model":
keras_experimental_export_comment,
"tf.contrib.summary.all_summary_ops":
contrib_summary_comment,
"tf.contrib.summary.audio":
contrib_summary_comment,
"tf.contrib.summary.create_file_writer":
contrib_create_file_writer_comment,
"tf.contrib.summary.generic":
contrib_summary_comment,
"tf.contrib.summary.graph":
contrib_summary_graph_comment,
"tf.contrib.summary.histogram":
contrib_summary_comment,
"tf.contrib.summary.import_event":
contrib_summary_import_event_comment,
"tf.contrib.summary.image":
contrib_summary_comment,
"tf.contrib.summary.record_summaries_every_n_global_steps":
contrib_summary_record_every_n_comment,
"tf.contrib.summary.scalar":
contrib_summary_comment,
"tf.debugging.assert_equal":
assert_return_type_comment,
"tf.debugging.assert_greater":
assert_return_type_comment,
"tf.debugging.assert_greater_equal":
assert_return_type_comment,
"tf.debugging.assert_integer":
assert_return_type_comment,
"tf.debugging.assert_less":
assert_return_type_comment,
"tf.debugging.assert_less_equal":
assert_return_type_comment,
"tf.debugging.assert_near":
assert_return_type_comment,
"tf.debugging.assert_negative":
assert_return_type_comment,
"tf.debugging.assert_non_negative":
assert_return_type_comment,
"tf.debugging.assert_non_positive":
assert_return_type_comment,
"tf.debugging.assert_none_equal":
assert_return_type_comment,
"tf.debugging.assert_positive":
assert_return_type_comment,
"tf.debugging.assert_type":
assert_return_type_comment,
"tf.debugging.assert_scalar":
assert_return_type_comment,
"tf.debugging.assert_rank":
assert_rank_comment,
"tf.debugging.assert_rank_at_least":
assert_rank_comment,
"tf.debugging.assert_rank_in":
assert_rank_comment,
"tf.train.exponential_decay":
decay_function_comment,
"tf.train.piecewise_constant_decay":
decay_function_comment,
"tf.train.polynomial_decay":
decay_function_comment,
"tf.train.natural_exp_decay":
decay_function_comment,
"tf.train.inverse_time_decay":
decay_function_comment,
"tf.train.cosine_decay":
decay_function_comment,
"tf.train.cosine_decay_restarts":
decay_function_comment,
"tf.train.linear_cosine_decay":
decay_function_comment,
"tf.train.noisy_linear_cosine_decay":
decay_function_comment,
"tf.nn.embedding_lookup":
deprecate_partition_strategy_comment,
"tf.nn.embedding_lookup_sparse":
deprecate_partition_strategy_comment,
"tf.nn.nce_loss":
deprecate_partition_strategy_comment,
"tf.nn.safe_embedding_lookup_sparse":
deprecate_partition_strategy_comment,
"tf.nn.sampled_softmax_loss":
deprecate_partition_strategy_comment,
"tf.keras.estimator.model_to_estimator":
(ast_edits.WARNING,
"Estimators from <function name> will save object-based "
"checkpoints (format used by `keras_model.save_weights` and "
"`keras_model.load_weights`) by default in 2.0. To continue "
"saving name-based checkpoints, set `checkpoint_format='saver'`."),
"tf.keras.experimental.export_saved_model":
keras_experimental_export_comment,
"tf.keras.experimental.load_from_saved_model":
keras_experimental_export_comment,
"tf.keras.initializers.Zeros":
initializers_no_dtype_comment,
"tf.keras.initializers.zeros":
initializers_no_dtype_comment,
"tf.keras.initializers.Ones":
initializers_no_dtype_comment,
"tf.keras.initializers.ones":
initializers_no_dtype_comment,
"tf.keras.initializers.Constant":
initializers_no_dtype_comment,
"tf.keras.initializers.constant":
initializers_no_dtype_comment,
"tf.keras.initializers.VarianceScaling":
initializers_no_dtype_comment,
"tf.keras.initializers.Orthogonal":
initializers_no_dtype_comment,
"tf.keras.initializers.orthogonal":
initializers_no_dtype_comment,
"tf.keras.initializers.Identity":
initializers_no_dtype_comment,
"tf.keras.initializers.identity":
initializers_no_dtype_comment,
"tf.keras.initializers.glorot_uniform":
initializers_no_dtype_comment,
"tf.keras.initializers.glorot_normal":
initializers_no_dtype_comment,
"tf.initializers.zeros":
initializers_no_dtype_comment,
"tf.zeros_initializer":
initializers_no_dtype_comment,
"tf.initializers.ones":
initializers_no_dtype_comment,
"tf.ones_initializer":
initializers_no_dtype_comment,
"tf.initializers.constant":
initializers_no_dtype_comment,
"tf.constant_initializer":
initializers_no_dtype_comment,
"tf.initializers.random_uniform":
initializers_no_dtype_comment,
"tf.random_uniform_initializer":
initializers_no_dtype_comment,
"tf.initializers.random_normal":
initializers_no_dtype_comment,
"tf.random_normal_initializer":
initializers_no_dtype_comment,
"tf.initializers.truncated_normal":
initializers_no_dtype_comment,
"tf.truncated_normal_initializer":
initializers_no_dtype_comment,
"tf.initializers.variance_scaling":
initializers_no_dtype_comment,
"tf.variance_scaling_initializer":
initializers_no_dtype_comment,
"tf.initializers.orthogonal":
initializers_no_dtype_comment,
"tf.orthogonal_initializer":
initializers_no_dtype_comment,
"tf.initializers.identity":
initializers_no_dtype_comment,
"tf.glorot_uniform_initializer":
initializers_no_dtype_comment,
"tf.initializers.glorot_uniform":
initializers_no_dtype_comment,
"tf.glorot_normal_initializer":
initializers_no_dtype_comment,
"tf.initializers.glorot_normal":
initializers_no_dtype_comment,
"tf.losses.absolute_difference":
losses_comment,
"tf.losses.add_loss":
losses_comment,
"tf.losses.compute_weighted_loss":
losses_comment,
"tf.losses.cosine_distance":
losses_comment,
"tf.losses.get_losses":
losses_comment,
"tf.losses.get_regularization_loss":
losses_comment,
"tf.losses.get_regularization_losses":
losses_comment,
"tf.losses.get_total_loss":
losses_comment,
"tf.losses.hinge_loss":
losses_comment,
"tf.losses.huber_loss":
losses_comment,
"tf.losses.log_loss":
losses_comment,
"tf.losses.mean_pairwise_squared_error":
losses_comment,
"tf.losses.mean_squared_error":
losses_comment,
"tf.losses.sigmoid_cross_entropy":
losses_comment,
"tf.losses.softmax_cross_entropy":
losses_comment,
"tf.losses.sparse_softmax_cross_entropy":
losses_comment,
"tf.metrics.accuracy":
metrics_comment,
"tf.metrics.auc":
metrics_comment,
"tf.metrics.average_precision_at_k":
metrics_comment,
"tf.metrics.false_negatives":
metrics_comment,
"tf.metrics.false_negatives_at_thresholds":
metrics_comment,
"tf.metrics.false_positives":
metrics_comment,
"tf.metrics.false_positives_at_thresholds":
metrics_comment,
"tf.metrics.mean":
metrics_comment,
"tf.metrics.mean_absolute_error":
metrics_comment,
"tf.metrics.mean_cosine_distance":
metrics_comment,
"tf.metrics.mean_iou":
metrics_comment,
"tf.metrics.mean_per_class_accuracy":
metrics_comment,
"tf.metrics.mean_relative_error":
metrics_comment,
"tf.metrics.mean_squared_error":
metrics_comment,
"tf.metrics.mean_tensor":
metrics_comment,
"tf.metrics.percentage_below":
metrics_comment,
"tf.metrics.precision":
metrics_comment,
"tf.metrics.precision_at_k":
metrics_comment,
"tf.metrics.precision_at_thresholds":
metrics_comment,
"tf.metrics.precision_at_top_k":
metrics_comment,
"tf.metrics.recall":
metrics_comment,
"tf.metrics.recall_at_k":
metrics_comment,
"tf.metrics.recall_at_thresholds":
metrics_comment,
"tf.metrics.recall_at_top_k":
metrics_comment,
"tf.metrics.root_mean_squared_error":
metrics_comment,
"tf.metrics.sensitivity_at_specificity":
metrics_comment,
"tf.metrics.sparse_average_precision_at_k":
metrics_comment,
"tf.metrics.sparse_precision_at_k":
metrics_comment,
"tf.metrics.specificity_at_sensitivity":
metrics_comment,
"tf.metrics.true_negatives":
metrics_comment,
"tf.metrics.true_negatives_at_thresholds":
metrics_comment,
"tf.metrics.true_positives":
metrics_comment,
"tf.metrics.true_positives_at_thresholds":
metrics_comment,
"tf.get_variable":
(ast_edits.WARNING,
"<function name> returns ResourceVariables by default in 2.0, "
"which have well-defined semantics and are stricter about shapes. "
"You can disable this behavior by passing use_resource=False, or "
"by calling tf.compat.v1.disable_resource_variables()."),
"tf.pywrap_tensorflow":
(ast_edits.ERROR,
"<function name> cannot be converted automatically. "
"`tf.pywrap_tensorflow` will not be distributed with "
"TensorFlow 2.0, please consider an alternative in public "
"TensorFlow APIs."),
"tf.contrib.distribute.MirroredStrategy":
contrib_mirrored_strategy_warning,
"tf.distribute.MirroredStrategy":
core_mirrored_strategy_warning,
"tf.contrib.distribute.OneDeviceStrategy":
contrib_one_device_strategy_warning,
"tf.contrib.distribute.TPUStrategy":
contrib_tpu_strategy_warning,
"tf.contrib.distribute.CollectiveAllReduceStrategy":
contrib_collective_strategy_warning,
"tf.contrib.distribute.ParameterServerStrategy":
contrib_ps_strategy_warning,
"tf.summary.FileWriter": summary_api_comment,
"tf.summary.FileWriterCache": summary_api_comment,
"tf.summary.Summary": summary_api_comment,
"tf.summary.audio": summary_api_comment,
"tf.summary.histogram": summary_api_comment,
"tf.summary.image": summary_api_comment,
"tf.summary.merge": summary_api_comment,
"tf.summary.merge_all": summary_api_comment,
"tf.summary.scalar": summary_api_comment,
"tf.summary.tensor_summary": summary_api_comment,
"tf.summary.text": summary_api_comment,
}
all_renames_v2.add_contrib_direct_import_support(self.function_warnings)
for symbol, replacement in all_renames_v2.addons_symbol_mappings.items():
warning = (
ast_edits.WARNING, (
"(Manual edit required) `{}` has been migrated to `{}` in "
"TensorFlow Addons. The API spec may have changed during the "
"migration. Please see https://github.com/tensorflow/addons "
"for more info.").format(symbol, replacement))
self.function_warnings[symbol] = warning
# Warnings that are emitted only if a specific arg is found.
self.function_arg_warnings = {
"tf.nn.conv1d": {
("use_cudnn_on_gpu", 4):
(ast_edits.WARNING,
"use_cudnn_on_gpu has been removed, behavior is now equivalent"
"to setting it to True."),
},
"tf.nn.conv2d": {
("use_cudnn_on_gpu", 4):
(ast_edits.WARNING,
"use_cudnn_on_gpu has been removed, behavior is now equivalent"
"to setting it to True."),
},
"tf.nn.conv2d_backprop_filter": {
("use_cudnn_on_gpu", 5):
(ast_edits.WARNING,
"use_cudnn_on_gpu has been removed, behavior is now equivalent"
"to setting it to True."),
},
"tf.nn.conv2d_backprop_input": {
("use_cudnn_on_gpu", 5):
(ast_edits.WARNING,
"use_cudnn_on_gpu has been removed, behavior is now equivalent"
"to setting it to True."),
},
"tf.gradients": {
("colocate_gradients_with_ops", 4):
(ast_edits.INFO, "tf.gradients no longer takes "
"'colocate_gradients_with_ops' argument, it behaves as if it "
"was set to True."),
},
"tf.hessians": {
("colocate_gradients_with_ops", 3):
(ast_edits.INFO, "tf.hessians no longer takes "
"'colocate_gradients_with_ops' argument, it behaves as if it "
"was set to True."),
},
"*.minimize": {
("colocate_gradients_with_ops", 5):
(ast_edits.INFO, "Optimizer.minimize no longer takes "
"'colocate_gradients_with_ops' argument, it behaves as if it "
"was set to True."),
},
"*.compute_gradients": {
("colocate_gradients_with_ops", 4):
(ast_edits.INFO, "Optimizer.compute_gradients no "
"longer takes 'colocate_gradients_with_ops' argument, it "
"behaves as if it was set to True."),
},
"tf.cond": {
("strict", 3):
(ast_edits.WARNING,
"tf.cond no longer takes 'strict' argument, it behaves as "
"if was set to True.")
},
"tf.contrib.summary.audio": {
("family", 4): contrib_summary_family_arg_comment,
},
"tf.contrib.summary.create_file_writer": {
("name", 4):
(ast_edits.WARNING,
"tf.contrib.summary.create_file_writer() no longer supports "
"implicit writer re-use based on shared logdirs or resource "
"names; this call site passed a 'name' argument that has been "
"removed. The new tf.compat.v2.summary.create_file_writer() "
"replacement has a 'name' parameter but the semantics are "
"the usual ones to name the op itself and do not control "
"writer re-use; writers must be manually re-used if desired.")
},
"tf.contrib.summary.generic": {
("name", 0): (
ast_edits.WARNING,
"tf.contrib.summary.generic() takes a 'name' argument for the "
"op name that also determines the emitted tag (prefixed by any "
"active name scopes), but tf.compat.v2.summary.write(), which "
"replaces it, separates these into 'tag' and 'name' arguments. "
"The 'name' argument here has been converted to 'tag' to "
"preserve a meaningful tag, but any name scopes will not be "
"reflected in the tag without manual editing."),
("family", 3): contrib_summary_family_arg_comment,
},
"tf.contrib.summary.histogram": {
("family", 2): contrib_summary_family_arg_comment,
},
"tf.contrib.summary.image": {
("bad_color", 2): (
ast_edits.WARNING,
"tf.contrib.summary.image no longer takes the 'bad_color' "
"argument; caller must now preprocess if needed. This call "
"site specifies a bad_color argument so it cannot be converted "
"safely."),
("family", 4): contrib_summary_family_arg_comment,
},
"tf.contrib.summary.scalar": {
("family", 2): contrib_summary_family_arg_comment,
},
"tf.image.resize": {
("align_corners", 3):
(ast_edits.WARNING,
"align_corners is not supported by tf.image.resize, the new "
"default transformation is close to what v1 provided. If you "
"require exactly the same transformation as before, use "
"compat.v1.image.resize."),
},
"tf.image.resize_bilinear": {
("align_corners", 2):
(ast_edits.WARNING,
"align_corners is not supported by tf.image.resize, the new "
"default transformation is close to what v1 provided. If you "
"require exactly the same transformation as before, use "
"compat.v1.image.resize_bilinear."),
},
"tf.image.resize_area": {
("align_corners", 2):
(ast_edits.WARNING,
"align_corners is not supported by tf.image.resize, the new "
"default transformation is close to what v1 provided. If you "
"require exactly the same transformation as before, use "
"compat.v1.image.resize_area."),
},
"tf.image.resize_bicubic": {
("align_corners", 2):
(ast_edits.WARNING,
"align_corners is not supported by tf.image.resize, the new "
"default transformation is close to what v1 provided. If you "
"require exactly the same transformation as before, use "
"compat.v1.image.resize_bicubic."),
},
"tf.image.resize_nearest_neighbor": {
("align_corners", 2):
(ast_edits.WARNING,
"align_corners is not supported by tf.image.resize, the new "
"default transformation is close to what v1 provided. If you "
"require exactly the same transformation as before, use "
"compat.v1.image.resize_nearest_neighbor."),
},
}
all_renames_v2.add_contrib_direct_import_support(self.function_arg_warnings)
# Specially handled functions
# Each transformer is a callable which will be called with the arguments
# transformer(parent, node, full_name, name, logs)
# Where logs is a list to which (level, line, col, msg) tuples can be
# appended, full_name is the FQN of the function called (or None if that is
    # unknown), name is the name of the function called (or None if that is
# unknown). node is an ast.Call node representing this function call, and
# parent is its parent in the AST.
# The function may modify node (but not parent), and must return
    # - None, if nothing was modified
# - node, if node was modified in place (make sure to use
# pasta.ast_utils.replace_child to swap out children, otherwise formatting
# may get messy)
# - a replacement for node, if the whole call node was replaced. The caller
# will take care of changing parent.
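    # To make this contract concrete, a minimal transformer (hypothetical,
    # shown only for illustration) that logs each call site and leaves the
    # node untouched would look like:
    #
    #   def _log_only_transformer(parent, node, full_name, name, logs):
    #     logs.append((ast_edits.INFO, node.lineno, node.col_offset,
    #                  "Visited call to %s." % (full_name or name)))
    #     return None  # nothing was modified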
canned_estimator_msg_optimizer = (
"tf.keras.optimizers.* only, so the call was converted to compat.v1. "
"Please note that tf.train.Optimizers have one-to-one correspondents "
"in tf.keras.optimizers, so you may be able to convert to the new "
"optimizers directly (See https://www.tensorflow.org/api_docs/python"
"/tf/keras/optimizers). Checkpoint compatibility is not guaranteed, "
"but there is a checkpoint converter tool that you can use.")
canned_estimator_msg = (
"no longer takes `input_layer_partitioner` arg, and it supports "
+ canned_estimator_msg_optimizer)
self.function_transformers = {
"*.make_initializable_iterator": _iterator_transformer,
"*.make_one_shot_iterator": _iterator_transformer,
"tf.nn.dropout": _dropout_transformer,
"tf.to_bfloat16": _cast_transformer,
"tf.to_complex128": _cast_transformer,
"tf.to_complex64": _cast_transformer,
"tf.to_double": _cast_transformer,
"tf.to_float": _cast_transformer,
"tf.to_int32": _cast_transformer,
"tf.to_int64": _cast_transformer,
"tf.nn.softmax_cross_entropy_with_logits":
_softmax_cross_entropy_with_logits_transformer,
"tf.image.extract_glimpse": _extract_glimpse_transformer,
"tf.image.resize_area": _image_resize_transformer,
"tf.image.resize_bicubic": _image_resize_transformer,
"tf.image.resize_bilinear": _image_resize_transformer,
"tf.image.resize_nearest_neighbor": _image_resize_transformer,
"tf.nn.fractional_avg_pool": _pool_seed_transformer,
"tf.nn.fractional_max_pool": _pool_seed_transformer,
"tf.name_scope": _name_scope_transformer,
# TODO(b/129398290)
# "tf.string_split": _string_split_transformer,
"tf.strings.split": _string_split_rtype_transformer,
"tf.estimator.BaselineEstimator":
functools.partial(
_rename_if_arg_found_transformer,
arg_name="optimizer",
message=("tf.estimator.BaselineEstimator supports "
+ canned_estimator_msg_optimizer),
),
"tf.estimator.BaselineClassifier":
functools.partial(
_rename_if_arg_found_and_add_loss_reduction_transformer,
arg_names=["optimizer"],
message=("tf.estimator.BaselineClassifier supports "
+ canned_estimator_msg_optimizer),
),
"tf.estimator.BaselineRegressor":
functools.partial(
_rename_if_arg_found_and_add_loss_reduction_transformer,
arg_names=["input_layer_partitioner", "optimizer"],
message=("tf.estimator.BaselineRegressor supports "
+ canned_estimator_msg_optimizer),
),
"tf.estimator.DNNEstimator":
functools.partial(
_rename_if_any_arg_found_transformer,
arg_names=["input_layer_partitioner", "optimizer"],
message="tf.estimator.DNNEstimator no longer takes "
"input_layer_partitioner, so the call was converted to "
"compat.v1."
),
"tf.estimator.DNNClassifier":
functools.partial(
_rename_if_arg_found_and_add_loss_reduction_transformer,
arg_names=["input_layer_partitioner", "optimizer"],
message="tf.estimator.DNNClassifier " + canned_estimator_msg,
),
"tf.estimator.DNNRegressor":
functools.partial(
_rename_if_arg_found_and_add_loss_reduction_transformer,
arg_names=["input_layer_partitioner", "optimizer"],
message="tf.estimator.DNNRegressor " + canned_estimator_msg,
),
"tf.estimator.LinearEstimator":
functools.partial(
_rename_if_any_arg_found_transformer,
arg_names=["input_layer_partitioner", "optimizer"],
message="tf.estimator.LinearEstimator " + canned_estimator_msg,
),
"tf.estimator.LinearClassifier":
functools.partial(
_rename_if_arg_found_and_add_loss_reduction_transformer,
arg_names=["input_layer_partitioner", "optimizer"],
message="tf.estimator.LinearClassifier " + canned_estimator_msg,
),
"tf.estimator.LinearRegressor":
functools.partial(
_rename_if_arg_found_and_add_loss_reduction_transformer,
arg_names=["input_layer_partitioner", "optimizer"],
message="tf.estimator.LinearRegressor " + canned_estimator_msg,
),
"tf.estimator.DNNLinearCombinedEstimator":
functools.partial(
_rename_if_any_arg_found_transformer,
arg_names=[
"input_layer_partitioner", "dnn_optimizer",
"linear_optimizer"
],
message=("tf.estimator.DNNLinearCombinedEstimator "
+ canned_estimator_msg),
),
"tf.estimator.DNNLinearCombinedClassifier":
functools.partial(
_rename_if_arg_found_and_add_loss_reduction_transformer,
arg_names=[
"input_layer_partitioner", "dnn_optimizer",
"linear_optimizer"
],
message=("tf.estimator.DNNLinearCombinedClassifier "
+ canned_estimator_msg),
),
"tf.estimator.DNNLinearCombinedRegressor":
functools.partial(
_rename_if_arg_found_and_add_loss_reduction_transformer,
arg_names=[
"input_layer_partitioner", "dnn_optimizer",
"linear_optimizer"
],
message=("tf.estimator.DNNLinearCombinedRegressor "
+ canned_estimator_msg),
),
"tf.device": functools.partial(
_rename_if_arg_found_transformer, arg_name="device_name",
arg_ok_predicate=_is_ast_str, remove_if_ok=False,
message="tf.device no longer takes functions as an argument. "
"We could not determine that the argument value is a string, so "
"the call was converted to compat.v1."),
"tf.zeros_like": functools.partial(
_rename_if_arg_found_transformer, arg_name="optimize",
arg_ok_predicate=_is_ast_true, remove_if_ok=True,
message="tf.zeros_like no longer takes an optimize argument, and "
"behaves as if optimize=True. This call site specifies something "
"other than optimize=True, so it was converted to compat.v1."),
"tf.ones_like": functools.partial(
_rename_if_arg_found_transformer, arg_name="optimize",
arg_ok_predicate=_is_ast_true, remove_if_ok=True,
message="tf.ones_like no longer takes an optimize argument, and "
"behaves as if optimize=True. This call site specifies something "
"other than optimize=True, so it was converted to compat.v1."),
"tf.while_loop": functools.partial(
_rename_if_arg_found_transformer,
arg_name="return_same_structure",
arg_ok_predicate=_is_ast_true, remove_if_ok=True,
message="tf.while_loop no longer takes 'return_same_structure' "
"argument and behaves as if return_same_structure=True. This call "
"site specifies something other than return_same_structure=True, "
"so it was converted to compat.v1."),
"tf.nn.ctc_beam_search_decoder": functools.partial(
_rename_if_arg_found_transformer,
arg_name="merge_repeated",
arg_ok_predicate=_is_ast_false, remove_if_ok=True,
message="tf.nn.ctc_beam_search_decoder no longer takes the "
"'merge_repeated' argument and behaves as if merge_repeated=False. "
"This call site specifies something other than "
"merge_repeated=False, so it was converted to compat.v1."),
"tf.nn.dilation2d": functools.partial(
_add_argument_transformer,
arg_name="data_format",
arg_value_ast=ast.Str("NHWC")),
"tf.nn.erosion2d": functools.partial(
_add_argument_transformer,
arg_name="data_format",
arg_value_ast=ast.Str("NHWC")),
"tf.contrib.summary.always_record_summaries": functools.partial(
_add_summary_recording_cond_transformer, cond="True"),
"tf.contrib.summary.audio": _add_summary_step_transformer,
"tf.contrib.summary.generic": _add_summary_step_transformer,
"tf.contrib.summary.histogram": _add_summary_step_transformer,
"tf.contrib.summary.image": _add_summary_step_transformer,
"tf.contrib.summary.never_record_summaries": functools.partial(
_add_summary_recording_cond_transformer, cond="False"),
"tf.contrib.summary.scalar": _add_summary_step_transformer,
"tf.contrib.layers.l1_regularizer":
_contrib_layers_l1_regularizer_transformer,
"tf.contrib.layers.l2_regularizer":
_contrib_layers_l2_regularizer_transformer,
"tf.contrib.layers.xavier_initializer":
_contrib_layers_xavier_initializer_transformer,
"tf.contrib.layers.xavier_initializer_conv2d":
_contrib_layers_xavier_initializer_transformer,
"tf.contrib.layers.variance_scaling_initializer":
_contrib_layers_variance_scaling_initializer_transformer,
"tf.initializers.uniform_unit_scaling":
_add_uniform_scaling_initializer_transformer,
"tf.uniform_unit_scaling_initializer":
_add_uniform_scaling_initializer_transformer,
"slim.l1_regularizer":
_contrib_layers_l1_regularizer_transformer,
"slim.l2_regularizer":
_contrib_layers_l2_regularizer_transformer,
"slim.xavier_initializer":
_contrib_layers_xavier_initializer_transformer,
"slim.xavier_initializer_conv2d":
_contrib_layers_xavier_initializer_transformer,
"slim.variance_scaling_initializer":
_contrib_layers_variance_scaling_initializer_transformer,
"tf.keras.models.save_model": functools.partial(
_add_argument_transformer,
arg_name="save_format",
arg_value_ast=ast.Str("h5")),
}
all_renames_v2.add_contrib_direct_import_support(self.function_transformers)
self.module_deprecations = module_deprecations_v2.MODULE_DEPRECATIONS
def preprocess(self, root_node, after_compat_v1_upgrade=False):
visitor = ast_edits.PastaAnalyzeVisitor(TFAPIImportAnalysisSpec())
visitor.visit(root_node)
detections = set(visitor.results)
# Upgrade explicit compat v1 imports if `upgrade_compat_v1_import` is
# enabled. Then preprocess the updated root node.
# We only do this upgrading once, because some forms of the import may
# still cause errors but aren't trivially upgradeable, and we don't want
# to enter an infinite loop. E.g. `from tensorflow.compat import v1, v2`.
if (compat_v1_import in detections and self.upgrade_compat_v1_import and
not after_compat_v1_upgrade):
CompatV1ImportReplacer().visit(root_node)
return self.preprocess(root_node, after_compat_v1_upgrade=True)
# If we have detected the presence of imports of specific TF versions,
    # we want to modify the update spec to check only module deprecations
# and skip all other conversions.
if detections:
self.function_handle = {}
self.function_reorders = {}
self.function_keyword_renames = {}
self.symbol_renames = {}
self.function_warnings = {}
self.change_to_function = {}
self.module_deprecations = module_deprecations_v2.MODULE_DEPRECATIONS
self.function_transformers = {}
self.import_renames = {}
return root_node, visitor.log, visitor.warnings_and_errors
def clear_preprocessing(self):
self.__init__()
def _is_ast_str(node):
"""Determine whether this node represents a string."""
allowed_types = [ast.Str]
if hasattr(ast, "Bytes"):
allowed_types += [ast.Bytes]
if hasattr(ast, "JoinedStr"):
allowed_types += [ast.JoinedStr]
if hasattr(ast, "FormattedValue"):
allowed_types += [ast.FormattedValue]
  return isinstance(node, tuple(allowed_types))
def _is_ast_true(node):
if hasattr(ast, "NameConstant"):
return isinstance(node, ast.NameConstant) and node.value is True
else:
return isinstance(node, ast.Name) and node.id == "True"
def _is_ast_false(node):
if hasattr(ast, "NameConstant"):
return isinstance(node, ast.NameConstant) and node.value is False
else:
return isinstance(node, ast.Name) and node.id == "False"
# Lots of unused arguments below, since these are called in a standard manner.
# pylint: disable=unused-argument
def _rename_if_arg_found_transformer(parent, node, full_name, name, logs,
arg_name=None,
arg_ok_predicate=None,
remove_if_ok=False,
message=None):
"""Replaces the given call with tf.compat.v1 if the given arg is found.
This requires the function to be called with all named args, so for using
this transformer, the function should also be added to renames.
If the arg is not found, the call site is left alone.
If the arg is found, and if arg_ok_predicate is given, it is called with
the ast Expression representing the argument value found. If it returns
True, the function is left alone.
  If the arg is found, arg_ok_predicate is not None and returns True, and
remove_if_ok is True, the argument is removed from the call.
Otherwise, `compat.v1` is inserted between tf and the function name.
Args:
parent: Parent of node.
node: ast.Call node to maybe modify.
full_name: full name of function to modify
name: name of function to modify
logs: list of logs to append to
arg_name: name of the argument to look for
arg_ok_predicate: predicate callable with the ast of the argument value,
returns whether the argument value is allowed.
remove_if_ok: remove the argument if present and ok as determined by
arg_ok_predicate.
message: message to print if a non-ok arg is found (and hence, the function
is renamed to its compat.v1 version).
Returns:
node, if it was modified, else None.
"""
# Check whether arg is there.
arg_present, arg_value = ast_edits.get_arg_value(node, arg_name)
if not arg_present:
return
# Check whether arg is problematic (and if not, maybe remove it).
if arg_ok_predicate and arg_ok_predicate(arg_value):
if remove_if_ok:
for i, kw in enumerate(node.keywords):
if kw.arg == arg_name:
node.keywords.pop(i)
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Removed argument %s for function %s" % (
arg_name, full_name or name)))
break
return node
else:
return
# All conditions met, insert v1 and log what we did.
# We must have a full name, so the func is an attribute.
new_name = six.ensure_str(full_name).replace("tf.", "tf.compat.v1.", 1)
node.func = ast_edits.full_name_node(new_name)
logs.append((
ast_edits.INFO, node.lineno, node.col_offset,
"Renaming %s to %s because argument %s is present. %s" %
(full_name, new_name, arg_name, message if message is not None else "")
))
return node
def _add_argument_transformer(parent, node, full_name, name, logs,
arg_name, arg_value_ast):
"""Adds an argument (as a final kwarg arg_name=arg_value_ast)."""
node.keywords.append(ast.keyword(arg=arg_name, value=arg_value_ast))
logs.append((
ast_edits.INFO, node.lineno, node.col_offset,
"Adding argument '%s' to call to %s." % (pasta.dump(node.keywords[-1]),
full_name or name)
))
return node
def _iterator_transformer(parent, node, full_name, name, logs):
"""Transform iterator methods to compat function calls."""
# First, check that node.func.value is not already something we like
# (tf.compat.v1.data), or something which is handled in the rename
# (tf.data). This transformer only handles the method call to function call
# conversion.
if full_name and (six.ensure_str(full_name).startswith("tf.compat.v1.data") or
six.ensure_str(full_name).startswith("tf.data")):
return
# This should never happen, since we're only called for Attribute nodes.
if not isinstance(node.func, ast.Attribute):
return
# Transform from x.f(y) to tf.compat.v1.data.f(x, y)
# Fortunately, node.func.value should already have valid position info
node.args = [node.func.value] + node.args
node.func.value = ast_edits.full_name_node("tf.compat.v1.data")
logs.append((ast_edits.WARNING, node.lineno, node.col_offset,
"Changing dataset.%s() to tf.compat.v1.data.%s(dataset). "
"Please check this transformation.\n" % (name, name)))
return node
def _dropout_transformer(parent, node, full_name, name, logs):
"""Replace keep_prob with 1-rate."""
def _replace_keep_prob_node(parent, old_value):
"""Replaces old_value with 1-(old_value)."""
one = ast.Num(n=1)
one.lineno = 0
one.col_offset = 0
new_value = ast.BinOp(left=one, op=ast.Sub(),
right=old_value)
# This copies the prefix and suffix on old_value to new_value.
pasta.ast_utils.replace_child(parent, old_value, new_value)
ast.copy_location(new_value, old_value)
# Put parentheses around keep_prob.value (and remove the old prefix/
# suffix, they should only be around new_value).
pasta.base.formatting.set(old_value, "prefix", "(")
pasta.base.formatting.set(old_value, "suffix", ")")
# Check if we have a keep_prob keyword arg
for keep_prob in node.keywords:
if keep_prob.arg == "keep_prob":
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Changing keep_prob arg of tf.nn.dropout to rate\n"))
keep_prob.arg = "rate"
_replace_keep_prob_node(keep_prob, keep_prob.value)
return node
# Maybe it was a positional arg
if len(node.args) < 2:
logs.append((ast_edits.ERROR, node.lineno, node.col_offset,
"tf.nn.dropout called without arguments, so "
"automatic fix was disabled. tf.nn.dropout has changed "
"the semantics of the second argument."))
else:
rate_arg = ast.keyword(arg="rate", value=node.args[1])
_replace_keep_prob_node(rate_arg, rate_arg.value)
node.keywords.append(rate_arg)
del node.args[1]
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Changing keep_prob arg of tf.nn.dropout to rate, and "
"recomputing value.\n"))
return node
def _cast_transformer(parent, node, full_name, name, logs):
"""Transforms to_int and to_float to cast(..., dtype=...)."""
# Find out the dtype to cast to from the function name
dtype_str = name[3:]
# Special cases where the full dtype is not given
if dtype_str == "float":
dtype_str = "float32"
elif dtype_str == "double":
dtype_str = "float64"
new_arg = ast.keyword(arg="dtype",
value=ast.Attribute(value=ast.Name(id="tf",
ctx=ast.Load()),
attr=dtype_str, ctx=ast.Load()))
# Ensures a valid transformation when a positional name arg is given
if len(node.args) == 2:
name_arg = ast.keyword(arg="name",
value=node.args[-1])
node.args = node.args[:-1]
node.keywords.append(name_arg)
# Python3 ast requires the args for the Attribute, but codegen will mess up
# the arg order if we just set them to 0.
new_arg.value.lineno = node.lineno
new_arg.value.col_offset = node.col_offset+100
node.keywords.append(new_arg)
if isinstance(node.func, ast.Attribute):
node.func.attr = "cast"
else:
assert isinstance(node.func, ast.Name)
node.func.id = "cast"
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Changed %s call to tf.cast(..., dtype=tf.%s)." % (full_name,
dtype_str)))
return node
def _softmax_cross_entropy_with_logits_transformer(
parent, node, full_name, name, logs):
"""Wrap labels argument with stop_gradients."""
def _wrap_label(parent, old_value):
"""Wrap labels with tf.stop_gradient."""
already_stop_grad = (isinstance(old_value, ast.Call) and
isinstance(old_value.func, ast.Attribute) and
old_value.func.attr == "stop_gradient" and
isinstance(old_value.func.value, ast.Name) and
old_value.func.value.id == "tf")
if already_stop_grad:
return False
try:
new_value = ast.Call(
ast.Name(id="tf.stop_gradient", ctx=ast.Load()),
[old_value], [])
except TypeError:
new_value = ast.Call(
ast.Name(id="tf.stop_gradient", ctx=ast.Load()),
[old_value], [], None, None)
# This copies the prefix and suffix on old_value to new_value.
pasta.ast_utils.replace_child(parent, old_value, new_value)
ast.copy_location(new_value, old_value)
return True
# Check if we have a labels keyword arg
for karg in node.keywords:
if karg.arg == "labels":
if _wrap_label(karg, karg.value):
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Changing labels arg of "
"tf.nn.softmax_cross_entropy_with_logits to "
"tf.stop_gradient(labels). Please check this "
"transformation.\n"))
return node
return node
def _image_resize_transformer(parent, node, full_name, name, logs):
"""Transforms image.resize_* to image.resize(..., method=*, ...)."""
resize_method = name[7:].upper()
new_arg = ast.keyword(arg="method",
value=ast.Attribute(
value=ast.Attribute(
value=ast.Attribute(
value=ast.Name(id="tf", ctx=ast.Load()),
attr="image", ctx=ast.Load()),
attr="ResizeMethod", ctx=ast.Load()),
attr=resize_method, ctx=ast.Load()))
# Ensures a valid transformation when a positional name arg is given
if len(node.args) == 4:
pos_arg = ast.keyword(arg="preserve_aspect_ratio",
value=node.args[-1])
node.args = node.args[:-1]
node.keywords.append(pos_arg)
if len(node.args) == 3:
pos_arg = ast.keyword(arg="align_corners",
value=node.args[-1])
node.args = node.args[:-1]
new_keywords = []
for kw in node.keywords:
if kw.arg != "align_corners":
new_keywords.append(kw)
node.keywords = new_keywords
# Python3 ast requires the args for the Attribute, but codegen will mess up
# the arg order if we just set them to 0.
new_arg.value.lineno = node.lineno
new_arg.value.col_offset = node.col_offset+100
node.keywords.append(new_arg)
if isinstance(node.func, ast.Attribute):
node.func.attr = "resize"
else:
assert isinstance(node.func, ast.Name)
node.func.id = "resize"
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Changed %s call to tf.image.resize(..., "
"method=tf.image.ResizeMethod.%s)." % (full_name,
resize_method)))
return node
def _pool_seed_transformer(parent, node, full_name, name, logs):
"""Removes seed2 and deterministic, and adds non-zero seed if needed."""
# This requires that this function uses all kwargs (add to renames!).
seed_arg = None
deterministic = False
modified = False
new_keywords = []
for kw in node.keywords:
if sys.version_info[:2] >= (3, 5) and isinstance(kw, ast.Starred):
pass
elif kw.arg == "seed":
seed_arg = kw
elif kw.arg == "seed2" or kw.arg == "deterministic":
lineno = getattr(kw, "lineno", node.lineno)
col_offset = getattr(kw, "col_offset", node.col_offset)
logs.append((ast_edits.INFO, lineno, col_offset,
"Removed argument %s for function %s" % (
kw.arg, full_name or name)))
if kw.arg == "deterministic":
if not _is_ast_false(kw.value):
deterministic = True
modified = True
continue
new_keywords.append(kw)
  if deterministic:
    if seed_arg is None:
      new_keywords.append(ast.keyword(arg="seed", value=ast.Num(42)))
      logs.append((
          ast_edits.INFO, node.lineno, node.col_offset,
          "Adding seed=42 to call to %s since determinism was requested" % (
              full_name or name)
      ))
    else:
      logs.append((
          ast_edits.WARNING, node.lineno, node.col_offset,
          "The deterministic argument is deprecated for %s, pass a "
          "non-zero seed for determinism. The deterministic argument is "
          "present, possibly not False, and the seed is already set. The "
          "converter cannot determine whether it is nonzero, please check."
          % (full_name or name)
      ))
if modified:
node.keywords = new_keywords
return node
else:
return
def _extract_glimpse_transformer(parent, node, full_name, name, logs):
def _replace_uniform_noise_node(parent, old_value):
"""Replaces old_value with 'uniform' or 'gaussian'."""
uniform = ast.Str(s="uniform")
gaussian = ast.Str(s="gaussian")
new_value = ast.IfExp(body=uniform, test=old_value, orelse=gaussian)
# This copies the prefix and suffix on old_value to new_value.
pasta.ast_utils.replace_child(parent, old_value, new_value)
ast.copy_location(new_value, old_value)
# Put parentheses around noise.value.test (and remove the old prefix/
# suffix, they should only be around new_value.test), so that:
# "uniform" if (a if b else c) else "gaussian" is valid.
pasta.base.formatting.set(new_value.test, "prefix", "(")
pasta.base.formatting.set(new_value.test, "suffix", ")")
# Check if we have a uniform_noise keyword arg
for uniform_noise in node.keywords:
if uniform_noise.arg == "uniform_noise":
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Changing uniform_noise arg of tf.image.extract_glimpse "
"to noise, and recomputing value. Please check this "
"transformation.\n"))
uniform_noise.arg = "noise"
value = "uniform" if uniform_noise.value else "gaussian"
_replace_uniform_noise_node(uniform_noise, uniform_noise.value)
return node
  # Since `noise`/`uniform_noise` is an optional arg, nothing needs to be
  # done if len(node.args) < 6.
  if len(node.args) >= 6:
    _replace_uniform_noise_node(node, node.args[5])
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Changing uniform_noise arg of tf.image.extract_glimpse to "
"noise, and recomputing value.\n"))
return node
def _add_summary_step_transformer(parent, node, full_name, name, logs):
"""Adds a step argument to the summary API call if not specified.
The inserted argument value is tf.compat.v1.train.get_or_create_global_step().
"""
for keyword_arg in node.keywords:
if keyword_arg.arg == "step":
return node
default_value = "tf.compat.v1.train.get_or_create_global_step()"
ast_value = ast.parse(default_value).body[0].value
del ast_value.lineno # hack to prevent spurious reordering of call args
node.keywords.append(ast.keyword(arg="step", value=ast_value))
logs.append((
ast_edits.WARNING, node.lineno, node.col_offset,
"Summary API writing function %s now requires a 'step' argument; "
"inserting default of %s." % (full_name or name, default_value)))
return node
def _add_summary_recording_cond_transformer(parent, node, full_name, name, logs,
cond):
"""Adds cond argument to tf.contrib.summary.xxx_record_summaries().
This is in anticipation of them being renamed to tf.summary.record_if(), which
requires the cond argument.
"""
node.args.append(pasta.parse(cond))
logs.append((
ast_edits.INFO, node.lineno, node.col_offset,
"Adding `%s` argument to %s in anticipation of it being renamed to "
"tf.compat.v2.summary.record_if()" % (cond, full_name or name)))
return node
def _add_loss_reduction_transformer(parent, node, full_name, name, logs):
"""Adds a loss_reduction argument if not specified.
Default value for tf.estimator.*Classifier and tf.estimator.*Regressor
loss_reduction argument changed to SUM_OVER_BATCH_SIZE. So, we update
existing calls to use the old default value `tf.keras.losses.Reduction.SUM`.
Note: to apply this transformation, symbol must be added
to reordered_function_names above.
"""
for keyword_arg in node.keywords:
if keyword_arg.arg == "loss_reduction":
return node
default_value = "tf.keras.losses.Reduction.SUM"
# Parse with pasta instead of ast to avoid emitting a spurious trailing \n.
ast_value = pasta.parse(default_value)
node.keywords.append(ast.keyword(arg="loss_reduction", value=ast_value))
logs.append((
ast_edits.INFO, node.lineno, node.col_offset,
"%s: Default value of loss_reduction has been changed to "
"SUM_OVER_BATCH_SIZE; inserting old default value %s.\n"
% (full_name or name, default_value)))
return node
def _rename_if_any_arg_found_transformer(
parent,
node,
full_name,
name,
logs,
arg_names=None,
arg_ok_predicate=None,
remove_if_ok=False,
message=None):
"""Replaces the given call with tf.compat.v1 if any of the arg_names is found.
Args:
parent: Parent of node.
node: ast.Call node to modify.
full_name: full name of function to modify.
name: name of function to modify.
logs: list of logs to append to.
arg_names: list of names of the argument to look for.
arg_ok_predicate: predicate callable with the ast of the argument value,
returns whether the argument value is allowed.
remove_if_ok: remove the argument if present and ok as determined by
arg_ok_predicate.
message: message to print if a non-ok arg is found (and hence, the function
is renamed to its compat.v1 version).
Returns:
    node, possibly modified.
"""
for arg_name in arg_names:
rename_node = _rename_if_arg_found_transformer(parent, node,
full_name, name, logs,
arg_name, arg_ok_predicate,
remove_if_ok, message)
node = rename_node if rename_node else node
return node
def _rename_if_arg_found_and_add_loss_reduction_transformer(
parent,
node,
full_name,
name,
logs,
arg_names=None,
arg_ok_predicate=None,
remove_if_ok=False,
message=None):
"""Combination of _rename_if_arg_found and _add_loss_reduction transformers.
Args:
parent: Parent of node.
node: ast.Call node to maybe modify.
full_name: full name of function to modify
name: name of function to modify
logs: list of logs to append to
arg_names: list of names of the argument to look for
arg_ok_predicate: predicate callable with the ast of the argument value,
returns whether the argument value is allowed.
remove_if_ok: remove the argument if present and ok as determined by
arg_ok_predicate.
message: message to print if a non-ok arg is found (and hence, the function
is renamed to its compat.v1 version).
Returns:
    node, possibly modified.
"""
node = _add_loss_reduction_transformer(parent, node, full_name, name, logs)
for arg_name in arg_names:
rename_node = _rename_if_arg_found_transformer(parent, node, full_name,
name, logs, arg_name,
arg_ok_predicate,
remove_if_ok, message)
node = rename_node if rename_node else node
return node
def _add_uniform_scaling_initializer_transformer(
parent, node, full_name, name, logs):
"""Updates references to uniform_unit_scaling_initializer.
Transforms:
tf.uniform_unit_scaling_initializer(factor, seed, dtype) to
tf.compat.v1.keras.initializers.VarianceScaling(
scale=factor, distribution="uniform", seed=seed)
Note: to apply this transformation, symbol must be added
to reordered_function_names above.
"""
for keyword_arg in node.keywords:
if keyword_arg.arg == "factor":
keyword_arg.arg = "scale"
distribution_value = "\"uniform\""
# Parse with pasta instead of ast to avoid emitting a spurious trailing \n.
ast_value = pasta.parse(distribution_value)
node.keywords.append(ast.keyword(arg="distribution", value=ast_value))
lineno = node.func.value.lineno
col_offset = node.func.value.col_offset
node.func.value = ast_edits.full_name_node("tf.compat.v1.keras.initializers")
node.func.value.lineno = lineno
node.func.value.col_offset = col_offset
node.func.attr = "VarianceScaling"
return node
def _contrib_layers_xavier_initializer_transformer(
parent, node, full_name, name, logs):
"""Updates references to contrib.layers.xavier_initializer.
Transforms:
tf.contrib.layers.xavier_initializer(uniform, seed, dtype) to
tf.compat.v1.keras.initializers.VarianceScaling(
scale=1.0, mode="fan_avg",
distribution=("uniform" if uniform else "truncated_normal"),
seed=seed, dtype=dtype)
Returns: The new node
"""
def _get_distribution(old_value):
"""Returns an AST matching the following:
("uniform" if (old_value) else "truncated_normal")
"""
dist = pasta.parse("\"uniform\" if old_value else \"truncated_normal\"")
ifexpr = dist.body[0].value
pasta.ast_utils.replace_child(ifexpr, ifexpr.test, old_value)
pasta.base.formatting.set(dist, "prefix", "(")
pasta.base.formatting.set(dist, "suffix", ")")
return dist
found_distribution = False
for keyword_arg in node.keywords:
if keyword_arg.arg == "uniform":
found_distribution = True
keyword_arg.arg = "distribution"
old_value = keyword_arg.value
new_value = _get_distribution(keyword_arg.value)
pasta.ast_utils.replace_child(keyword_arg, old_value, new_value)
pasta.base.formatting.set(keyword_arg.value, "prefix", "(")
pasta.base.formatting.set(keyword_arg.value, "suffix", ")")
new_keywords = []
scale = pasta.parse("1.0")
new_keywords.append(ast.keyword(arg="scale", value=scale))
mode = pasta.parse("\"fan_avg\"")
new_keywords.append(ast.keyword(arg="mode", value=mode))
if len(node.args) >= 1:
found_distribution = True
dist = _get_distribution(node.args[0])
new_keywords.append(ast.keyword(arg="distribution", value=dist))
if not found_distribution:
# Parse with pasta instead of ast to avoid emitting a spurious trailing \n.
uniform_dist = pasta.parse("\"uniform\"")
new_keywords.append(ast.keyword(arg="distribution", value=uniform_dist))
if len(node.args) >= 2:
new_keywords.append(ast.keyword(arg="seed", value=node.args[1]))
if len(node.args) >= 3:
new_keywords.append(ast.keyword(arg="dtype", value=node.args[2]))
node.args = []
node.keywords = new_keywords + node.keywords
lineno = node.func.value.lineno
col_offset = node.func.value.col_offset
node.func.value = ast_edits.full_name_node("tf.compat.v1.keras.initializers")
node.func.value.lineno = lineno
node.func.value.col_offset = col_offset
node.func.attr = "VarianceScaling"
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Changing tf.contrib.layers xavier initializer"
" to a tf.compat.v1.keras.initializers.VarianceScaling and"
" converting arguments.\n"))
return node
def _contrib_layers_variance_scaling_initializer_transformer(
parent, node, full_name, name, logs):
"""Updates references to contrib.layers.variance_scaling_initializer.
Transforms:
tf.contrib.layers.variance_scaling_initializer(
factor, mode, uniform, seed, dtype
) to
tf.compat.v1.keras.initializers.VarianceScaling(
scale=factor, mode=mode.lower(),
distribution=("uniform" if uniform else "truncated_normal"),
seed=seed, dtype=dtype)
And handles the case where no factor is provided and scale needs to be
set to 2.0 to match contrib's default instead of tf.keras.initializer's
default of 1.0
"""
def _replace_distribution(parent, old_value):
"""Replaces old_value: ("uniform" if (old_value) else "truncated_normal")"""
new_value = pasta.parse(
"\"uniform\" if old_value else \"truncated_normal\"")
ifexpr = new_value.body[0].value
pasta.ast_utils.replace_child(ifexpr, ifexpr.test, old_value)
pasta.ast_utils.replace_child(parent, old_value, new_value)
pasta.base.formatting.set(new_value, "prefix", "(")
pasta.base.formatting.set(new_value, "suffix", ")")
def _replace_mode(parent, old_value):
"""Replaces old_value with (old_value).lower()."""
new_value = pasta.parse("mode.lower()")
mode = new_value.body[0].value.func
pasta.ast_utils.replace_child(mode, mode.value, old_value)
# This copies the prefix and suffix on old_value to new_value.
pasta.ast_utils.replace_child(parent, old_value, new_value)
    # Put parentheses around the old mode value (and remove the old prefix/
# suffix, they should only be around new_value).
pasta.base.formatting.set(old_value, "prefix", "(")
pasta.base.formatting.set(old_value, "suffix", ")")
# Need to keep track of scale because slim & keras
# have different defaults
found_scale = False
for keyword_arg in node.keywords:
if keyword_arg.arg == "factor":
keyword_arg.arg = "scale"
found_scale = True
if keyword_arg.arg == "mode":
_replace_mode(keyword_arg, keyword_arg.value)
if keyword_arg.arg == "uniform":
keyword_arg.arg = "distribution"
_replace_distribution(keyword_arg, keyword_arg.value)
# Handle any detected positional arguments
if len(node.args) >= 1:
found_scale = True
if len(node.args) >= 2:
_replace_mode(node, node.args[1])
if len(node.args) >= 3:
_replace_distribution(node, node.args[2])
# If no scale was provided, make tf 2.0 use slim's default factor
if not found_scale:
# Parse with pasta instead of ast to avoid emitting a spurious trailing \n.
scale_value = pasta.parse("2.0")
node.keywords = ([ast.keyword(arg="scale", value=scale_value)]
+ node.keywords)
lineno = node.func.value.lineno
col_offset = node.func.value.col_offset
node.func.value = ast_edits.full_name_node("tf.compat.v1.keras.initializers")
node.func.value.lineno = lineno
node.func.value.col_offset = col_offset
node.func.attr = "VarianceScaling"
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Changing tf.contrib.layers.variance_scaling_initializer"
" to a tf.compat.v1.keras.initializers.VarianceScaling and"
" converting arguments.\n"))
return node
def _contrib_layers_l1_regularizer_transformer(
parent, node, full_name, name, logs):
"""Replace slim l1 regularizer with Keras one.
This entails renaming the 'scale' arg to 'l' and dropping any
provided scope arg.
"""
# Check if we have a scale or scope keyword arg
scope_keyword = None
for keyword in node.keywords:
if keyword.arg == "scale":
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Renaming scale arg of regularizer\n"))
keyword.arg = "l"
if keyword.arg == "scope":
scope_keyword = keyword
# Remove the scope keyword or arg if it is present
if scope_keyword:
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Dropping scope arg from tf.contrib.layers.l1_regularizer,"
" because it is unsupported in tf.keras.regularizers.l1\n"))
node.keywords.remove(scope_keyword)
if len(node.args) > 1:
node.args = node.args[:1]
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Dropping scope arg from tf.contrib.layers.l1_regularizer,"
" because it is unsupported in tf.keras.regularizers.l1\n"))
lineno = node.func.value.lineno
col_offset = node.func.value.col_offset
node.func.value = ast_edits.full_name_node("tf.keras.regularizers")
node.func.value.lineno = lineno
node.func.value.col_offset = col_offset
node.func.attr = "l1"
return node
def _contrib_layers_l2_regularizer_transformer(
parent, node, full_name, name, logs):
"""Replace slim l2 regularizer with Keras one, with l=0.5*scale.
Also drops the scope argument.
"""
def _replace_scale_node(parent, old_value):
"""Replaces old_value with 0.5*(old_value)."""
half = ast.Num(n=0.5)
half.lineno = 0
half.col_offset = 0
new_value = ast.BinOp(left=half, op=ast.Mult(),
right=old_value)
# This copies the prefix and suffix on old_value to new_value.
pasta.ast_utils.replace_child(parent, old_value, new_value)
# Put parentheses around scale.value (and remove the old prefix/
# suffix, they should only be around new_value).
pasta.base.formatting.set(old_value, "prefix", "(")
pasta.base.formatting.set(old_value, "suffix", ")")
# Check if we have a scale or scope keyword arg
scope_keyword = None
for keyword in node.keywords:
if keyword.arg == "scale":
keyword.arg = "l"
_replace_scale_node(keyword, keyword.value)
if keyword.arg == "scope":
scope_keyword = keyword
# Maybe it was a positional arg
if len(node.args) >= 1:
_replace_scale_node(node, node.args[0])
# Remove the scope keyword or arg if it is present
if scope_keyword:
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Dropping scope arg from tf.contrib.layers.l2_regularizer,"
" because it is unsupported in tf.keras.regularizers.l2\n"))
node.keywords.remove(scope_keyword)
if len(node.args) > 1:
node.args = node.args[:1]
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Dropping scope arg from tf.contrib.layers.l2_regularizer,"
" because it is unsupported in tf.keras.regularizers.l2\n"))
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Multiplying scale arg of tf.contrib.layers.l2_regularizer"
" by half to what tf.keras.regularizers.l2 expects.\n"))
lineno = node.func.value.lineno
col_offset = node.func.value.col_offset
node.func.value = ast_edits.full_name_node("tf.keras.regularizers")
node.func.value.lineno = lineno
node.func.value.col_offset = col_offset
node.func.attr = "l2"
return node
def _name_scope_transformer(parent, node, full_name, name, logs):
"""Fix name scope invocation to use 'default_name' and omit 'values' args."""
name_found, name = ast_edits.get_arg_value(node, "name", 0)
default_found, default_name = ast_edits.get_arg_value(node, "default_name", 1)
# If an actual name was given...
if name_found and pasta.dump(name) != "None":
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"`name` passed to `name_scope`. Because you may be re-entering"
" an existing scope, it is not safe to convert automatically, "
" the v2 name_scope does not support re-entering scopes by"
" name.\n"))
# Rename to compat.v1
new_name = "tf.compat.v1.name_scope"
logs.append((ast_edits.INFO, node.func.lineno, node.func.col_offset,
"Renamed %r to %r" % (full_name, new_name)))
new_name_node = ast_edits.full_name_node(new_name, node.func.ctx)
ast.copy_location(new_name_node, node.func)
pasta.ast_utils.replace_child(node, node.func, new_name_node)
return node
if default_found:
# New name scope doesn't have name, but it has a default name. We use
# name=default_name, and values can be dropped (it's only for
# error reporting and useless outside of graph mode).
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Using default_name as name in call to name_scope.\n"))
# Remove all args other than name
node.args = []
node.keywords = [ast.keyword(arg="name", value=default_name)]
return node
logs.append((ast_edits.ERROR, node.lineno, node.col_offset,
"name_scope call with neither name nor default_name cannot be "
"converted properly."))
def _rename_to_compat_v1(node, full_name, logs, reason):
new_name = six.ensure_str(full_name).replace("tf.", "tf.compat.v1.", 1)
return _rename_func(node, full_name, new_name, logs, reason)
def _rename_func(node, full_name, new_name, logs, reason):
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Renamed %r to %r: %s" % (full_name, new_name, reason)))
new_name_node = ast_edits.full_name_node(new_name, node.func.ctx)
ast.copy_location(new_name_node, node.func)
pasta.ast_utils.replace_child(node, node.func, new_name_node)
return node
def _string_split_transformer(parent, node, full_name, name, logs):
"""Update tf.string_split arguments: skip_empty, sep, result_type, source."""
# Check the skip_empty parameter: if not false, then use compat.v1.
for i, kw in enumerate(node.keywords):
if kw.arg == "skip_empty":
if _is_ast_false(kw.value):
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"removed argument skip_empty for tf.string_split."))
node.keywords.pop(i)
break
else:
return _rename_to_compat_v1(
node, full_name, logs, "tf.string_split's replacement no longer "
"takes the skip_empty argument.")
# Check the sep parameter: if it's definitely an empty string, use
# tf.strings.bytes_split(). If we can't tell, then use compat.v1.
found_sep = False
for i, kw in enumerate(node.keywords):
if kw.arg == "sep":
found_sep = True
if isinstance(kw.value, ast.Str):
if kw.value.s == "":
node = _rename_func(
node, full_name, "tf.strings.bytes_split", logs,
"Splitting bytes is not handled by tf.strings.bytes_split().")
node.keywords.pop(i)
else:
return _rename_to_compat_v1(
node, full_name, logs,
"The semantics for tf.string_split's sep parameter have changed "
"when sep is the empty string; but sep is not a string literal, "
"so we can't tell if it's an empty string.")
if not found_sep:
return _rename_to_compat_v1(
node, full_name, logs,
"The semantics for tf.string_split's sep parameter have changed "
"when sep unspecified: it now splits on all whitespace, not just "
"the space character.")
# Check the result_type parameter
return _string_split_rtype_transformer(parent, node, full_name, name, logs)
def _string_split_rtype_transformer(parent, node, full_name, name, logs):
"""Update tf.strings.split arguments: result_type, source."""
# Remove the "result_type" argument.
need_to_sparse = True
for i, kw in enumerate(node.keywords):
if kw.arg == "result_type":
if (isinstance(kw.value, ast.Str) and
kw.value.s in ("RaggedTensor", "SparseTensor")):
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Removed argument result_type=%r for function %s" %
(kw.value.s, full_name or name)))
node.keywords.pop(i)
if kw.value.s == "RaggedTensor":
need_to_sparse = False
else:
return _rename_to_compat_v1(
node, full_name, logs,
"%s no longer takes the result_type parameter." % full_name)
break
for i, kw in enumerate(node.keywords):
if kw.arg == "source":
kw.arg = "input"
# If necessary, add a call to .to_sparse() to convert the output of
# strings.split from a RaggedTensor to a SparseTensor.
if need_to_sparse:
if (isinstance(parent, ast.Attribute) and parent.attr == "to_sparse"):
return # Prevent infinite recursion (since child nodes are transformed)
logs.append(
(ast_edits.INFO, node.lineno, node.col_offset,
"Adding call to RaggedTensor.to_sparse() to result of strings.split, "
"since it now returns a RaggedTensor."))
node = ast.Attribute(value=copy.deepcopy(node), attr="to_sparse")
try:
node = ast.Call(node, [], [])
except TypeError:
node = ast.Call(node, [], [], None, None)
return node
| tensorflow/tensorflow | tensorflow/tools/compatibility/tf_upgrade_v2.py | Python | apache-2.0 | 103,968 | ["Gaussian", "VisIt"] | e816f1c820ad1148dd0e2e2b7ad27a6d3f02d3137a500df875569f81ba603f0a |
# Pizza.py toolkit, www.cs.sandia.gov/~sjplimp/pizza.html
# Steve Plimpton, sjplimp@sandia.gov, Sandia National Laboratories
#
# Copyright (2005) Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
# certain rights in this software. This software is distributed under
# the GNU General Public License.
# latgen tool
oneline = "Convert LAMMPS snapshots to latgen format"
docstr = """
x = latgen(d)        d = object containing atom coords (dump, data)
x.one()              write first snapshot to latgen
x.single(N)          write snapshot for timestep N to latgen
x.single(N,"file")   write snapshot for timestep N to file
"""
# History
# 8/05, Steve Plimpton (SNL): original version
# ToDo list
# Variables
# data = data file to read from
# Imports and external programs
import sys
import numpy as np
# Class definition
class latgen:
# --------------------------------------------------------------------
def __init__(self,data):
self.data = data
# --------------------------------------------------------------------
def one(self):
self.single(0)
# --------------------------------------------------------------------
def single(self,time,*args):
if len(args) == 0: file = "cell.in"
#elif args[0][-5:] == ".vasp": file = args[0]
else: file = args[0]
#self.data.scale()
which = self.data.findtime(time)
time,box,atoms,bonds,tris,lines = self.data.viz(which)
f = open(file,"w")
    #print(self.data.title, end="", file=f)
    print(1.0, file=f)  # lattice constant
xlo,ylo,zlo,xhi,yhi,zhi,xy,xz,yz=box[0],box[1],box[2],box[3],box[4],box[5],box[6],box[7],box[8]
lx=xhi-xlo
ly=yhi-ylo
lz=zhi-zlo
print >>f,"%f\t0.0\t0.0" % (lx)
print >>f,"%f\t%f\t0.0" % (xy,ly)
print >>f,"%f\t%f\t%f" % (xz,yz,lz)
#print >>f,"Cartesian"
#print >>f,len(atoms)
atoms=self.scale(box,atoms)
typesatom=[atom[1] for atom in atoms ]
types=list(set(typesatom))
ntype=len(types)
atomOfType={}
for type in types:
atomOfType[type]=[atom for atom in atoms if atom[1]==type]
for type in types:
print >>f,"%d\t" % (len(atomOfType[type])),
print >>f,""
for type in types:
for atom in atomOfType[type]:
itype = int(atom[1])
        print(atom[2], atom[3], atom[4], file=f)
f.close()
def scale(self,box,atoms):
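    # Convert Cartesian coordinates to fractional coordinates by applying the
    # inverse of the upper-triangular LAMMPS triclinic cell matrix: h0..h2 are
    # the box edge lengths and h3=yz, h4=xz, h5=xy are the tilt factors.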
xlo,ylo,zlo,xhi,yhi,zhi,xy,xz,yz=box[0],box[1],box[2],box[3],box[4],box[5],box[6],box[7],box[8]
atoms=np.array(atoms)
if 0 and xy == 0.0 and xz == 0.0 and yz == 0.0:
xprdinv = 1.0 / (xhi - xlo)
yprdinv = 1.0 / (yhi - ylo)
zprdinv = 1.0 / (zhi - zlo)
atoms[:,2] = (atoms[:,2] - xlo) * xprdinv
atoms[:,3] = (atoms[:,3] - ylo) * yprdinv
atoms[:,4] = (atoms[:,4] - zlo) * zprdinv
else:
h0 = xhi - xlo
h1 = yhi - ylo
h2 = zhi - zlo
h3 = yz
h4 = xz
h5 = xy
h0inv = 1.0 / h0
h1inv = 1.0 / h1
h2inv = 1.0 / h2
h3inv = yz / (h1*h2)
h4inv = (h3*h5 - h1*h4) / (h0*h1*h2)
h5inv = xy / (h0*h1)
atoms[:,2] = (atoms[:,2] - xlo)*h0inv + \
(atoms[:,3] - ylo)*h5inv + \
(atoms[:,4] - zlo)*h4inv
atoms[:,3] = (atoms[:,3] - ylo)*h1inv + \
(atoms[:,4] - zlo)*h3inv
atoms[:,4] = (atoms[:,4] - zlo)*h2inv
return atoms
| vanceeasleaf/aces | aces/libs/pizza/latgen.py | Python | gpl-2.0 | 3,391 | ["LAMMPS", "VASP"] | 37f7b957250a2e053aa17b3385cf08a62329f0d2535f690866f28ef867a9f73c |
import numpy as np
from ase.optimize.optimize import Dynamics
from ase.optimize.fire import FIRE
from ase.units import kB
from ase.parallel import world
from ase.io.trajectory import PickleTrajectory
class BasinHopping(Dynamics):
"""Basin hopping algorythm.
After Wales and Doye, J. Phys. Chem. A, vol 101 (1997) 5111-5116"""
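    # Illustrative usage (assumes `atoms` is an ase.Atoms object with a
    # calculator attached; dr=0.5 is an arbitrary example value):
    #   bh = BasinHopping(atoms, temperature=100 * kB, dr=0.5)
    #   bh.run(steps=100)
    #   Emin, best = bh.get_minimum()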
def __init__(self, atoms,
temperature=100 * kB,
optimizer=FIRE,
fmax=0.1,
dr=0.1,
logfile='-',
trajectory='lowest.traj',
optimizer_logfile='-',
local_minima_trajectory='local_minima.traj',
adjust_cm=True):
Dynamics.__init__(self, atoms, logfile, trajectory)
self.kT = temperature
self.optimizer = optimizer
self.fmax = fmax
self.dr = dr
if adjust_cm:
self.cm = atoms.get_center_of_mass()
else:
self.cm = None
self.optimizer_logfile = optimizer_logfile
self.lm_trajectory = local_minima_trajectory
if isinstance(local_minima_trajectory, str):
self.lm_trajectory = PickleTrajectory(local_minima_trajectory,
'w', atoms)
self.initialize()
def initialize(self):
self.positions = 0.0 * self.atoms.get_positions()
self.Emin = self.get_energy(self.atoms.get_positions()) or 1.e32
self.rmin = self.atoms.get_positions()
self.positions = self.atoms.get_positions()
self.call_observers()
self.log(-1, self.Emin, self.Emin)
def run(self, steps):
"""Hop the basins for defined number of steps."""
ro = self.positions
Eo = self.get_energy(ro)
        for step in range(steps):
            En = None
            while En is None:
                rn = self.move(ro)
                En = self.get_energy(rn)
if En < self.Emin:
# new minimum found
self.Emin = En
self.rmin = self.atoms.get_positions()
self.call_observers()
rn = self.rmin
self.log(step, En, self.Emin)
accept = np.exp((Eo - En) / self.kT) > np.random.uniform()
if accept:
ro = rn
Eo = En
def log(self, step, En, Emin):
if self.logfile is None:
return
name = self.__class__.__name__
self.logfile.write('%s: step %d, energy %15.6f, emin %15.6f\n'
% (name, step, En, Emin))
self.logfile.flush()
def move(self, ro):
"""Move atoms by a random step."""
atoms = self.atoms
# displace coordinates
disp = np.random.uniform(-1., 1., (len(atoms), 3))
rn = ro + self.dr * disp
atoms.set_positions(rn)
if self.cm is not None:
cm = atoms.get_center_of_mass()
atoms.translate(self.cm - cm)
rn = atoms.get_positions()
world.broadcast(rn, 0)
atoms.set_positions(rn)
return atoms.get_positions()
def get_minimum(self):
"""Return minimal energy and configuration."""
atoms = self.atoms.copy()
atoms.set_positions(self.rmin)
return self.Emin, atoms
def get_energy(self, positions):
"""Return the energy of the nearest local minimum."""
        if np.any(self.positions != positions):
self.positions = positions
self.atoms.set_positions(positions)
try:
opt = self.optimizer(self.atoms, logfile=self.optimizer_logfile)
opt.run(fmax=self.fmax)
if self.lm_trajectory is not None:
self.lm_trajectory.write(self.atoms)
self.energy = self.atoms.get_potential_energy()
            except Exception:
                # Something went wrong.
                # In GPAW the atoms are probably too near to each other.
return None
return self.energy
| JConwayAWT/PGSS14CC | lib/python/multimetallics/ase/optimize/basin.py | Python | gpl-2.0 | 4,101 | ["ASE", "GPAW"] | a72485d2bb87f5563db581b32e1117f4c491f278fcd465eec9a24a50e4bb137e |
# coding=utf-8
"""The main BERT model and related functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import json
import math
import re
import six
import tensorflow as tf
class BertConfig(object):
"""Configuration for `BertModel`."""
def __init__(self,
vocab_size,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
initializer_range=0.02):
"""Constructs BertConfig.
Args:
vocab_size: Vocabulary size of `inputs_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler.
hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
initializer_range: The stdev of the truncated_normal_initializer for
initializing all weight matrices.
"""
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
@classmethod
def from_dict(cls, json_object):
"""Constructs a `BertConfig` from a Python dictionary of parameters."""
config = BertConfig(vocab_size=None)
for (key, value) in six.iteritems(json_object):
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `BertConfig` from a json file of parameters."""
with tf.gfile.GFile(json_file, "r") as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
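# Illustrative round trip (not part of the original API docs): a config can be
# serialized and rebuilt via its JSON helpers, e.g.
#   config = BertConfig(vocab_size=32000)
#   restored = BertConfig.from_dict(json.loads(config.to_json_string()))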
class BertModel(object):
"""BERT model ("Bidirectional Embedding Representations from a Transformer").
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = tf.constant([[31, 51, 99], [15, 5, 0]])
input_mask = tf.constant([[1, 1, 1], [1, 1, 0]])
token_type_ids = tf.constant([[0, 0, 1], [0, 2, 0]])
config = modeling.BertConfig(vocab_size=32000, hidden_size=512,
num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024)
model = modeling.BertModel(config=config, is_training=True,
input_ids=input_ids, input_mask=input_mask, token_type_ids=token_type_ids)
label_embeddings = tf.get_variable(...)
  pooled_output = model.get_pooled_output()
  logits = tf.matmul(pooled_output, label_embeddings)
...
```
"""
def __init__(self,
config,
is_training,
input_ids,
input_mask=None,
token_type_ids=None,
use_one_hot_embeddings=True,
scope=None):
"""Constructor for BertModel.
Args:
config: `BertConfig` instance.
      is_training: bool. True for training model, false for eval model. Controls
whether dropout will be applied.
input_ids: int32 Tensor of shape [batch_size, seq_length].
input_mask: (optional) int32 Tensor of shape [batch_size, seq_length].
token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].
use_one_hot_embeddings: (optional) bool. Whether to use one-hot word
embeddings or tf.embedding_lookup() for the word embeddings. On the TPU,
        it is much faster if this is True; on the CPU or GPU, it is faster if
this is False.
scope: (optional) variable scope. Defaults to "bert".
Raises:
ValueError: The config is invalid or one of the input tensor shapes
is invalid.
"""
config = copy.deepcopy(config)
if not is_training:
config.hidden_dropout_prob = 0.0
config.attention_probs_dropout_prob = 0.0
input_shape = get_shape_list(input_ids, expected_rank=2)
batch_size = input_shape[0]
seq_length = input_shape[1]
if input_mask is None:
input_mask = tf.ones(
shape=[batch_size, seq_length], dtype=tf.int32)
if token_type_ids is None:
token_type_ids = tf.zeros(
shape=[batch_size, seq_length], dtype=tf.int32)
with tf.variable_scope(scope, default_name="bert"):
with tf.variable_scope("embeddings"):
# Perform embedding lookup on the word ids.
(self.embedding_output,
self.embedding_table) = embedding_lookup(
input_ids=input_ids,
vocab_size=config.vocab_size,
embedding_size=config.hidden_size,
initializer_range=config.initializer_range,
word_embedding_name="word_embeddings",
use_one_hot_embeddings=use_one_hot_embeddings)
# Add positional embeddings and token type embeddings, then layer
# normalize and perform dropout.
self.embedding_output = embedding_postprocessor(
input_tensor=self.embedding_output,
use_token_type=True,
token_type_ids=token_type_ids,
token_type_vocab_size=config.type_vocab_size,
token_type_embedding_name="token_type_embeddings",
use_position_embeddings=True,
position_embedding_name="position_embeddings",
initializer_range=config.initializer_range,
max_position_embeddings=config.max_position_embeddings,
dropout_prob=config.hidden_dropout_prob)
with tf.variable_scope("encoder"):
# This converts a 2D mask of shape [batch_size, seq_length] to a 3D
# mask of shape [batch_size, seq_length, seq_length] which is used
# for the attention scores.
attention_mask = create_attention_mask_from_input_mask(
input_ids, input_mask)
# Run the stacked transformer.
# `sequence_output` shape = [batch_size, seq_length, hidden_size].
self.all_encoder_layers = transformer_model(
input_tensor=self.embedding_output,
attention_mask=attention_mask,
hidden_size=config.hidden_size,
num_hidden_layers=config.num_hidden_layers,
num_attention_heads=config.num_attention_heads,
intermediate_size=config.intermediate_size,
intermediate_act_fn=get_activation(config.hidden_act),
hidden_dropout_prob=config.hidden_dropout_prob,
attention_probs_dropout_prob=config.
attention_probs_dropout_prob,
initializer_range=config.initializer_range,
do_return_all_layers=True)
self.sequence_output = self.all_encoder_layers[-1]
# The "pooler" converts the encoded sequence tensor of shape
# [batch_size, seq_length, hidden_size] to a tensor of shape
# [batch_size, hidden_size]. This is necessary for segment-level
# (or segment-pair-level) classification tasks where we need a fixed
# dimensional representation of the segment.
# with tf.variable_scope("pooler"):
# # We "pool" the model by simply taking the hidden state corresponding
# # to the first token. We assume that this has been pre-trained
# first_token_tensor = tf.squeeze(
# self.sequence_output[:, 0:1, :], axis=1)
# self.pooled_output = tf.layers.dense(
# first_token_tensor,
# config.hidden_size,
# activation=tf.tanh,
# kernel_initializer=create_initializer(
# config.initializer_range))
    def get_pooled_output(self):
        # NOTE: the "pooler" block above is commented out in this variant, so
        # `self.pooled_output` is never set and calling this accessor raises
        # AttributeError.
        return self.pooled_output
def get_sequence_output(self):
"""Gets final hidden layer of encoder.
Returns:
float Tensor of shape [batch_size, seq_length, hidden_size] corresponding
to the final hidden layer of the transformer encoder.
"""
return self.sequence_output
def get_all_encoder_layers(self):
return self.all_encoder_layers
def get_embedding_output(self):
"""Gets output of the embedding lookup (i.e., input to the transformer).
Returns:
float Tensor of shape [batch_size, seq_length, hidden_size] corresponding
to the output of the embedding layer, after summing the word
embeddings with the positional embeddings and the token type embeddings,
then performing layer normalization. This is the input to the transformer.
"""
return self.embedding_output
def get_embedding_table(self):
return self.embedding_table
def gelu(input_tensor):
"""Gaussian Error Linear Unit.
This is a smoother version of the ReLU.
Original paper: https://arxiv.org/abs/1606.08415
Args:
input_tensor: float Tensor to perform activation.
Returns:
`input_tensor` with the GELU activation applied.
"""
cdf = 0.5 * (1.0 + tf.erf(input_tensor / tf.sqrt(2.0)))
return input_tensor * cdf
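# Editor's note: a minimal pure-Python reference for the same GELU, handy as
# a sanity check against the TF version above (stdlib only; not part of the
# original code):
import math

def _gelu_reference(x):
    """GELU for a single float: x * Phi(x), with Phi the standard normal CDF."""
    return x * 0.5 * (1.0 + math.erf(x / math.sqrt(2.0)))

# e.g. _gelu_reference(0.0) == 0.0 and _gelu_reference(1.0) is ~0.8413.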
def get_activation(activation_string):
"""Maps a string to a Python function, e.g., "relu" => `tf.nn.relu`.
Args:
activation_string: String name of the activation function.
Returns:
A Python function corresponding to the activation function. If
`activation_string` is None, empty, or "linear", this will return None.
If `activation_string` is not a string, it will return `activation_string`.
Raises:
ValueError: The `activation_string` does not correspond to a known
activation.
"""
# We assume that anything that's not a string is already an activation
# function, so we just return it.
if not isinstance(activation_string, six.string_types):
return activation_string
if not activation_string:
return None
act = activation_string.lower()
if act == "linear":
return None
elif act == "relu":
return tf.nn.relu
elif act == "gelu":
return gelu
elif act == "tanh":
return tf.tanh
else:
raise ValueError("Unsupported activation: %s" % act)
def get_assignment_map_from_checkpoint(tvars, init_checkpoint):
"""Compute the union of the current variables and checkpoint variables."""
assignment_map = {}
initialized_variable_names = {}
name_to_variable = collections.OrderedDict()
for var in tvars:
name = var.name
m = re.match("^(.*):\\d+$", name)
if m is not None:
name = m.group(1)
name_to_variable[name] = var
init_vars = tf.train.list_variables(init_checkpoint)
assignment_map = collections.OrderedDict()
for x in init_vars:
(name, var) = (x[0], x[1])
if name not in name_to_variable:
continue
assignment_map[name] = name
initialized_variable_names[name] = 1
initialized_variable_names[name + ":0"] = 1
return (assignment_map, initialized_variable_names)
def dropout(input_tensor, dropout_prob):
"""Perform dropout.
Args:
input_tensor: float Tensor.
dropout_prob: Python float. The probability of dropping out a value (NOT of
*keeping* a dimension as in `tf.nn.dropout`).
Returns:
A version of `input_tensor` with dropout applied.
"""
if dropout_prob is None or dropout_prob == 0.0:
return input_tensor
output = tf.nn.dropout(input_tensor, 1.0 - dropout_prob)
return output
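# Editor's note (hedged sketch): TF1's `tf.nn.dropout` takes a *keep*
# probability, hence the `1.0 - dropout_prob` inversion above, and it rescales
# surviving elements by 1/keep_prob ("inverted dropout") so the expected
# activation is unchanged. A NumPy analogue for intuition (not original code):
import numpy as np

def _inverted_dropout_reference(x, dropout_prob, rng=None):
    rng = rng if rng is not None else np.random.RandomState(0)
    keep_prob = 1.0 - dropout_prob
    mask = rng.binomial(1, keep_prob, size=x.shape)
    return x * mask / keep_prob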
def layer_norm(input_tensor, name=None):
"""Run layer normalization on the last dimension of the tensor."""
return tf.contrib.layers.layer_norm(
inputs=input_tensor,
begin_norm_axis=-1,
begin_params_axis=-1,
scope=name)
def layer_norm_and_dropout(input_tensor, dropout_prob, name=None):
"""Runs layer normalization followed by dropout."""
output_tensor = layer_norm(input_tensor, name)
output_tensor = dropout(output_tensor, dropout_prob)
return output_tensor
def create_initializer(initializer_range=0.02):
"""Creates a `truncated_normal_initializer` with the given range."""
return tf.truncated_normal_initializer(stddev=initializer_range)
def embedding_lookup(input_ids,
vocab_size,
embedding_size=128,
initializer_range=0.02,
word_embedding_name="word_embeddings",
use_one_hot_embeddings=False):
"""Looks up words embeddings for id tensor.
Args:
input_ids: int32 Tensor of shape [batch_size, seq_length] containing word
ids.
vocab_size: int. Size of the embedding vocabulary.
embedding_size: int. Width of the word embeddings.
initializer_range: float. Embedding initialization range.
word_embedding_name: string. Name of the embedding table.
use_one_hot_embeddings: bool. If True, use one-hot method for word
embeddings. If False, use `tf.nn.embedding_lookup()`. One hot is better
for TPUs.
Returns:
float Tensor of shape [batch_size, seq_length, embedding_size].
"""
# This function assumes that the input is of shape [batch_size, seq_length,
# num_inputs].
#
# If the input is a 2D tensor of shape [batch_size, seq_length], we
# reshape to [batch_size, seq_length, 1].
if input_ids.shape.ndims == 2:
input_ids = tf.expand_dims(input_ids, axis=[-1])
embedding_table = tf.get_variable(
name=word_embedding_name,
shape=[vocab_size, embedding_size],
initializer=create_initializer(initializer_range))
if use_one_hot_embeddings:
flat_input_ids = tf.reshape(input_ids, [-1])
one_hot_input_ids = tf.one_hot(flat_input_ids, depth=vocab_size)
output = tf.matmul(one_hot_input_ids, embedding_table)
else:
output = tf.nn.embedding_lookup(embedding_table, input_ids)
input_shape = get_shape_list(input_ids)
output = tf.reshape(output,
input_shape[0:-1] + [input_shape[-1] * embedding_size])
return (output, embedding_table)
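# Editor's note: the `use_one_hot_embeddings` branch above is mathematically a
# gather -- multiplying one-hot rows by the table selects rows of the table.
# A tiny NumPy check of that equivalence (illustrative only):
import numpy as np

def _one_hot_equals_gather():
    table = np.arange(12.0).reshape(4, 3)   # [vocab_size=4, embedding_size=3]
    ids = np.array([1, 3, 0])               # flat token ids
    one_hot = np.eye(4)[ids]                # [3, 4] one-hot matrix
    assert np.array_equal(one_hot @ table, table[ids])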
def embedding_postprocessor(input_tensor,
use_token_type=False,
token_type_ids=None,
token_type_vocab_size=16,
token_type_embedding_name="token_type_embeddings",
use_position_embeddings=True,
position_embedding_name="position_embeddings",
initializer_range=0.02,
max_position_embeddings=512,
dropout_prob=0.1):
"""Performs various post-processing on a word embedding tensor.
Args:
input_tensor: float Tensor of shape [batch_size, seq_length,
embedding_size].
use_token_type: bool. Whether to add embeddings for `token_type_ids`.
token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].
Must be specified if `use_token_type` is True.
token_type_vocab_size: int. The vocabulary size of `token_type_ids`.
token_type_embedding_name: string. The name of the embedding table variable
for token type ids.
use_position_embeddings: bool. Whether to add position embeddings for the
position of each token in the sequence.
position_embedding_name: string. The name of the embedding table variable
for positional embeddings.
initializer_range: float. Range of the weight initialization.
max_position_embeddings: int. Maximum sequence length that might ever be
used with this model. This can be longer than the sequence length of
input_tensor, but cannot be shorter.
dropout_prob: float. Dropout probability applied to the final output tensor.
Returns:
float tensor with same shape as `input_tensor`.
Raises:
ValueError: One of the tensor shapes or input values is invalid.
"""
input_shape = get_shape_list(input_tensor, expected_rank=3)
batch_size = input_shape[0]
seq_length = input_shape[1]
width = input_shape[2]
output = input_tensor
if use_token_type:
if token_type_ids is None:
raise ValueError("`token_type_ids` must be specified if"
"`use_token_type` is True.")
token_type_table = tf.get_variable(
name=token_type_embedding_name,
shape=[token_type_vocab_size, width],
initializer=create_initializer(initializer_range))
# This vocab will be small so we always do one-hot here, since it is always
# faster for a small vocabulary.
flat_token_type_ids = tf.reshape(token_type_ids, [-1])
one_hot_ids = tf.one_hot(
flat_token_type_ids, depth=token_type_vocab_size)
token_type_embeddings = tf.matmul(one_hot_ids, token_type_table)
token_type_embeddings = tf.reshape(token_type_embeddings,
[batch_size, seq_length, width])
output += token_type_embeddings
if use_position_embeddings:
assert_op = tf.assert_less_equal(seq_length, max_position_embeddings)
with tf.control_dependencies([assert_op]):
full_position_embeddings = tf.get_variable(
name=position_embedding_name,
shape=[max_position_embeddings, width],
initializer=create_initializer(initializer_range))
# Since the position embedding table is a learned variable, we create it
# using a (long) sequence length `max_position_embeddings`. The actual
# sequence length might be shorter than this, for faster training of
# tasks that do not have long sequences.
#
# So `full_position_embeddings` is effectively an embedding table
# for position [0, 1, 2, ..., max_position_embeddings-1], and the current
# sequence has positions [0, 1, 2, ... seq_length-1], so we can just
# perform a slice.
position_embeddings = tf.slice(full_position_embeddings, [0, 0],
[seq_length, -1])
num_dims = len(output.shape.as_list())
# Only the last two dimensions are relevant (`seq_length` and `width`), so
# we broadcast among the first dimensions, which is typically just
# the batch size.
position_broadcast_shape = []
for _ in range(num_dims - 2):
position_broadcast_shape.append(1)
position_broadcast_shape.extend([seq_length, width])
position_embeddings = tf.reshape(position_embeddings,
position_broadcast_shape)
output += position_embeddings
output = layer_norm_and_dropout(output, dropout_prob)
return output
def create_attention_mask_from_input_mask(from_tensor, to_mask):
"""Create 3D attention mask from a 2D tensor mask.
Args:
from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].
to_mask: int32 Tensor of shape [batch_size, to_seq_length].
Returns:
float Tensor of shape [batch_size, from_seq_length, to_seq_length].
"""
from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
batch_size = from_shape[0]
from_seq_length = from_shape[1]
to_shape = get_shape_list(to_mask, expected_rank=2)
to_seq_length = to_shape[1]
to_mask = tf.cast(
tf.reshape(to_mask, [batch_size, 1, to_seq_length]), tf.float32)
# We don't assume that `from_tensor` is a mask (although it could be). We
    # don't actually care if we attend *from* padding tokens (only *to*
    # padding tokens) so we create a tensor of all ones.
#
# `broadcast_ones` = [batch_size, from_seq_length, 1]
broadcast_ones = tf.ones(
shape=[batch_size, from_seq_length, 1], dtype=tf.float32)
# Here we broadcast along two dimensions to create the mask.
mask = broadcast_ones * to_mask
return mask
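# Editor's note: a NumPy illustration of the broadcast above -- a [B, T]
# padding mask becomes a [B, F, T] attention mask by multiplying ones of shape
# [B, F, 1] with the mask reshaped to [B, 1, T] (illustrative only):
import numpy as np

def _attention_mask_reference(to_mask, from_seq_length):
    batch_size, to_seq_length = to_mask.shape
    ones = np.ones((batch_size, from_seq_length, 1), dtype=np.float32)
    return ones * to_mask.reshape(batch_size, 1, to_seq_length)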
def attention_layer(from_tensor,
to_tensor,
attention_mask=None,
num_attention_heads=1,
size_per_head=512,
query_act=None,
key_act=None,
value_act=None,
attention_probs_dropout_prob=0.0,
initializer_range=0.02,
do_return_2d_tensor=False,
batch_size=None,
from_seq_length=None,
to_seq_length=None):
"""Performs multi-headed attention from `from_tensor` to `to_tensor`.
    This is an implementation of multi-headed attention based on "Attention
    Is All You Need". If `from_tensor` and `to_tensor` are the same, then
    this is self-attention. Each timestep in `from_tensor` attends to the
    corresponding sequence in `to_tensor`, and returns a fixed-width vector.
This function first projects `from_tensor` into a "query" tensor and
`to_tensor` into "key" and "value" tensors. These are (effectively) a list
of tensors of length `num_attention_heads`, where each tensor is of shape
[batch_size, seq_length, size_per_head].
Then, the query and key tensors are dot-producted and scaled. These are
softmaxed to obtain attention probabilities. The value tensors are then
interpolated by these probabilities, then concatenated back to a single
tensor and returned.
    In practice, the multi-headed attention is done with transposes and
reshapes rather than actual separate tensors.
Args:
from_tensor: float Tensor of shape [batch_size, from_seq_length,
from_width].
to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width].
attention_mask: (optional) int32 Tensor of shape [batch_size,
from_seq_length, to_seq_length]. The values should be 1 or 0. The
attention scores will effectively be set to -infinity for any positions in
the mask that are 0, and will be unchanged for positions that are 1.
num_attention_heads: int. Number of attention heads.
size_per_head: int. Size of each attention head.
query_act: (optional) Activation function for the query transform.
key_act: (optional) Activation function for the key transform.
value_act: (optional) Activation function for the value transform.
attention_probs_dropout_prob: (optional) float. Dropout probability of the
attention probabilities.
initializer_range: float. Range of the weight initializer.
do_return_2d_tensor: bool. If True, the output will be of shape [batch_size
* from_seq_length, num_attention_heads * size_per_head]. If False, the
output will be of shape [batch_size, from_seq_length, num_attention_heads
* size_per_head].
batch_size: (Optional) int. If the input is 2D, this might be the batch size
of the 3D version of the `from_tensor` and `to_tensor`.
from_seq_length: (Optional) If the input is 2D, this might be the seq length
of the 3D version of the `from_tensor`.
to_seq_length: (Optional) If the input is 2D, this might be the seq length
of the 3D version of the `to_tensor`.
Returns:
float Tensor of shape [batch_size, from_seq_length,
num_attention_heads * size_per_head]. (If `do_return_2d_tensor` is
true, this will be of shape [batch_size * from_seq_length,
num_attention_heads * size_per_head]).
Raises:
ValueError: Any of the arguments or tensor shapes are invalid.
"""
def transpose_for_scores(input_tensor, batch_size, num_attention_heads,
seq_length, width):
output_tensor = tf.reshape(
input_tensor, [batch_size, seq_length, num_attention_heads, width])
output_tensor = tf.transpose(output_tensor, [0, 2, 1, 3])
return output_tensor
from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
to_shape = get_shape_list(to_tensor, expected_rank=[2, 3])
if len(from_shape) != len(to_shape):
raise ValueError(
"The rank of `from_tensor` must match the rank of `to_tensor`.")
if len(from_shape) == 3:
batch_size = from_shape[0]
from_seq_length = from_shape[1]
to_seq_length = to_shape[1]
elif len(from_shape) == 2:
if (batch_size is None or from_seq_length is None
or to_seq_length is None):
raise ValueError(
"When passing in rank 2 tensors to attention_layer, the values "
"for `batch_size`, `from_seq_length`, and `to_seq_length` "
"must all be specified.")
# Scalar dimensions referenced here:
# B = batch size (number of sequences)
# F = `from_tensor` sequence length
# T = `to_tensor` sequence length
# N = `num_attention_heads`
# H = `size_per_head`
from_tensor_2d = reshape_to_matrix(from_tensor)
to_tensor_2d = reshape_to_matrix(to_tensor)
# `query_layer` = [B*F, N*H]
query_layer = tf.layers.dense(
from_tensor_2d,
num_attention_heads * size_per_head,
activation=query_act,
name="query",
kernel_initializer=create_initializer(initializer_range))
# `key_layer` = [B*T, N*H]
key_layer = tf.layers.dense(
to_tensor_2d,
num_attention_heads * size_per_head,
activation=key_act,
name="key",
kernel_initializer=create_initializer(initializer_range))
# `value_layer` = [B*T, N*H]
value_layer = tf.layers.dense(
to_tensor_2d,
num_attention_heads * size_per_head,
activation=value_act,
name="value",
kernel_initializer=create_initializer(initializer_range))
# `query_layer` = [B, N, F, H]
query_layer = transpose_for_scores(query_layer, batch_size,
num_attention_heads, from_seq_length,
size_per_head)
# `key_layer` = [B, N, T, H]
key_layer = transpose_for_scores(key_layer, batch_size,
num_attention_heads, to_seq_length,
size_per_head)
# Take the dot product between "query" and "key" to get the raw
# attention scores.
# `attention_scores` = [B, N, F, T]
attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
attention_scores = tf.multiply(attention_scores,
1.0 / math.sqrt(float(size_per_head)))
if attention_mask is not None:
# `attention_mask` = [B, 1, F, T]
attention_mask = tf.expand_dims(attention_mask, axis=[1])
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
adder = (1.0 - tf.cast(attention_mask, tf.float32)) * -10000.0
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
attention_scores += adder
# Normalize the attention scores to probabilities.
# `attention_probs` = [B, N, F, T]
attention_probs = tf.nn.softmax(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = dropout(attention_probs, attention_probs_dropout_prob)
# `value_layer` = [B, T, N, H]
value_layer = tf.reshape(
value_layer,
[batch_size, to_seq_length, num_attention_heads, size_per_head])
# `value_layer` = [B, N, T, H]
value_layer = tf.transpose(value_layer, [0, 2, 1, 3])
# `context_layer` = [B, N, F, H]
context_layer = tf.matmul(attention_probs, value_layer)
# `context_layer` = [B, F, N, H]
context_layer = tf.transpose(context_layer, [0, 2, 1, 3])
if do_return_2d_tensor:
        # `context_layer` = [B*F, N*H]
context_layer = tf.reshape(context_layer, [
batch_size * from_seq_length, num_attention_heads * size_per_head
])
else:
        # `context_layer` = [B, F, N*H]
context_layer = tf.reshape(
context_layer,
[batch_size, from_seq_length, num_attention_heads * size_per_head])
return context_layer
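# Editor's note: a compact single-head NumPy sketch of the computation above --
# scaled dot-product attention with the additive -10000.0 mask trick. The real
# layer adds learned Q/K/V projections and multiple heads (illustrative only):
import numpy as np

def _scaled_dot_product_attention(q, k, v, mask=None):
    # q: [F, H], k and v: [T, H], mask: [F, T] of 1/0 (1 = may attend).
    scores = q @ k.T / np.sqrt(float(q.shape[-1]))
    if mask is not None:
        scores += (1.0 - mask) * -10000.0
    probs = np.exp(scores - scores.max(axis=-1, keepdims=True))
    probs /= probs.sum(axis=-1, keepdims=True)   # softmax over T
    return probs @ v                             # [F, H]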
def transformer_model(input_tensor,
attention_mask=None,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
intermediate_act_fn=gelu,
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
initializer_range=0.02,
do_return_all_layers=False):
"""Multi-headed, multi-layer Transformer from "Attention is All You Need".
This is almost an exact implementation of the original Transformer encoder.
See the original paper:
https://arxiv.org/abs/1706.03762
Also see:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/models/transformer.py
Args:
input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size].
attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length,
seq_length], with 1 for positions that can be attended to and 0 in
positions that should not be.
hidden_size: int. Hidden size of the Transformer.
num_hidden_layers: int. Number of layers (blocks) in the Transformer.
num_attention_heads: int. Number of attention heads in the Transformer.
intermediate_size: int. The size of the "intermediate" (a.k.a., feed
forward) layer.
intermediate_act_fn: function. The non-linear activation function to apply
to the output of the intermediate/feed-forward layer.
hidden_dropout_prob: float. Dropout probability for the hidden layers.
attention_probs_dropout_prob: float. Dropout probability of the attention
probabilities.
initializer_range: float. Range of the initializer (stddev of truncated
normal).
do_return_all_layers: Whether to also return all layers or just the final
layer.
Returns:
float Tensor of shape [batch_size, seq_length, hidden_size], the final
hidden layer of the Transformer.
Raises:
ValueError: A Tensor shape or parameter is invalid.
"""
if hidden_size % num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (hidden_size, num_attention_heads))
attention_head_size = int(hidden_size / num_attention_heads)
input_shape = get_shape_list(input_tensor, expected_rank=3)
batch_size = input_shape[0]
seq_length = input_shape[1]
input_width = input_shape[2]
    # The Transformer adds residual connections on all layers, so the input
    # width needs to match the hidden size.
if input_width != hidden_size:
raise ValueError(
"The width of the input tensor (%d) != hidden size (%d)" %
(input_width, hidden_size))
# We keep the representation as a 2D tensor to avoid re-shaping it back and
# forth from a 3D tensor to a 2D tensor. Re-shapes are normally free on
# the GPU/CPU but may not be free on the TPU, so we want to minimize them to
# help the optimizer.
prev_output = reshape_to_matrix(input_tensor)
all_layer_outputs = []
for layer_idx in range(num_hidden_layers):
with tf.variable_scope("layer_%d" % layer_idx):
layer_input = prev_output
with tf.variable_scope("attention"):
attention_heads = []
with tf.variable_scope("self"):
attention_head = attention_layer(
from_tensor=layer_input,
to_tensor=layer_input,
attention_mask=attention_mask,
num_attention_heads=num_attention_heads,
size_per_head=attention_head_size,
attention_probs_dropout_prob=
attention_probs_dropout_prob,
initializer_range=initializer_range,
do_return_2d_tensor=True,
batch_size=batch_size,
from_seq_length=seq_length,
to_seq_length=seq_length)
attention_heads.append(attention_head)
attention_output = None
if len(attention_heads) == 1:
attention_output = attention_heads[0]
else:
# In the case where we have other sequences, we just concatenate
# them to the self-attention head before the projection.
attention_output = tf.concat(attention_heads, axis=-1)
# Run a linear projection of `hidden_size` then add a residual
# with `layer_input`.
with tf.variable_scope("output"):
attention_output = tf.layers.dense(
attention_output,
hidden_size,
kernel_initializer=create_initializer(
initializer_range))
attention_output = dropout(attention_output,
hidden_dropout_prob)
attention_output = layer_norm(attention_output +
layer_input)
# The activation is only applied to the "intermediate" hidden layer.
with tf.variable_scope("intermediate"):
intermediate_output = tf.layers.dense(
attention_output,
intermediate_size,
activation=intermediate_act_fn,
kernel_initializer=create_initializer(initializer_range))
# Down-project back to `hidden_size` then add the residual.
with tf.variable_scope("output"):
layer_output = tf.layers.dense(
intermediate_output,
hidden_size,
kernel_initializer=create_initializer(initializer_range))
layer_output = dropout(layer_output, hidden_dropout_prob)
layer_output = layer_norm(layer_output + attention_output)
prev_output = layer_output
all_layer_outputs.append(layer_output)
if do_return_all_layers:
final_outputs = []
for layer_output in all_layer_outputs:
final_output = reshape_from_matrix(layer_output, input_shape)
final_outputs.append(final_output)
return final_outputs
else:
final_output = reshape_from_matrix(prev_output, input_shape)
return final_output
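# Editor's note: summary of one encoder block in the loop above, in the
# post-layer-norm arrangement this implementation uses (pseudocode):
#
#   attn = self_attention(x)
#   x = layer_norm(x + dropout(dense(attn)))           # attention sub-layer
#   ffn = dense(x, intermediate_size, activation=act)  # "intermediate" layer
#   x = layer_norm(x + dropout(dense(ffn)))            # output sub-layer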
def get_shape_list(tensor, expected_rank=None, name=None):
"""Returns a list of the shape of tensor, preferring static dimensions.
Args:
tensor: A tf.Tensor object to find the shape of.
expected_rank: (optional) int. The expected rank of `tensor`. If this is
    specified and the `tensor` has a different rank, an exception will be
thrown.
name: Optional name of the tensor for the error message.
Returns:
A list of dimensions of the shape of tensor. All static dimensions will
be returned as python integers, and dynamic dimensions will be returned
as tf.Tensor scalars.
"""
if name is None:
name = tensor.name
if expected_rank is not None:
assert_rank(tensor, expected_rank, name)
shape = tensor.shape.as_list()
non_static_indexes = []
for (index, dim) in enumerate(shape):
if dim is None:
non_static_indexes.append(index)
if not non_static_indexes:
return shape
dyn_shape = tf.shape(tensor)
for index in non_static_indexes:
shape[index] = dyn_shape[index]
return shape
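# Editor's note: for a tensor of static shape [None, 128], the function above
# returns a two-element list whose first entry is a scalar `tf.Tensor` (the
# dynamic batch size) and whose second is the Python int 128.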
def reshape_to_matrix(input_tensor):
"""Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix)."""
ndims = input_tensor.shape.ndims
if ndims < 2:
raise ValueError("Input tensor must have at least rank 2. Shape = %s" %
(input_tensor.shape))
if ndims == 2:
return input_tensor
width = input_tensor.shape[-1]
output_tensor = tf.reshape(input_tensor, [-1, width])
return output_tensor
def reshape_from_matrix(output_tensor, orig_shape_list):
"""Reshapes a rank 2 tensor back to its original rank >= 2 tensor."""
if len(orig_shape_list) == 2:
return output_tensor
output_shape = get_shape_list(output_tensor)
orig_dims = orig_shape_list[0:-1]
width = output_shape[-1]
return tf.reshape(output_tensor, orig_dims + [width])
def assert_rank(tensor, expected_rank, name=None):
"""Raises an exception if the tensor rank is not of the expected rank.
Args:
tensor: A tf.Tensor to check the rank of.
expected_rank: Python integer or list of integers, expected rank.
name: Optional name of the tensor for the error message.
Raises:
ValueError: If the expected shape doesn't match the actual shape.
"""
if name is None:
name = tensor.name
expected_rank_dict = {}
if isinstance(expected_rank, six.integer_types):
expected_rank_dict[expected_rank] = True
else:
for x in expected_rank:
expected_rank_dict[x] = True
actual_rank = tensor.shape.ndims
if actual_rank not in expected_rank_dict:
scope_name = tf.get_variable_scope().name
raise ValueError(
"For the tensor `%s` in scope `%s`, the actual rank "
"`%d` (shape = %s) is not equal to the expected rank `%s`" %
(name, scope_name, actual_rank, str(tensor.shape),
str(expected_rank)))
|
FeiSun/BERT4Rec
|
modeling.py
|
Python
|
apache-2.0
| 40,615
|
[
"Gaussian"
] |
c1d2502dca88d2c78dc23517c1d5e23b8bdb523ce3c93d7385d108cfe83fe8c0
|
###############################
# This file is part of PyLaDa.
#
# Copyright (C) 2013 National Renewable Energy Lab
#
# PyLaDa is a high throughput computational platform for Physics. It aims to make it easier to submit
# large numbers of jobs on supercomputers. It provides a python interface to physical input, such as
# crystal structures, as well as to a number of DFT (VASP, CRYSTAL) and atomic potential programs. It
# is able to organise and launch computational jobs on PBS and SLURM.
#
# PyLaDa is free software: you can redistribute it and/or modify it under the terms of the GNU General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# PyLaDa is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along with PyLaDa. If not, see
# <http://www.gnu.org/licenses/>.
###############################
from pytest import fixture
@fixture
def doplacement():
import pylada
old = pylada.do_multiple_mpi_programs
pylada.do_multiple_mpi_programs = True
yield True
pylada.do_multiple_mpi_programs = old
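# Editor's note: the fixture above follows pytest's yield-fixture pattern --
# code before `yield` is setup (enable multi-MPI placement), code after is
# teardown (restore the previous global), so each test runs isolated.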
def test_mpicomm(doplacement):
""" Test MPI Communicator. """
from pylada.process.mpi import Communicator, MPISizeError
root = Communicator(n=32)
for i in range(4):
root.machines["node0{0}".format(i)] = 8
newcomm = root.lend(5)
assert newcomm['n'] == 5
assert newcomm.parent() is root
assert len(newcomm.machines) == 1
assert root.machines[list(newcomm.machines.keys())[0]] == 3
assert root['n'] == 27
newcomm.cleanup()
assert newcomm['n'] == 0
assert len(newcomm.machines) == 0
assert root['n'] == 32
assert all(u == 8 for u in root.machines.values())
newcomm = root.lend(8)
assert newcomm['n'] == 8
assert sum(newcomm.machines.values()) == newcomm['n']
assert newcomm.parent() is root
assert len(newcomm.machines) == 1
key = list(newcomm.machines.keys())[0]
assert key not in root.machines
assert newcomm.machines[key] == 8
assert root['n'] == 24
newcomm.cleanup()
assert newcomm['n'] == 0
assert len(newcomm.machines) == 0
assert root['n'] == 32
assert all(u == 8 for u in root.machines.values())
newcomm = root.lend(12)
assert newcomm['n'] == 12
assert sum(newcomm.machines.values()) == newcomm['n']
assert newcomm.parent() is root
assert len(newcomm.machines) == 2
key0, key1 = newcomm.machines.keys()
if newcomm.machines[key0] != 8:
key0, key1 = key1, key0
assert newcomm.machines[key0] == 8
assert newcomm.machines[key1] == 4
assert key0 not in root.machines
assert root.machines[key1] == 4
assert root['n'] == 20
newcomm.cleanup()
assert newcomm['n'] == 0
assert len(newcomm.machines) == 0
assert root['n'] == 32
assert all(u == 8 for u in root.machines.values())
comms = root.split(4)
assert root['n'] == 0
assert len(root.machines) == 0
machines = []
for comm in comms:
assert comm['n'] == 8
assert sum(comm.machines.values()) == comm['n']
assert len(comm.machines) == 1
assert list(comm.machines.keys())[0] not in machines
machines.append(list(comm.machines.keys())[0])
for comm in comms:
comm.cleanup()
assert root['n'] == 32
assert all(u == 8 for u in root.machines.values())
comms = root.split(5)
assert root['n'] == 0
assert len(root.machines) == 0
machines = {}
for comm in comms:
assert comm['n'] in [6, 7]
assert sum(comm.machines.values()) == comm['n']
for key, value in comm.machines.items():
if key not in machines:
machines[key] = value
else:
machines[key] += value
assert sum(machines.values()) == 32
assert all(u == 8 for u in machines.values())
for comm in comms:
comm.cleanup()
assert root['n'] == 32
assert all(u == 8 for u in root.machines.values())
comms = root.split(3)
assert root['n'] == 0
assert len(root.machines) == 0
machines = {}
for comm in comms:
assert comm.parent() is root
assert comm['n'] in [10, 11]
assert sum(comm.machines.values()) == comm['n']
for key, value in comm.machines.items():
if key not in machines:
machines[key] = value
else:
machines[key] += value
assert sum(machines.values()) == 32
assert all(u == 8 for u in machines.values())
machines = comms[0].machines.copy()
for key, value in comms[1].machines.items():
if key in machines:
machines[key] += value
else:
machines[key] = value
comm = comms.pop(0)
comms[0].acquire(comm)
assert comm.parent is None
assert comm['n'] == 0
assert len(comm.machines) == 0
assert comms[0].parent() is root
assert comms[0]['n'] == sum(machines.values())
assert comms[0]['n'] == sum(comms[0].machines.values())
for key in machines:
assert machines[key] == comms[0].machines[key]
for key in comms[0].machines:
assert machines[key] == comms[0].machines[key]
for comm in comms:
comm.cleanup()
assert root['n'] == 32
assert all(u == 8 for u in root.machines.values())
try:
comm.lend(33)
except MPISizeError:
pass
else:
raise Exception()
try:
comm.split(33)
except MPISizeError:
pass
else:
raise Exception()
if __name__ == "__main__":
from sys import argv, path
from os.path import abspath
if len(argv) > 1:
path.extend(argv[1:])
test()
|
pylada/pylada-light
|
tests/process/test_mpi.py
|
Python
|
gpl-3.0
| 5,949
|
[
"CRYSTAL",
"VASP"
] |
8b4709478fd52465a6e6ce56427b9c575e156ad07ff38c52bcc89d07aedff439
|
# Copyright (c) 2014-2015 Brett Cannon <brett@python.org>
# Copyright (c) 2014-2016 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2015 Pavel Roskin <proski@gnu.org>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
"""Check Python 2 code for Python 2/3 source-compatible issues."""
from __future__ import absolute_import, print_function
import re
import sys
import tokenize
from collections import namedtuple
import six
import astroid
from astroid import bases
from pylint import checkers, interfaces
from pylint.interfaces import INFERENCE_FAILURE, INFERENCE
from pylint.utils import WarningScope
from pylint.checkers import utils
_ZERO = re.compile("^0+$")
def _is_old_octal(literal):
if _ZERO.match(literal):
return False
if re.match(r'0\d+', literal):
try:
int(literal, 8)
except ValueError:
return False
return True
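# Editor's note: illustrative inputs for _is_old_octal (Python 2 literals
# arrive here as token strings):
#   _is_old_octal('0777')  -> True   (old octal; spelled 0o777 in Python 3)
#   _is_old_octal('00')    -> False  (all-zero literals are excluded up front)
#   _is_old_octal('0999')  -> False  (leading zero but not valid base 8)
#   _is_old_octal('123')   -> falsy  (no leading zero, falls through to None)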
def _check_dict_node(node):
inferred_types = set()
try:
inferred = node.infer()
for inferred_node in inferred:
inferred_types.add(inferred_node)
except astroid.InferenceError:
pass
return (not inferred_types
or any(isinstance(x, astroid.Dict) for x in inferred_types))
def _is_builtin(node):
return getattr(node, 'name', None) in ('__builtin__', 'builtins')
_ACCEPTS_ITERATOR = {'iter', 'list', 'tuple', 'sorted', 'set', 'sum', 'any',
'all', 'enumerate', 'dict'}
def _in_iterating_context(node):
"""Check if the node is being used as an iterator.
Definition is taken from lib2to3.fixer_util.in_special_context().
"""
parent = node.parent
# Since a call can't be the loop variant we only need to know if the node's
# parent is a 'for' loop to know it's being used as the iterator for the
# loop.
if isinstance(parent, astroid.For):
return True
# Need to make sure the use of the node is in the iterator part of the
# comprehension.
elif isinstance(parent, astroid.Comprehension):
if parent.iter == node:
return True
# Various built-ins can take in an iterable or list and lead to the same
# value.
elif isinstance(parent, astroid.Call):
if isinstance(parent.func, astroid.Name):
parent_scope = parent.func.lookup(parent.func.name)[0]
if _is_builtin(parent_scope) and parent.func.name in _ACCEPTS_ITERATOR:
return True
elif isinstance(parent.func, astroid.Attribute):
if parent.func.attrname == 'join':
return True
# If the call is in an unpacking, there's no need to warn,
# since it can be considered iterating.
elif (isinstance(parent, astroid.Assign) and
isinstance(parent.targets[0], (astroid.List, astroid.Tuple))):
if len(parent.targets[0].elts) > 1:
return True
return False
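# Editor's note: concrete cases the function above distinguishes:
#   for x in map(f, xs): ...       -> iterating (parent is a For node)
#   [y for y in zip(a, b)]         -> iterating (the comprehension's iter slot)
#   list(map(f, xs)), ''.join(...) -> iterating (whitelisted call / join)
#   m = map(f, xs)                 -> not iterating; Python 3 returns a lazy
#                                     iterator here, hence the warning.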
def _is_conditional_import(node):
"""Checks if a import node is in the context of a conditional.
"""
parent = node.parent
return isinstance(parent, (astroid.TryExcept, astroid.ExceptHandler,
astroid.If, astroid.IfExp))
Branch = namedtuple('Branch', ['node', 'is_py2_only'])
class Python3Checker(checkers.BaseChecker):
__implements__ = interfaces.IAstroidChecker
enabled = False
name = 'python3'
msgs = {
# Errors for what will syntactically break in Python 3, warnings for
# everything else.
'E1601': ('print statement used',
'print-statement',
'Used when a print statement is used '
'(`print` is a function in Python 3)',
{'maxversion': (3, 0)}),
'E1602': ('Parameter unpacking specified',
'parameter-unpacking',
              'Used when parameter unpacking is specified for a function '
"(Python 3 doesn't allow it)",
{'maxversion': (3, 0)}),
'E1603': ('Implicit unpacking of exceptions is not supported '
'in Python 3',
'unpacking-in-except',
'Python3 will not allow implicit unpacking of '
'exceptions in except clauses. '
'See http://www.python.org/dev/peps/pep-3110/',
{'maxversion': (3, 0),
'old_names': [('W0712', 'unpacking-in-except')]}),
'E1604': ('Use raise ErrorClass(args) instead of '
'raise ErrorClass, args.',
'old-raise-syntax',
"Used when the alternate raise syntax "
"'raise foo, bar' is used "
"instead of 'raise foo(bar)'.",
{'maxversion': (3, 0),
'old_names': [('W0121', 'old-raise-syntax')]}),
'E1605': ('Use of the `` operator',
'backtick',
'Used when the deprecated "``" (backtick) operator is used '
'instead of the str() function.',
{'scope': WarningScope.NODE,
'maxversion': (3, 0),
'old_names': [('W0333', 'backtick')]}),
'E1609': ('Import * only allowed at module level',
'import-star-module-level',
'Used when the import star syntax is used somewhere '
'else than the module level.',
{'maxversion': (3, 0)}),
'W1601': ('apply built-in referenced',
'apply-builtin',
'Used when the apply built-in function is referenced '
'(missing from Python 3)',
{'maxversion': (3, 0)}),
'W1602': ('basestring built-in referenced',
'basestring-builtin',
'Used when the basestring built-in function is referenced '
'(missing from Python 3)',
{'maxversion': (3, 0)}),
'W1603': ('buffer built-in referenced',
'buffer-builtin',
'Used when the buffer built-in function is referenced '
'(missing from Python 3)',
{'maxversion': (3, 0)}),
'W1604': ('cmp built-in referenced',
'cmp-builtin',
'Used when the cmp built-in function is referenced '
'(missing from Python 3)',
{'maxversion': (3, 0)}),
'W1605': ('coerce built-in referenced',
'coerce-builtin',
'Used when the coerce built-in function is referenced '
'(missing from Python 3)',
{'maxversion': (3, 0)}),
'W1606': ('execfile built-in referenced',
'execfile-builtin',
'Used when the execfile built-in function is referenced '
'(missing from Python 3)',
{'maxversion': (3, 0)}),
'W1607': ('file built-in referenced',
'file-builtin',
'Used when the file built-in function is referenced '
'(missing from Python 3)',
{'maxversion': (3, 0)}),
'W1608': ('long built-in referenced',
'long-builtin',
'Used when the long built-in function is referenced '
'(missing from Python 3)',
{'maxversion': (3, 0)}),
'W1609': ('raw_input built-in referenced',
'raw_input-builtin',
'Used when the raw_input built-in function is referenced '
'(missing from Python 3)',
{'maxversion': (3, 0)}),
'W1610': ('reduce built-in referenced',
'reduce-builtin',
'Used when the reduce built-in function is referenced '
'(missing from Python 3)',
{'maxversion': (3, 0)}),
'W1611': ('StandardError built-in referenced',
'standarderror-builtin',
'Used when the StandardError built-in function is referenced '
'(missing from Python 3)',
{'maxversion': (3, 0)}),
'W1612': ('unicode built-in referenced',
'unicode-builtin',
'Used when the unicode built-in function is referenced '
'(missing from Python 3)',
{'maxversion': (3, 0)}),
'W1613': ('xrange built-in referenced',
'xrange-builtin',
'Used when the xrange built-in function is referenced '
'(missing from Python 3)',
{'maxversion': (3, 0)}),
'W1614': ('__coerce__ method defined',
'coerce-method',
'Used when a __coerce__ method is defined '
'(method is not used by Python 3)',
{'maxversion': (3, 0)}),
'W1615': ('__delslice__ method defined',
'delslice-method',
'Used when a __delslice__ method is defined '
'(method is not used by Python 3)',
{'maxversion': (3, 0)}),
'W1616': ('__getslice__ method defined',
'getslice-method',
'Used when a __getslice__ method is defined '
'(method is not used by Python 3)',
{'maxversion': (3, 0)}),
'W1617': ('__setslice__ method defined',
'setslice-method',
'Used when a __setslice__ method is defined '
'(method is not used by Python 3)',
{'maxversion': (3, 0)}),
'W1618': ('import missing `from __future__ import absolute_import`',
'no-absolute-import',
'Used when an import is not accompanied by '
'``from __future__ import absolute_import`` '
'(default behaviour in Python 3)',
{'maxversion': (3, 0)}),
'W1619': ('division w/o __future__ statement',
'old-division',
'Used for non-floor division w/o a float literal or '
'``from __future__ import division`` '
'(Python 3 returns a float for int division unconditionally)',
{'maxversion': (3, 0)}),
'W1620': ('Calling a dict.iter*() method',
'dict-iter-method',
'Used for calls to dict.iterkeys(), itervalues() or iteritems() '
'(Python 3 lacks these methods)',
{'maxversion': (3, 0)}),
'W1621': ('Calling a dict.view*() method',
'dict-view-method',
'Used for calls to dict.viewkeys(), viewvalues() or viewitems() '
'(Python 3 lacks these methods)',
{'maxversion': (3, 0)}),
'W1622': ('Called a next() method on an object',
'next-method-called',
"Used when an object's next() method is called "
'(Python 3 uses the next() built-in function)',
{'maxversion': (3, 0)}),
'W1623': ("Assigning to a class's __metaclass__ attribute",
'metaclass-assignment',
"Used when a metaclass is specified by assigning to __metaclass__ "
'(Python 3 specifies the metaclass as a class statement argument)',
{'maxversion': (3, 0)}),
'W1624': ('Indexing exceptions will not work on Python 3',
'indexing-exception',
'Indexing exceptions will not work on Python 3. Use '
'`exception.args[index]` instead.',
{'maxversion': (3, 0),
'old_names': [('W0713', 'indexing-exception')]}),
'W1625': ('Raising a string exception',
'raising-string',
'Used when a string exception is raised. This will not '
'work on Python 3.',
{'maxversion': (3, 0),
'old_names': [('W0701', 'raising-string')]}),
'W1626': ('reload built-in referenced',
'reload-builtin',
'Used when the reload built-in function is referenced '
              '(missing from Python 3). You can use imp.reload '
              'or importlib.reload instead.',
{'maxversion': (3, 0)}),
'W1627': ('__oct__ method defined',
'oct-method',
'Used when a __oct__ method is defined '
'(method is not used by Python 3)',
{'maxversion': (3, 0)}),
'W1628': ('__hex__ method defined',
'hex-method',
'Used when a __hex__ method is defined '
'(method is not used by Python 3)',
{'maxversion': (3, 0)}),
'W1629': ('__nonzero__ method defined',
'nonzero-method',
'Used when a __nonzero__ method is defined '
'(method is not used by Python 3)',
{'maxversion': (3, 0)}),
'W1630': ('__cmp__ method defined',
'cmp-method',
'Used when a __cmp__ method is defined '
'(method is not used by Python 3)',
{'maxversion': (3, 0)}),
# 'W1631': replaced by W1636
'W1632': ('input built-in referenced',
'input-builtin',
'Used when the input built-in is referenced '
'(backwards-incompatible semantics in Python 3)',
{'maxversion': (3, 0)}),
'W1633': ('round built-in referenced',
'round-builtin',
'Used when the round built-in is referenced '
'(backwards-incompatible semantics in Python 3)',
{'maxversion': (3, 0)}),
'W1634': ('intern built-in referenced',
'intern-builtin',
'Used when the intern built-in is referenced '
'(Moved to sys.intern in Python 3)',
{'maxversion': (3, 0)}),
'W1635': ('unichr built-in referenced',
'unichr-builtin',
'Used when the unichr built-in is referenced '
'(Use chr in Python 3)',
{'maxversion': (3, 0)}),
'W1636': ('map built-in referenced when not iterating',
'map-builtin-not-iterating',
'Used when the map built-in is referenced in a non-iterating '
'context (returns an iterator in Python 3)',
{'maxversion': (3, 0),
'old_names': [('W1631', 'implicit-map-evaluation')]}),
'W1637': ('zip built-in referenced when not iterating',
'zip-builtin-not-iterating',
'Used when the zip built-in is referenced in a non-iterating '
'context (returns an iterator in Python 3)',
{'maxversion': (3, 0)}),
'W1638': ('range built-in referenced when not iterating',
'range-builtin-not-iterating',
'Used when the range built-in is referenced in a non-iterating '
'context (returns an iterator in Python 3)',
{'maxversion': (3, 0)}),
'W1639': ('filter built-in referenced when not iterating',
'filter-builtin-not-iterating',
'Used when the filter built-in is referenced in a non-iterating '
'context (returns an iterator in Python 3)',
{'maxversion': (3, 0)}),
'W1640': ('Using the cmp argument for list.sort / sorted',
'using-cmp-argument',
'Using the cmp argument for list.sort or the sorted '
'builtin should be avoided, since it was removed in '
'Python 3. Using either `key` or `functools.cmp_to_key` '
'should be preferred.',
{'maxversion': (3, 0)}),
'W1641': ('Implementing __eq__ without also implementing __hash__',
'eq-without-hash',
              'Used when a class implements __eq__ but not __hash__. In Python 2, objects '
              'get object.__hash__ as the default implementation; in Python 3, objects get '
              'None as their default __hash__ implementation if they also implement __eq__.',
{'maxversion': (3, 0)}),
'W1642': ('__div__ method defined',
'div-method',
              'Used when a __div__ method is defined. Using `__truediv__` and setting '
              '__div__ = __truediv__ should be preferred '
              '(method is not used by Python 3)',
{'maxversion': (3, 0)}),
'W1643': ('__idiv__ method defined',
'idiv-method',
              'Used when a __idiv__ method is defined. Using `__itruediv__` and setting '
              '__idiv__ = __itruediv__ should be preferred '
              '(method is not used by Python 3)',
{'maxversion': (3, 0)}),
'W1644': ('__rdiv__ method defined',
'rdiv-method',
              'Used when a __rdiv__ method is defined. Using `__rtruediv__` and setting '
              '__rdiv__ = __rtruediv__ should be preferred '
              '(method is not used by Python 3)',
{'maxversion': (3, 0)}),
'W1645': ('Exception.message removed in Python 3',
'exception-message-attribute',
'Used when the message attribute is accessed on an Exception. Use '
'str(exception) instead.',
{'maxversion': (3, 0)}),
'W1646': ('non-text encoding used in str.decode',
'invalid-str-codec',
'Used when using str.encode or str.decode with a non-text encoding. Use '
'codecs module to handle arbitrary codecs.',
{'maxversion': (3, 0)}),
'W1647': ('sys.maxint removed in Python 3',
'sys-max-int',
'Used when accessing sys.maxint. Use sys.maxsize instead.',
{'maxversion': (3, 0)}),
'W1648': ('Module moved in Python 3',
'bad-python3-import',
'Used when importing a module that no longer exists in Python 3.',
{'maxversion': (3, 0)}),
'W1649': ('Accessing a function method on the string module',
'deprecated-string-function',
'Used when accessing a string function that has been deprecated in Python 3.',
{'maxversion': (3, 0)}),
'W1650': ('Using str.translate with deprecated deletechars parameters',
'deprecated-str-translate-call',
              'Used when using the deprecated deletechars parameters from '
              'str.translate. Use re.sub to remove the desired characters.',
{'maxversion': (3, 0)}),
}
_bad_builtins = frozenset([
'apply',
'basestring',
'buffer',
'cmp',
'coerce',
'execfile',
'file',
'input', # Not missing, but incompatible semantics
'intern',
'long',
'raw_input',
'reduce',
'round', # Not missing, but incompatible semantics
'StandardError',
'unichr',
'unicode',
'xrange',
'reload',
])
_unused_magic_methods = frozenset([
'__coerce__',
'__delslice__',
'__getslice__',
'__setslice__',
'__oct__',
'__hex__',
'__nonzero__',
'__cmp__',
'__div__',
'__idiv__',
'__rdiv__',
])
_invalid_encodings = frozenset([
'base64_codec',
'base64',
'base_64',
'bz2_codec',
'bz2',
'hex_codec',
'hex',
'quopri_codec',
'quopri',
'quotedprintable',
'quoted_printable',
'uu_codec',
'uu',
'zlib_codec',
'zlib',
'zip',
'rot13',
'rot_13',
])
_bad_python3_module_map = {
'sys-max-int': {
'sys': frozenset(['maxint'])
},
'bad-python3-import': frozenset([
'anydbm', 'BaseHTTPServer', '__builtin__', 'CGIHTTPServer', 'ConfigParser', 'copy_reg',
'cPickle', 'cProfile', 'cStringIO', 'Cookie', 'cookielib', 'dbhash', 'dbm', 'dumbdbm',
'dumbdb', 'Dialog', 'DocXMLRPCServer', 'FileDialog', 'FixTk', 'gdbm', 'htmlentitydefs',
'HTMLParser', 'httplib', 'markupbase', 'Queue', 'repr', 'robotparser', 'ScrolledText',
'SimpleDialog', 'SimpleHTTPServer', 'SimpleXMLRPCServer', 'StringIO', 'dummy_thread',
'SocketServer', 'test.test_support', 'Tkinter', 'Tix', 'Tkconstants', 'tkColorChooser',
'tkCommonDialog', 'Tkdnd', 'tkFileDialog', 'tkFont', 'tkMessageBox', 'tkSimpleDialog',
'turtle', 'UserList', 'UserString', 'whichdb', '_winreg', 'xmlrpclib', 'audiodev',
'Bastion', 'bsddb185', 'bsddb3', 'Canvas', 'cfmfile', 'cl', 'commands', 'compiler',
'dircache', 'dl', 'exception', 'fpformat', 'htmllib', 'ihooks', 'imageop', 'imputil',
'linuxaudiodev', 'md5', 'mhlib', 'mimetools', 'MimeWriter', 'mimify', 'multifile',
'mutex', 'new', 'popen2', 'posixfile', 'pure', 'rexec', 'rfc822', 'sha', 'sgmllib',
'sre', 'stat', 'stringold', 'sunaudio', 'sv', 'test.testall', 'thread', 'timing',
'toaiff', 'user', 'urllib2', 'urlparse'
]),
'deprecated-string-function': {
'string': frozenset([
'maketrans', 'atof', 'atoi', 'atol', 'capitalize', 'expandtabs', 'find', 'rfind',
'index', 'rindex', 'count', 'lower', 'split', 'rsplit', 'splitfields', 'join',
'joinfields', 'lstrip', 'rstrip', 'strip', 'swapcase', 'translate', 'upper',
'ljust', 'rjust', 'center', 'zfill', 'replace'
])
}
}
if (3, 4) <= sys.version_info < (3, 4, 4):
# Python 3.4.0 -> 3.4.3 has a bug which breaks `repr_tree()`:
# https://bugs.python.org/issue23572
_python_2_tests = frozenset()
else:
_python_2_tests = frozenset(
[astroid.extract_node(x).repr_tree() for x in [
'sys.version_info[0] == 2',
'sys.version_info[0] < 3',
'sys.version_info == (2, 7)',
'sys.version_info <= (2, 7)',
'sys.version_info < (3, 0)',
]])
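    # Editor's note: the comparison in _is_py2_test below is structural -- a
    # guard such as `if sys.version_info[0] == 2:` matches the whitelist above
    # via repr_tree() no matter how the source is formatted.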
def __init__(self, *args, **kwargs):
self._future_division = False
self._future_absolute_import = False
self._modules_warned_about = set()
self._branch_stack = []
super(Python3Checker, self).__init__(*args, **kwargs)
def add_message(self, msg_id, always_warn=False, # pylint: disable=arguments-differ
*args, **kwargs):
if always_warn or not (self._branch_stack and self._branch_stack[-1].is_py2_only):
super(Python3Checker, self).add_message(msg_id, *args, **kwargs)
def _is_py2_test(self, node):
if isinstance(node.test, astroid.Attribute) and isinstance(node.test.expr, astroid.Name):
if node.test.expr.name == 'six' and node.test.attrname == 'PY2':
return True
elif (isinstance(node.test, astroid.Compare) and
node.test.repr_tree() in self._python_2_tests):
return True
return False
def visit_if(self, node):
self._branch_stack.append(Branch(node, self._is_py2_test(node)))
def leave_if(self, node):
assert self._branch_stack.pop().node == node
def visit_ifexp(self, node):
self._branch_stack.append(Branch(node, self._is_py2_test(node)))
def leave_ifexp(self, node):
assert self._branch_stack.pop().node == node
def visit_module(self, node): # pylint: disable=unused-argument
"""Clear checker state after previous module."""
self._future_division = False
self._future_absolute_import = False
def visit_functiondef(self, node):
if node.is_method() and node.name in self._unused_magic_methods:
method_name = node.name
if node.name.startswith('__'):
method_name = node.name[2:-2]
self.add_message(method_name + '-method', node=node)
@utils.check_messages('parameter-unpacking')
def visit_arguments(self, node):
for arg in node.args:
if isinstance(arg, astroid.Tuple):
self.add_message('parameter-unpacking', node=arg)
def visit_name(self, node):
"""Detect when a "bad" built-in is referenced."""
found_node = node.lookup(node.name)[0]
if _is_builtin(found_node):
if node.name in self._bad_builtins:
message = node.name.lower() + '-builtin'
self.add_message(message, node=node)
@utils.check_messages('print-statement')
def visit_print(self, node):
self.add_message('print-statement', node=node, always_warn=True)
def _warn_if_deprecated(self, node, module, attributes, report_on_modules=True):
for message, module_map in six.iteritems(self._bad_python3_module_map):
if module in module_map and module not in self._modules_warned_about:
if isinstance(module_map, frozenset):
if report_on_modules:
self._modules_warned_about.add(module)
self.add_message(message, node=node)
elif attributes and module_map[module].intersection(attributes):
self.add_message(message, node=node)
def visit_importfrom(self, node):
if node.modname == '__future__':
for name, _ in node.names:
if name == 'division':
self._future_division = True
elif name == 'absolute_import':
self._future_absolute_import = True
else:
if not self._future_absolute_import:
if self.linter.is_message_enabled('no-absolute-import'):
self.add_message('no-absolute-import', node=node)
if not _is_conditional_import(node):
self._warn_if_deprecated(node, node.modname, {x[0] for x in node.names})
if node.names[0][0] == '*':
if self.linter.is_message_enabled('import-star-module-level'):
if not isinstance(node.scope(), astroid.Module):
self.add_message('import-star-module-level', node=node)
def visit_import(self, node):
if not self._future_absolute_import:
self.add_message('no-absolute-import', node=node)
if not _is_conditional_import(node):
for name, _ in node.names:
self._warn_if_deprecated(node, name, None)
@utils.check_messages('metaclass-assignment')
def visit_classdef(self, node):
if '__metaclass__' in node.locals:
self.add_message('metaclass-assignment', node=node)
locals_and_methods = set(node.locals).union(x.name for x in node.mymethods())
if '__eq__' in locals_and_methods and '__hash__' not in locals_and_methods:
self.add_message('eq-without-hash', node=node)
@utils.check_messages('old-division')
def visit_binop(self, node):
if not self._future_division and node.op == '/':
for arg in (node.left, node.right):
if isinstance(arg, astroid.Const) and isinstance(arg.value, float):
break
else:
self.add_message('old-division', node=node)
def _check_cmp_argument(self, node):
# Check that the `cmp` argument is used
kwargs = []
if (isinstance(node.func, astroid.Attribute)
and node.func.attrname == 'sort'):
inferred = utils.safe_infer(node.func.expr)
if not inferred:
return
builtins_list = "{}.list".format(bases.BUILTINS)
if (isinstance(inferred, astroid.List)
or inferred.qname() == builtins_list):
kwargs = node.keywords
elif (isinstance(node.func, astroid.Name)
and node.func.name == 'sorted'):
inferred = utils.safe_infer(node.func)
if not inferred:
return
builtins_sorted = "{}.sorted".format(bases.BUILTINS)
if inferred.qname() == builtins_sorted:
kwargs = node.keywords
for kwarg in kwargs or []:
if kwarg.arg == 'cmp':
self.add_message('using-cmp-argument', node=node)
return
@staticmethod
def _is_constant_string_or_name(node):
if isinstance(node, astroid.Const):
return isinstance(node.value, six.string_types)
return isinstance(node, astroid.Name)
@staticmethod
def _is_none(node):
return isinstance(node, astroid.Const) and node.value is None
@staticmethod
def _has_only_n_positional_args(node, number_of_args):
return len(node.args) == number_of_args and all(node.args) and not node.keywords
@staticmethod
def _could_be_string(inferred_types):
confidence = INFERENCE if inferred_types else INFERENCE_FAILURE
for inferred_type in inferred_types:
if inferred_type is astroid.Uninferable:
confidence = INFERENCE_FAILURE
elif not (isinstance(inferred_type, astroid.Const) and
isinstance(inferred_type.value, six.string_types)):
return None
return confidence
def visit_call(self, node):
self._check_cmp_argument(node)
if isinstance(node.func, astroid.Attribute):
inferred_types = set()
try:
for inferred_receiver in node.func.expr.infer():
inferred_types.add(inferred_receiver)
if isinstance(inferred_receiver, astroid.Module):
self._warn_if_deprecated(node, inferred_receiver.name,
{node.func.attrname},
report_on_modules=False)
except astroid.InferenceError:
pass
if node.args:
is_str_confidence = self._could_be_string(inferred_types)
if is_str_confidence:
if (node.func.attrname in ('encode', 'decode') and
len(node.args) >= 1 and node.args[0]):
first_arg = node.args[0]
self._validate_encoding(first_arg, node)
if (node.func.attrname == 'translate' and
self._has_only_n_positional_args(node, 2) and
self._is_none(node.args[0]) and
self._is_constant_string_or_name(node.args[1])):
                        # The above statement is looking for calls of the form:
#
# foo.translate(None, 'abc123')
#
# or
#
# foo.translate(None, some_variable)
#
# This check is somewhat broad and _may_ have some false positives, but
# after checking several large codebases it did not have any false
# positives while finding several real issues. This call pattern seems
                        # rare enough that the trade-off is worth it.
self.add_message('deprecated-str-translate-call',
node=node,
confidence=is_str_confidence)
return
if node.keywords:
return
if node.func.attrname == 'next':
self.add_message('next-method-called', node=node)
else:
if _check_dict_node(node.func.expr):
if node.func.attrname in ('iterkeys', 'itervalues', 'iteritems'):
self.add_message('dict-iter-method', node=node)
elif node.func.attrname in ('viewkeys', 'viewvalues', 'viewitems'):
self.add_message('dict-view-method', node=node)
elif isinstance(node.func, astroid.Name):
found_node = node.func.lookup(node.func.name)[0]
if _is_builtin(found_node):
if node.func.name in ('filter', 'map', 'range', 'zip'):
if not _in_iterating_context(node):
checker = '{}-builtin-not-iterating'.format(node.func.name)
self.add_message(checker, node=node)
if node.func.name == 'open' and node.keywords:
kwargs = node.keywords
for kwarg in kwargs or []:
if kwarg.arg == 'encoding':
self._validate_encoding(kwarg.value, node)
break
def _validate_encoding(self, encoding, node):
if isinstance(encoding, astroid.Const):
value = encoding.value
if value in self._invalid_encodings:
self.add_message('invalid-str-codec',
node=node)
@utils.check_messages('indexing-exception')
def visit_subscript(self, node):
""" Look for indexing exceptions. """
try:
for inferred in node.value.infer():
if not isinstance(inferred, astroid.Instance):
continue
if utils.inherit_from_std_ex(inferred):
self.add_message('indexing-exception', node=node)
except astroid.InferenceError:
return
def visit_assignattr(self, node):
if isinstance(node.assign_type(), astroid.AugAssign):
self.visit_attribute(node)
def visit_delattr(self, node):
self.visit_attribute(node)
@utils.check_messages('exception-message-attribute')
def visit_attribute(self, node):
""" Look for accessing message on exceptions. """
try:
for inferred in node.expr.infer():
if (isinstance(inferred, astroid.Instance) and
utils.inherit_from_std_ex(inferred)):
if node.attrname == 'message':
self.add_message('exception-message-attribute', node=node)
if isinstance(inferred, astroid.Module):
self._warn_if_deprecated(node, inferred.name, {node.attrname},
report_on_modules=False)
except astroid.InferenceError:
return
@utils.check_messages('unpacking-in-except')
def visit_excepthandler(self, node):
"""Visit an except handler block and check for exception unpacking."""
if isinstance(node.name, (astroid.Tuple, astroid.List)):
self.add_message('unpacking-in-except', node=node)
@utils.check_messages('backtick')
def visit_repr(self, node):
self.add_message('backtick', node=node)
@utils.check_messages('raising-string', 'old-raise-syntax')
def visit_raise(self, node):
"""Visit a raise statement and check for raising
strings or old-raise-syntax.
"""
if (node.exc is not None and
node.inst is not None and
node.tback is None):
self.add_message('old-raise-syntax', node=node)
# Ignore empty raise.
if node.exc is None:
return
expr = node.exc
if self._check_raise_value(node, expr):
return
else:
try:
value = next(astroid.unpack_infer(expr))
except astroid.InferenceError:
return
self._check_raise_value(node, value)
def _check_raise_value(self, node, expr):
if isinstance(expr, astroid.Const):
value = expr.value
if isinstance(value, str):
self.add_message('raising-string', node=node)
return True
class Python3TokenChecker(checkers.BaseTokenChecker):
__implements__ = interfaces.ITokenChecker
name = 'python3'
enabled = False
msgs = {
'E1606': ('Use of long suffix',
'long-suffix',
'Used when "l" or "L" is used to mark a long integer. '
'This will not work in Python 3, since `int` and `long` '
'types have merged.',
{'maxversion': (3, 0)}),
'E1607': ('Use of the <> operator',
'old-ne-operator',
'Used when the deprecated "<>" operator is used instead '
'of "!=". This is removed in Python 3.',
{'maxversion': (3, 0),
'old_names': [('W0331', 'old-ne-operator')]}),
'E1608': ('Use of old octal literal',
'old-octal-literal',
'Used when encountering the old octal syntax, '
'removed in Python 3. To use the new syntax, '
                  'prepend 0o to the number.',
{'maxversion': (3, 0)}),
}
def process_tokens(self, tokens):
for idx, (tok_type, token, start, _, _) in enumerate(tokens):
if tok_type == tokenize.NUMBER:
if token.lower().endswith('l'):
                    # This has different semantics than the lowercase-l-suffix check.
self.add_message('long-suffix', line=start[0])
elif _is_old_octal(token):
self.add_message('old-octal-literal', line=start[0])
if tokens[idx][1] == '<>':
self.add_message('old-ne-operator', line=tokens[idx][2][0])
def register(linter):
linter.register_checker(Python3Checker(linter))
linter.register_checker(Python3TokenChecker(linter))
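# Illustrative only (not part of pylint): a tiny Python 2 snippet containing
# the constructs that Python3TokenChecker above reports. Historically these
# checks were exposed through pylint's --py3k porting mode; treat the exact
# invocation as an assumption.
_SAMPLE_PY2_SOURCE = '''
n = 10L          # E1606 long-suffix
mode = 0777      # E1608 old-octal-literal
if n <> 3:       # E1607 old-ne-operator
    pass
'''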
|
arju88nair/projectCulminate
|
venv/lib/python3.5/site-packages/pylint/checkers/python3.py
|
Python
|
apache-2.0
| 38,460
|
[
"VisIt"
] |
260d985e54adb6fd9567aad77bff40d2101d233e70a313a3f2459af62d783ad2
|
import numpy as np
from scipy.spatial import cKDTree
from .synthClustPrep import setSynthClust
from ..best_fit.obs_clust_prepare import dataProcess
from .. import update_progress
def main(clp, pd):
"""
Assign masses to the (decontaminated) observed cluster, and binary
probabilities (if binarity was estimated).
"""
# Dummy arrays
clp['st_mass_mean'], clp['st_mass_std'],\
clp['st_mass_mean_binar'], clp['st_mass_std_binar'],\
clp['prob_binar'] = [np.array([]) for _ in range(5)]
# No best fit process was employed
if pd['best_fit_algor'] == 'n':
return clp
# Generate random models from the selected solution (mean, median, mode,
    # MAP), given by 'D3_sol'.
models = ranModels(
pd['fundam_params'], pd['D3_sol'], clp['isoch_fit_params'],
clp['isoch_fit_errors'])
if not models.any():
print(" WARNING: could not assign masses and binary probabilities")
return clp
print("Estimating binary probabilities and masses")
# Extract photometry used in the best fit process
mags_cols_cl, _ = dataProcess(clp['cl_max_mag'])
# Arrange properly
mags, cols = [np.array(_) for _ in mags_cols_cl]
obs_phot = np.concatenate([mags, cols]).T
# Initiate empty arrays for mean and variance
st_mass_mean, M2 = np.zeros(obs_phot.shape[0]), np.zeros(obs_phot.shape[0])
st_mass_mean_binar, M2_binar = np.zeros(obs_phot.shape[0]),\
np.zeros(obs_phot.shape[0])
prob_binar = np.zeros(obs_phot.shape[0])
# Estimate the mean and variance for each star via recurrence.
Nm_binar = 0
for Nm, model in enumerate(models):
# Generate synthetic cluster from the 'model'.
isoch = setSynthClust(model, *clp['syntClustArgs'])
if not isoch.any():
continue
# Masses, binary mask
mass_primary = isoch[pd['m_ini_idx']]
binar_idxs = ~(isoch[-1] == -99.)
mass_secondary = isoch[-1]
# shape: (N_stars, Ndim)
photom = isoch[:sum(pd['N_fc'])].T
# For non-binary systems
photom_single = photom[~binar_idxs]
if photom_single.any():
obs_mass, lkl_p = photomMatch(
obs_phot, photom_single, mass_primary[~binar_idxs])
# Estimate mean and variance
st_mass_mean, M2 = recurrentStats(Nm, st_mass_mean, M2, obs_mass)
# For binary systems
if pd['binar_flag']:
photom_binar = photom[binar_idxs]
# If there are no binary systems, skip
if photom_binar.any():
Nm_binar += 1
obs_mass, lkl_b = photomMatch(
obs_phot, photom_binar, mass_secondary[binar_idxs])
st_mass_mean_binar, M2_binar = recurrentStats(
Nm, st_mass_mean_binar, M2_binar, obs_mass)
# Bayesian probability
new_prob_binar = 1. / (1. + (lkl_p / lkl_b))
prob_binar = recurrentStats(
Nm, prob_binar, None, new_prob_binar)
update_progress.updt(models.shape[0], Nm + 1)
# Store standard deviations
st_mass_std = np.sqrt(M2 / Nm)
st_mass_std_binar = np.sqrt(M2_binar / max(1, Nm_binar))
clp['st_mass_mean'], clp['st_mass_std'], clp['st_mass_mean_binar'],\
clp['st_mass_std_binar'], clp['prob_binar'] = st_mass_mean,\
st_mass_std, st_mass_mean_binar, st_mass_std_binar, prob_binar
return clp
def ranModels(fundam_params, D3_sol, isoch_fit_params, isoch_fit_errors,
N_models=1000):
"""
Generate the requested models via sampling a Gaussian centered on the
selected solution, with standard deviation given by the attached
uncertainty.
HARDCODED:
N_models: number of models to generate.
"""
# Use the selected solution values for all the parameters.
model = isoch_fit_params[D3_sol + '_sol']
# Extract standard deviations.
p_vals, nancount = [], 0
for i, p in enumerate(model):
std = isoch_fit_errors[i][-1]
if not np.isnan(std):
p_vals.append([
p, std, min(fundam_params[i]), max(fundam_params[i])])
else:
# The parameter has no uncertainty attached
nancount += 1
# Check if at least one parameter has an uncertainty attached.
if nancount < 6:
# Generate 'N_models' random models.
models = []
for par in p_vals:
model = np.random.normal(par[0], par[1], N_models)
model = np.clip(model, a_min=par[2], a_max=par[3])
models.append(model)
models = np.array(models).T
else:
models = np.array([])
return models
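# Illustrative sketch (not part of ASteCA): the sampling scheme above boils
# down to one Gaussian per parameter, truncated to that parameter's grid
# range. The parameter values below are made up.
if __name__ == '__main__':
    _demo_p_vals = [
        (0.0152, 0.002, 0.0001, 0.03),  # (mean, std, grid min, grid max)
        (9.10, 0.15, 6.0, 10.3)]
    _demo_models = np.array([
        np.clip(np.random.normal(mu, sd, 1000), a_min=lo, a_max=hi)
        for (mu, sd, lo, hi) in _demo_p_vals]).T
    # shape (1000, 2): the (N_models, N_params) layout that ranModels() returns
    print(_demo_models.shape)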
def photomMatch(obs_phot, photom, mass_ini):
"""
For each observed star in 'obs_phot', find the closest synthetic star in
the (synthetic) photometric space 'photom'
"""
tree = cKDTree(photom)
dd, ii = tree.query(obs_phot, k=1)
# Assign masses to each observed star
obs_mass = mass_ini[ii]
# Likelihood is defined as the inverse of the distance
lkl = 1. / dd
return obs_mass, lkl
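# Quick illustrative check (not part of the pipeline): with a synthetic
# photometric grid, the nearest synthetic star recovers its assigned mass,
# and the likelihood is the inverse of the distance to it.
if __name__ == '__main__':
    _grid = np.array([[0., 0.], [1., 1.], [2., 2.]])
    _masses = np.array([0.5, 1.0, 1.5])
    _m, _lkl = photomMatch(np.array([[0.9, 1.1]]), _grid, _masses)
    assert _m[0] == 1.0  # the closest grid point is (1, 1)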
def recurrentStats(Nm, mean, var, newValue):
"""
Source: en.wikipedia.org/wiki/
Algorithms_for_calculating_variance#Welford's_online_algorithm
"""
count = Nm + 1
delta = newValue - mean
mean += delta / count
if var is None:
return mean
var += delta * (newValue - mean)
return mean, var
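# Sanity sketch (not part of ASteCA): fed one sample at a time, recurrentStats
# reproduces numpy's mean and (population) variance via Welford's recurrence.
if __name__ == '__main__':
    _samples = np.random.normal(5., 2., size=(200, 3))
    _mean, _M2 = np.zeros(3), np.zeros(3)
    for _n, _row in enumerate(_samples):
        _mean, _M2 = recurrentStats(_n, _mean, _M2, _row)
    assert np.allclose(_mean, _samples.mean(axis=0))
    assert np.allclose(_M2 / len(_samples), _samples.var(axis=0))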
|
asteca/ASteCA
|
packages/synth_clust/masses_binar_probs.py
|
Python
|
gpl-3.0
| 5,556
|
[
"Gaussian"
] |
06138e5d3c5787eaed51fd72fa1ba2bd85c7b159a37c41d40838e110aae3a7ad
|
#!/usr/bin/env python
"""
Vision demo configuration routines.
Notes
-----
Information regarding the cartridges and columns that own specific
neurons are not used during execution, but may be used for
examining the generated LPU graphs.
Genetic/neurotransmitter information included in the neuron data is artificial
and does not have any biological significance.
"""
import collections
import csv
import os
import networkx as nx
import numpy as np
from neurokernel.LPU.LPU import LPU
from neurokernel.pattern import Pattern
import neurokernel.plsel as plsel
class hex_array(object):
"""
0 1 2 3 4
----------------------> cols (X=cols*sqrt(3))
0| 0 2 4
| 1 3
1| 5 7 9
| 6 8
2| 10 12 14
| 11 13
|
V
rows (first col: 0,2,4,6)
(Y=2*row if col is even else Y=2*row+1 )
"""
def __init__(self, nrows, ncols):
self.nrows = nrows
self.ncols = ncols
self.num_elements = nrows * ncols
self.X = np.tile(np.arange(self.ncols, dtype = np.double).reshape((1, self.ncols))*np.sqrt(3),
(self.nrows, 1))
if (self.ncols % 2 == 0):
self.Y = np.tile(np.arange(2*self.nrows, dtype = np.double).reshape((self.nrows, 2)),
(1, self.ncols//2))
else:
self.Y = np.tile(np.arange(2*self.nrows, dtype = np.double).reshape((self.nrows, 2)),
(1, self.ncols//2+1))
self.Y = self.Y[:,0:-1]
self.col = np.tile(np.arange(self.ncols, dtype = np.int32).reshape((1, self.ncols)),
(self.nrows, 1))
self.row = np.tile(np.arange(self.nrows, dtype = np.int32).reshape((self.nrows, 1)),
(1, self.ncols))
#self.Y = self.Y + np.tile(np.asarray([0, 1]),
# (self.nrows, self.ncols/2))
self.col = self.col.reshape(-1)
self.row = self.row.reshape(-1)
self.num = np.arange(self.num_elements, dtype = np.int32).reshape(nrows, ncols)
def find_neighbor(self, row, col):
"""
neighbors are defined relatively as
1
2 6
0
3 5
4
"""
if col < 0 or col >= self.ncols:
raise ValueError("column number " + str(col) + " exceeds array limit")
if row < 0 or row >= self.nrows:
raise ValueError("row number " + str(row) + " exceeds array limit")
# adding neighbor 0 (self)
neighbor = [self.num[row, col]]
# adding neighbor 1
neighbor.append(self.num[row-1, col] if row != 0 else None)
# adding neighbor 2, 3
if col == 0:
neighbor.extend([None, None])
elif col % 2 == 0:
if row == 0:
neighbor.extend([None, self.num[row, col-1]])
else:
neighbor.extend(list(self.num[row-1:row+1, col-1]))
else:
if row == self.nrows-1:
neighbor.extend([self.num[row, col-1], None])
else:
neighbor.extend(list(self.num[row:row+2, col-1]))
# adding neighbor 4
neighbor.append(self.num[row+1, col] if row != self.nrows-1 else None)
# adding neighbor 5, 6
if col == self.ncols-1:
neighbor.extend([None, None])
elif col % 2 == 0:
if row == 0:
neighbor.extend([self.num[row, col+1], None])
else:
neighbor.extend(
list(self.num[row:row-2 if row-2 >= 0 else None:-1, col+1]))
else:
if row == self.nrows-1:
neighbor.extend([None, self.num[row, col+1]])
else:
neighbor.extend(
list(self.num[row+1:row-1 if row-1 >= 0 else None:-1, col+1]))
return neighbor
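# Illustrative usage (not part of the original demo): build a small array and
# inspect one cartridge's neighborhood. Entries are element numbers in the
# order [self, 1, 2, 3, 4, 5, 6] from the diagram above; None marks a neighbor
# that falls outside the array.
if __name__ == '__main__':
    _hx = hex_array(3, 5)
    print(_hx.find_neighbor(0, 0))   # corner element: several None neighbors
    print(_hx.find_neighbor(1, 2))   # interior element: all seven defined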
class vision_LPU(object):
def __init__(self, nrows, ncols, neuron_csv,
columnar_synapse_csv, other_synapse_csv,
LPU_name):
self.nrows = nrows
self.ncols = ncols
self.num_cartridges = nrows * ncols
self.neuron_csv = neuron_csv
self.columnar_synapse_csv = columnar_synapse_csv
self.other_synapse_csv = other_synapse_csv
self.hexarray = hex_array(nrows, ncols)
self._connected = False
self.LPU_name = LPU_name
self.composition_rules = []
# read in csv file and turn it into a numpy structured array
neuron_list = []
dtypes = [np.dtype('S10'), np.dtype('S32'),
np.dtype(np.int32), np.dtype(np.int32),
np.dtype(np.int32), np.dtype(np.int32),
np.dtype(np.int32), np.dtype(np.int32),
np.dtype(np.double), np.dtype(np.double),
np.dtype(np.double), np.dtype(np.double),
np.dtype(np.double), np.dtype(np.double),
np.dtype(np.double), np.dtype(np.double), np.dtype('S32'), np.dtype('S32')]
with open(self.neuron_csv, 'rU') as csvfile:
reader = csv.reader(csvfile)
self.neuron_field_name = reader.next()
n_entry = len(self.neuron_field_name)
for row in reader:
tmp = [dtypes[i].type(row[i]) for i in range(n_entry)]
neuron_list.append(tuple(tmp))
self.num_neuron_types = len(neuron_list)
self.neuron_dict = np.array(
neuron_list,
dtype = [(a, b) for a, b in zip(self.neuron_field_name, dtypes)])
# read in csv file and turn it into a numpy structured array
if self.columnar_synapse_csv is not None:
synapse_list = []
dtypes = [np.dtype('S10'), np.dtype('S10'),
np.dtype('S32'),
np.dtype(np.int32), np.dtype(np.double),
np.dtype(np.double), np.dtype(np.double),
np.dtype(np.double), np.dtype(np.double),
np.dtype(np.double), np.dtype(np.double),
np.dtype(np.int32)]
with open(self.columnar_synapse_csv, 'rU') as csvfile:
reader = csv.reader(csvfile)
synapse_field_name = reader.next()
n_entry = len(synapse_field_name)
for row in reader:
tmp = [dtypes[i].type(row[i]) for i in range(n_entry)]
synapse_list.append(tuple(tmp))
self.num_synapse_types = len(synapse_list)
self.synapse_dict = np.array(
synapse_list,
dtype = [(a, b) for a, b in zip(synapse_field_name, dtypes)])
else:
# TODO: will fail later if synapse_dict is empty
self.num_synapse_types = 0
self.synapse_dict = []
if self.other_synapse_csv is not None:
synapse_list = []
dtypes = [np.dtype('S10'), np.dtype('S10'),
np.dtype('S32'),
np.dtype(np.double),
np.dtype(np.double), np.dtype(np.double),
np.dtype(np.double), np.dtype(np.double),
np.dtype(np.double), np.dtype(np.double),
np.dtype(np.int32)]
with open(self.other_synapse_csv, 'rU') as csvfile:
reader = csv.reader(csvfile)
synapse_field_name = reader.next()
n_entry = len(synapse_field_name)
for row in reader:
tmp = [dtypes[i].type(row[i]) for i in range(n_entry)]
synapse_list.append(tuple(tmp))
self.num_other_synapse_types = len(synapse_list)
self.other_synapse_dict = np.array(
synapse_list,
dtype = [(a, b) for a, b in zip(synapse_field_name, dtypes)])
else:
self.num_other_synapse_types = 0
self.other_synapse_dict = []
def create_cartridges(self):
# create a number of cartridges
self.cartridge_neuron_dict = self.neuron_dict[self.neuron_dict['columnar'] == 1]
self.cartridge_synapse_dict = self.synapse_dict[self.synapse_dict['cart'] == 0]
self.cartridges = []
for _ in range(self.num_cartridges):
self.cartridges.append(
Cartridge(self.cartridge_neuron_dict,
self.cartridge_synapse_dict))
def connect_cartridges(self):
# connect cartridge from their neighbors
if not hasattr(self, 'cartridges'):
raise AttributeError("Need to create cartridges before connecting them")
count = 0
for cartridge in self.cartridges:
row = np.asscalar(self.hexarray.row[count])
col = np.asscalar(self.hexarray.col[count])
cartridge.assign_pos(count, row, col,
np.asscalar(self.hexarray.X[row,col]),
np.asscalar(self.hexarray.Y[row,col]))
neighbor_num = self.hexarray.find_neighbor(row, col)
cartridge.set_neighbors(
[self.cartridges[num] if num is not None else None
for num in neighbor_num])
count += 1
self._connected = True
def create_non_columnar_neurons(self):
self.non_columnar_neurons = collections.OrderedDict()
self.non_columnar_neuron_list = self.neuron_dict[self.neuron_dict['columnar'] != 1]
dtnames = self.non_columnar_neuron_list.dtype.names
for neuron_dict in self.non_columnar_neuron_list:
name = neuron_dict['name']
self.non_columnar_neurons.update({name: []})
for _ in range(neuron_dict['columnar']):
self.non_columnar_neurons[name].append(
Neuron(dict(zip(dtnames, [np.asscalar(p) for p in neuron_dict]))))
def remove_cartridge(self, num):
pass
def remove_neuron_type(self, name):
pass
def __repr__(self):
if hasattr(self, 'cartridges'):
return 'LPU with '+str(len(self.cartridges))+' cartridges'
else:
return 'LPU unconfigured'
def to_graph(self):
g = nx.MultiDiGraph()
num = 0
for neuron_type in self.neuron_dict:
if not neuron_type['dummy']:
if neuron_type['columnar'] == 1:
name = neuron_type['name']
for cartridge in self.cartridges:
neuron = cartridge.neurons[name]
neuron.add_num(num)
neuron.process_before_export()
if self.__class__.__name__ == 'Lamina':
neuron.params['circuit'] = 'cart' + str(cartridge.num)
else:
neuron.params['circuit'] = 'col' + str(cartridge.num)
g.add_node(num, neuron.params)
num += 1
for name in self.non_columnar_neurons.iterkeys():
for neuron in self.non_columnar_neurons[name]:
neuron.add_num(num)
neuron.process_before_export()
g.add_node(num, neuron.params)
num += 1
for cartridge in self.cartridges:
for synapse in cartridge.synapses:
synapse.process_before_export()
if self.__class__.__name__ == 'Lamina':
synapse.params['circuit'] = 'cart' + str(cartridge.num)
else:
synapse.params['circuit'] = 'col' + str(cartridge.num)
g.add_edge(synapse.pre_neuron.num, synapse.post_neuron.num,
attr_dict = synapse.params)
for cr in self.composition_rules:
for synapse in cr['synapses']:
synapse.process_before_export()
synapse.params['circuit'] = 'cr' + str(cr['num'])
g.add_edge(synapse.pre_neuron.num, synapse.post_neuron.num,
attr_dict = synapse.params)
return g
def export_to_gexf(self, filename):
g = self.to_graph()
nx.write_gexf(g, filename, prettyprint=True)
return g
def add_selectors(self):
for neuron_type in self.neuron_dict:
if not neuron_type['dummy']:
if neuron_type['columnar'] == 1:
if neuron_type['public'] == 1:
name = neuron_type['name']
for cartridge in self.cartridges:
neuron = cartridge.neurons[name]
neuron.add_selector(
'/'+self.LPU_name+'/cart{0}'.format(cartridge.num)
+'/'+name)
for name in self.non_columnar_neurons.iterkeys():
count = 0
for neuron in self.non_columnar_neurons[name]:
if neuron.is_public():
neuron.add_selector(
'/'+self.LPU_name+'/'+name+'[{0}]'.format(count))
count += 1
class Lamina(vision_LPU):
def __init__(self, nrows, ncols, neuron_csv,
columnar_synapse_csv, other_synapse_csv):
super(Lamina, self).__init__(nrows, ncols, neuron_csv,
columnar_synapse_csv, other_synapse_csv,
'lamina')
def connect_composition_II(self):
# create synapses defined in composition rule II.
if not self._connected:
raise AttributeError("Need to connect cartridges before setting interconnects")
self.rule2synapses = self.synapse_dict[self.synapse_dict['cart'] != 0]
synapse_list = []
dtnames = self.rule2synapses.dtype.names
for cartridge in self.cartridges:
for synapse_array in self.rule2synapses:
neighbor_num = synapse_array['cart']
if cartridge.neighbors[neighbor_num] is not None:
synapse = Synapse(
dict(zip(dtnames, [np.asscalar(p) for p in synapse_array])))
synapse.link(
cartridge.neurons[synapse_array['prename']],
cartridge.neighbors[neighbor_num].neurons[synapse_array['postname']])
synapse_list.append(synapse)
self.composition_rules.append({'synapses': synapse_list, 'num':2})
def connect_composition_I(self):
am_list = self.non_columnar_neurons['Am']
synapse_list = []
n_amacrine = len(am_list) # self.non_columnar_neuron_number['Am']
am_xpos = np.random.random(n_amacrine)*self.hexarray.X[-1,-1]
am_ypos = np.random.random(n_amacrine)*self.hexarray.Y[-1,-1]
count = 0
for neuron in am_list:
neuron.assign_pos(np.asscalar(am_xpos[count]),
np.asscalar(am_ypos[count]))
neuron.params['circuit'] = 'cr1'
count += 1
bound = 4.0
alpha_profiles = ['a1', 'a2', 'a3', 'a4', 'a5', 'a6']
        fill = np.zeros((n_amacrine, self.num_cartridges), np.int32)
count = 0
for cartridge in self.cartridges:
xpos = cartridge.xpos
ypos = cartridge.ypos
# calculate distance and find amacrine cells within
# distance defined by bound
dist = np.sqrt((xpos-am_xpos)**2 + (ypos-am_ypos)**2)
suitable_am = np.nonzero(dist <= bound)[0]
# if less than 4 neurons in the bound, get
# the 4 closest amacrine cells
if suitable_am.size < 4:
suitable_am = np.argsort(dist)[0:4]
for name in alpha_profiles:
assigned = False
for am_num in np.random.permutation(suitable_am):
if fill[am_num, count] < 3:
fill[am_num, count] += 1
#a1-a6 do not have synapses outside a cartridge
synapses = cartridge.replace_dummy(name, am_list[am_num])
synapse_list.extend(synapses)
assigned = True
break
if not assigned:
print name + ' in cartridge ' + str(cartridge.num) + ' not assigned'
count += 1
self.fill = fill
self.composition_rules.append( {'synapses': synapse_list, 'num':1} )
def __repr__(self):
if hasattr(self, 'cartridges'):
return 'Lamina LPU with '+str(len(self.cartridges))+' cartridges'
else:
return 'Lamina LPU unconfigured'
class Medulla(vision_LPU):
def __init__(self, nrows, ncols, neuron_csv,
columnar_synapse_csv, other_synapse_csv):
super(Medulla, self).__init__(nrows, ncols, neuron_csv,
columnar_synapse_csv, other_synapse_csv,
'medulla')
def connect_composition_I(self):
if not self._connected:
raise AttributeError("Need to connect cartridges before setting interconnects")
self.rule1synapses = self.synapse_dict[self.synapse_dict['cart'] != 0]
synapse_list = []
dtnames = self.rule1synapses.dtype.names
for cartridge in self.cartridges:
for synapse_array in self.rule1synapses:
neighbor_num = synapse_array['cart']
if cartridge.neighbors[neighbor_num] is not None:
synapse = Synapse(
dict(zip(dtnames, [np.asscalar(p) for p in synapse_array])))
synapse.link(
cartridge.neurons[synapse_array['prename']],
cartridge.neighbors[neighbor_num].neurons[synapse_array['postname']])
synapse_list.append(synapse)
self.composition_rules.append({'synapses': synapse_list, 'num':1})
def connect_composition_II(self):
synapse_list = []
rule2synapses = self.other_synapse_dict[self.other_synapse_dict['postname'] == 'Dm3']
dtnames = rule2synapses.dtype.names
synapse_array = rule2synapses[0]
for cartridge in self.cartridges:
if cartridge.neighbors[2] is not None:
synapse = Synapse(
dict(zip(dtnames, [np.asscalar(p) for p in synapse_array])))
synapse.link(
cartridge.neurons['L2'],
cartridge.neighbors[2].neurons['Dm3'])
synapse_list.append(synapse)
if cartridge.neighbors[3] is not None:
synapse = Synapse(
dict(zip(dtnames, [np.asscalar(p) for p in synapse_array])))
synapse.link(
cartridge.neurons['L2'],
cartridge.neighbors[3].neurons['Dm3'])
synapse_list.append(synapse)
if cartridge.neighbors[5] is not None:
synapse = Synapse(
dict(zip(dtnames, [np.asscalar(p) for p in synapse_array])))
synapse.link(
cartridge.neurons['L2'],
cartridge.neighbors[5].neurons['Dm3'])
synapse_list.append(synapse)
if cartridge.neighbors[6] is not None:
synapse = Synapse(
dict(zip(dtnames, [np.asscalar(p) for p in synapse_array])))
synapse.link(
cartridge.neurons['L2'],
cartridge.neighbors[6].neurons['Dm3'])
synapse_list.append(synapse)
if cartridge.neighbors[2] is not None:
if cartridge.neighbors[2].neighbors[3] is not None:
synapse = Synapse(
dict(zip(dtnames, [np.asscalar(p) for p in synapse_array])))
synapse.link(
cartridge.neurons['L2'],
cartridge.neighbors[2].neighbors[3].neurons['Dm3'])
synapse_list.append(synapse)
elif cartridge.neighbors[3] is not None:
if cartridge.neighbors[3].neighbors[2] is not None:
synapse = Synapse(
dict(zip(dtnames, [np.asscalar(p) for p in synapse_array])))
synapse.link(
cartridge.neurons['L2'],
cartridge.neighbors[3].neighbors[2].neurons['Dm3'])
synapse_list.append(synapse)
if cartridge.neighbors[5] is not None:
if cartridge.neighbors[5].neighbors[6] is not None:
synapse = Synapse(
dict(zip(dtnames, [np.asscalar(p) for p in synapse_array])))
synapse.link(
cartridge.neurons['L2'],
cartridge.neighbors[5].neighbors[6].neurons['Dm3'])
synapse_list.append(synapse)
elif cartridge.neighbors[6] is not None:
if cartridge.neighbors[6].neighbors[5] is not None:
synapse = Synapse(
dict(zip(dtnames, [np.asscalar(p) for p in synapse_array])))
synapse.link(
cartridge.neurons['L2'],
cartridge.neighbors[6].neighbors[5].neurons['Dm3'])
synapse_list.append(synapse)
self.composition_rules.append({'synapses': synapse_list, 'num':2})
def connect_composition_III(self):
synapse_list = []
Mt3v_list = self.non_columnar_neurons['Mt3v']
Mt3h_list = self.non_columnar_neurons['Mt3h']
for neuron in Mt3v_list:
neuron.assign_pos(0., 0.)
neuron.params['circuit'] = 'cr3'
for neuron in Mt3h_list:
neuron.assign_pos(0., 0.)
neuron.params['circuit'] = 'cr3'
rule3synapsesv = self.other_synapse_dict[self.other_synapse_dict['postname'] == 'Mt3v']
rule3synapsesh = self.other_synapse_dict[self.other_synapse_dict['postname'] == 'Mt3h']
dtnames = rule3synapsesv.dtype.names
for cartridge in self.cartridges:
synapse = Synapse(dict(zip(dtnames, [np.asscalar(p) for p in rule3synapsesv[0]])))
mtn = int(np.floor(cartridge.neurons['L2'].ypos / ((self.hexarray.Y[-1][-1]+1)/4)))
synapse.link(cartridge.neurons['L2'], Mt3v_list[mtn])
synapse_list.append(synapse)
synapse = Synapse(dict(zip(dtnames, [np.asscalar(p) for p in rule3synapsesh[0]])))
mtn = int(np.floor(cartridge.neurons['L2'].xpos / ((self.hexarray.X[-1][-1]+1)/4)))
synapse.link(cartridge.neurons['L2'], Mt3h_list[mtn])
synapse_list.append(synapse)
self.composition_rules.append({'synapses': synapse_list, 'num':3})
def __repr__(self):
if hasattr(self, 'cartridges'):
return 'Medulla LPU with '+str(len(self.cartridges))+' cartridges'
else:
return 'Medulla LPU unconfigured'
class Cartridge(object):
def __init__(self, neuron, connection):
self.connected = False
self.neuron_list = neuron.copy()
self.synapse_list = connection.copy()
self.neurons = collections.OrderedDict()
dtnames = self.neuron_list.dtype.names
for neuron_dict in self.neuron_list:
self.neurons.update(
{neuron_dict['name']:
Neuron(dict(zip(dtnames, [np.asscalar(p) for p in neuron_dict])))})
dtnames = self.synapse_list.dtype.names
self.synapses = []
for synapse_dict in self.synapse_list:
synapse = Synapse(
dict(zip(dtnames, [np.asscalar(p) for p in synapse_dict])))
synapse.link(self.neurons[synapse.prename],
self.neurons[synapse.postname])
self.synapses.append(synapse)
def set_neighbors(self, neighbor_cartridges):
self.neighbors = []
for i in range(7):
self.neighbors.append(neighbor_cartridges[i])
def assign_pos(self, num, row, col, xpos, ypos):
self.num = num
self.row = row
self.col = col
self.xpos = xpos
self.ypos = ypos
for neurons in self.neurons:
self.neurons[neurons].assign_pos(xpos, ypos)
self.connected = True
def position(self):
return (self.xpos, self.ypos)
def __repr__(self):
if self.connected:
return 'Cartridge at ' + str(self.position())
else:
return 'Isolated cartridge at '+ hex(id(self))
def get_num(self):
return self.num
def get_xpos(self):
return self.xpos
def get_ypos(self):
return self.ypos
def replace_dummy(self, name, neuron):
removed_synapse_list = []
neuron_to_be_replaced = self.neurons[name]
if not neuron_to_be_replaced.dummy:
raise ValueError("Neuron to be replaced is not dummy element")
for synapse in neuron_to_be_replaced.outgoing_synapses:
flag = self.remove_synapse(synapse)
synapse.replace_pre(neuron)
if flag:
removed_synapse_list.append(synapse)
for synapse in neuron_to_be_replaced.incoming_synapses:
flag = self.remove_synapse(synapse)
synapse.replace_post(neuron)
if flag:
removed_synapse_list.append(synapse)
self.neurons[name].set_parent(neuron)
#self.remove_neuron(name)
return removed_synapse_list
def remove_neuron(self, name):
self.neurons.pop(name)
def remove_synapse(self, synapse):
# the try/except here is to deal with Am to Am connection that
# may have been removed previously by another Am in the same cartridge
try:
self.synapses.remove(synapse)
return True
        except ValueError:
return False
class Neuron(object):
def __init__(self, param_dict):
self.params = param_dict.copy()
spiking = False
self.params.update({'spiking': spiking})
if 'dummy' in self.params.keys():
self.dummy = self.params.pop('dummy')
else:
self.dummy = False
self.outgoing_synapses = []
self.incoming_synapses = []
@property
def name(self):
return self.params['name']
def add_outgoing_synapse(self, synapse):
self.outgoing_synapses.append(synapse)
def add_incoming_synapse(self, synapse):
self.incoming_synapses.append(synapse)
def remove_outgoing_synapse(self, synapse):
self.outgoing_synapses.remove(synapse)
def remove_incoming_synapse(self, synapse):
self.incoming_synapses.remove(synapse)
def __repr__(self):
return 'neuron '+self.params['name']+': '+str(self.params)
def __str__(self):
return 'neuron '+str(self.params['name'])
def assign_pos(self, xpos, ypos):
self.params.update({'xpos': xpos, 'ypos': ypos})
def position(self):
return (self.params['xpos'], self.params['ypos'])
@property
def xpos(self):
return self.params['xpos']
@property
def ypos(self):
return self.params['ypos']
def add_num(self, num):
self.num = num
def process_before_export(self):
self.params.update({'n_dendrites': len(self.incoming_synapses),
'n_outputs': len(self.outgoing_synapses)})
if 'columnar' in self.params.keys():
del self.params['columnar']
self.params['input'] = bool(self.params['input'])
self.params['output'] = bool(self.params['output'])
self.params['public'] = bool(self.params['public'])
self.params['extern'] = bool(self.params['extern'])
self.params['model'] = str(self.params['model'])
def is_public(self):
return self.params['public']
def add_selector(self, selector):
self.params['selector'] = selector
@property
def selector(self):
return self.params['selector']
def set_parent(self, neuron):
self.parent = neuron
class Synapse(object):
def __init__(self, param_dict):
self.params = param_dict.copy()
self.params.update({'conductance': True})
def link(self, pre_neuron, post_neuron):
self.pre_neuron = pre_neuron
self.post_neuron = post_neuron
self.pre_neuron.add_outgoing_synapse(self)
self.post_neuron.add_incoming_synapse(self)
self.update_class(self.get_class(self.pre_neuron, self.post_neuron))
def replace_pre(self, pre_neuron):
self.pre_neuron = pre_neuron
self.pre_neuron.add_outgoing_synapse(self)
self.params['prename'] = pre_neuron.name
def replace_post(self, post_neuron):
self.post_neuron = post_neuron
self.post_neuron.add_incoming_synapse(self)
self.params['postname'] = post_neuron.name
def __repr__(self):
return ('synapse from '+self.params['prename']+' to ' + self.params['postname']
+ ': '+str(self.params))
def __str__(self):
return 'synapse '+str(self.params['prename'])+' to '+self.params['postname']
def process_before_export(self):
if 'cart' in self.params.keys():
del self.params['cart']
if 'scale' in self.params.keys():
self.params['slope'] *= self.params['scale']
self.params['saturation'] *= self.params['scale']
del self.params['scale']
self.params['model'] = str(self.params['model'])
@staticmethod
def get_class(preneuron, postneuron):
""" preneuron: Neuron instance
postneuron: Neuron instance
"""
is_pre_spk = preneuron.params['spiking']
is_post_spk = postneuron.params['spiking']
if is_pre_spk and is_post_spk:
return 0
elif is_pre_spk and not is_post_spk:
return 1
elif not is_pre_spk and is_post_spk:
return 2
elif not is_pre_spk and not is_post_spk:
return 3
def update_class(self, cls):
self.params.update({'class': cls})
@property
def prename(self):
return self.params['prename']
@property
def postname(self):
return self.params['postname']
def create_pattern(n_dict_1, n_dict_2, save_as=None):
"""
If `save_as` is not None, save the pattern in GEXF format as the specified file name.
"""
lpu1_sel_in_gpot = plsel.Selector(LPU.extract_in_gpot(n_dict_1))
lpu1_sel_out_gpot = plsel.Selector(LPU.extract_out_gpot(n_dict_1))
lpu2_sel_in_gpot = plsel.Selector(LPU.extract_in_gpot(n_dict_2))
lpu2_sel_out_gpot = plsel.Selector(LPU.extract_out_gpot(n_dict_2))
lpu1_sel_in_spike = plsel.Selector(LPU.extract_in_spk(n_dict_1))
lpu1_sel_out_spike = plsel.Selector(LPU.extract_out_spk(n_dict_1))
lpu2_sel_in_spike = plsel.Selector(LPU.extract_in_spk(n_dict_2))
lpu2_sel_out_spike = plsel.Selector(LPU.extract_out_spk(n_dict_2))
lpu1_sel_out = plsel.Selector.union(lpu1_sel_out_gpot, lpu1_sel_out_spike)
lpu2_sel_out = plsel.Selector.union(lpu2_sel_out_gpot, lpu2_sel_out_spike)
lpu1_sel_in = plsel.Selector.union(lpu1_sel_in_gpot, lpu1_sel_in_spike)
lpu2_sel_in = plsel.Selector.union(lpu2_sel_in_gpot, lpu2_sel_in_spike)
lpu1_sel = plsel.Selector.union(lpu1_sel_out, lpu1_sel_in)
lpu2_sel = plsel.Selector.union(lpu2_sel_out, lpu2_sel_in)
Neuron_list_12 = ['L1', 'L2', 'L3', 'L4', 'L5', 'T1']
Neuron_list_21 = ['C2', 'C3']
gpot_sel = plsel.Selector.union(lpu1_sel_out_gpot, lpu1_sel_in_gpot,
lpu2_sel_out_gpot, lpu2_sel_in_gpot)
spike_sel = plsel.Selector.union(lpu1_sel_out_spike, lpu1_sel_in_spike,
lpu2_sel_out_spike, lpu2_sel_in_spike)
Neuron_str_12 = '['+','.join(Neuron_list_12)+']'
Neuron_str_21 = '['+','.join(Neuron_list_21)+']'
cart_str = '['+','.join(['cart%i' % i for i in range(768)])+']'
from_sel_12 = '/lamina'+cart_str+Neuron_str_12
to_sel_12 = '/medulla'+cart_str+Neuron_str_12
from_sel_21 = '/medulla'+cart_str+Neuron_str_21
to_sel_21 = '/lamina'+cart_str+Neuron_str_21
from_sel = from_sel_12 + ',' + from_sel_21
to_sel = to_sel_12 + ',' + to_sel_21
pat = Pattern.from_concat(lpu1_sel, lpu2_sel,
from_sel=from_sel, to_sel=to_sel,
gpot_sel=gpot_sel, spike_sel=spike_sel, data=1)
if save_as:
nx.write_gexf(pat.to_graph(), save_as, prettyprint=True)
return pat
def append_field(rec, name, arr, dtype=None):
arr = np.asarray(arr)
if dtype is None:
dtype = arr.dtype
newdtype = np.dtype(rec.dtype.descr + [(name, dtype)])
newrec = np.empty(rec.shape, dtype=newdtype)
for field in rec.dtype.fields:
newrec[field] = rec[field]
newrec[name] = arr
return newrec
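# Illustrative only: append_field() copies a structured array and adds one
# extra named column.
if __name__ == '__main__':
    _rec = np.array([(1, 2.0), (3, 4.0)],
                    dtype=[('a', np.int32), ('b', np.double)])
    _rec2 = append_field(_rec, 'c', [10.0, 20.0])
    print(_rec2.dtype.names)   # ('a', 'b', 'c')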
|
neurokernel/vision
|
examples/data/vision_configuration.py
|
Python
|
bsd-3-clause
| 33,683
|
[
"NEURON"
] |
707b1e675bb07a043a9f6c3cfeabe8339c9751b41b2c1ec70784ccf244371c8e
|
# -*- coding: utf-8 -*-
# @Author: YangZhou
# @Date: 2016-09-05 19:22:06
# @Last Modified by: YangZhou
# @Last Modified time: 2017-06-18 22:36:31
import aces.tools as tl
from aces.runners.phonopy import runner as Runner
class runner(Runner):
def generate(self):
tl.cp('minimize/POSCAR', '.')
self.getVaspRun_vasp()
def q(self):
a = tl.shell_exec(
"grep TOTEN OUTCAR |tail -1").split("=")[1].strip().replace("eV", "")
print(self.m.ecut, a)
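# A hedged pure-Python equivalent of the shell pipeline in q() above, assuming
# a standard VASP OUTCAR in which total-energy lines contain "TOTEN = ... eV".
def _last_toten(path='OUTCAR'):
    energy = None
    with open(path) as f:
        for line in f:
            if 'TOTEN' in line:
                energy = line.split('=')[-1].strip().replace('eV', '').strip()
    return energy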
|
vanceeasleaf/aces
|
aces/runners/scf.py
|
Python
|
gpl-2.0
| 500
|
[
"phonopy"
] |
0dbd4a9332baa06f355870713a3656c8fb5a9c706d02b491a4b276d3606ab50e
|
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# author: Reza Hosseini
""" This is an example of using the code to sequence the data"""
import sys
import os
import numpy as np
import pandas as pd
import random
import time
import datetime
import numpy as np
import time as time
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import math as math
import re as re
import inspect
import scipy as scipy
import functools
import itertools
import operator
import warnings
import json
import IPython
import hashlib
import base64
def GetContent(fn):
with open(fn, 'r') as f:
content = f.read()
return content
## specify the path for the source code
path = ''
srcFns = [path + 'expt-analysis/python/data_analysis.py',
path + 'expt-analysis/python/sequential_data.py']
for fn in srcFns: exec(GetContent(fn=fn))
# upload your data or simulate data by running this:
# Simulating data for demo purpose: (analyst does this)
df = Sim_depUsageData(userNum=5, subSeqLen=4, repeatPattern=None)
## the simulated data is already sorted and has the correct timestamp
df['date'].value_counts()
"""
This function takes timestamped event data and creates sequential data.
Inputs
df: data frame which has the data
timeCol: the column which includes the event times
timeColEnd: the column which marks the end of the event; this can be passed
the same as timeCol
seqDimCols: these are the building blocks for the sequence elements
for example [form_factor, product]
partitionCols: these are partition columns used to partition the data.
you will be able to slice by them in the sequential data generated.
for example partitionCols = [user_id, country]
timeGap: the length of time gap (inactivity) used to break the sequences.
seqPropCols: columns which are properties of events to be also tracked.
we build parallel sequences to the main sequence using these properties.
for example if seqPropCols = []
seqPropColsDeduped: a subset of seqPropCols which are to be deduped as well
ordered: If this is True the code will assume the data is already ordered wrt
time. If not it will order the data.
Output:
output is a data frame which includes sequential data.
The sequences are denoted as a1>a2>a3 where ">" is the separator
full_[col]_parallel: for a property given in col,
(we refer to these properties in code by seqPropCols),
this is the parallel sequence to "full_seq_deduped"
full_seq_deduped: this is the full sequence after complete deduping
full_seq_basket: this is the basket (set) of elements appearing
in the full sequence
trimmed_seq_deduped: this is the sequence after deduping and trimming.
This is usually the most important dimension for many use cases
trimmed_seq_basket: this is the set of elements appearing
in the trimmed sequence given in trimmed_seq_basket
trimmed_[col]_parallel: for a given property in col, e.g. form_factor,
this is the parallel sequence to the trimmed sequence
seq_shift_order: the data includes full sequences of actions for a user visit,
but it is also augmented by shifted versions of the sequences.
To restrict the data to sequences which start from time zero,
choose: seq_shift_order=0
full_seq_undeduped_length: the length of the undeduped sequence
full_seq_deduped_length: you can restrict the sequences of the represented data
by using this variable. For example you can choose all lengths
bigger than 1 to explore flows better.
event_1, event_2, …: you can restrict to, for example, the second
event being equal to a particular event.
[col]_mix: if a sequence includes only one value for a property given in [col]
this will be equal to that value.
If the property includes multiple values during the sequence/journey
then it is equal to "MIXED". For example for col = [form_factor] we might have
a sequence which changes the form factor: COMP > PHONE > COMP
which will be assigned "MIXED"
[col]_parallel is the parallel sequence built along the main sequence to
track a specific property.
subseq_1_2, subseq_1_2_3, subseq_1_2_3_4: these are shorter versions of the
main sequence data given in "full_seq_deduped"
"""
# Generate the sequential data here from raw data
outputFileName = 'test_shifted_seq' #no suffix needed
timeCol = 'time'
# timeColEnd could be the same as timeCol if you don't have the end time
timeColEnd = 'end_time'
timeGap = 2*60
# make sure user_id column is a string column
df['user_id'] = df['user_id'].map(ShortHash)
partitionCols = ['user_id', 'country']
seqDimCols = ['prod', 'form_factor']
seqPropCols = ['prod', 'form_factor']
seqPropColsDeduped = seqPropCols
writePath = '~/work/tables/seq-data-analysis/'
trim = 3
seqDf = BuildAndWriteSeqDf(
df=df,
fn=outputFileName,
seqDimCols=seqDimCols,
partitionCols=partitionCols,
timeGap=timeGap,
trim=trim,
timeCol=timeCol,
timeColEnd=timeColEnd,
seqPropCols=seqPropCols,
seqPropColsDeduped=seqPropColsDeduped,
writePath=writePath,
addOrigSeqInfo=True,
addBasket=True,
addLagInfo=False,
lagTrim=3,
ordered=True,
addResetDate_seqStartDate=True)
# inspect results
Mark(seqDf.shape)
seqDf[0:5]
|
google/expt-analysis
|
python/sequential_analysis_example.py
|
Python
|
apache-2.0
| 5,624
|
[
"VisIt"
] |
731c9499efd5da23dd349b382710358d7a1226a47cf1a5b2d0ca700b4d61b963
|
r"""
===============
Gaussian Shells
===============
Toy likelihood model for stress testing multiple-ellipsoid method.
The problem is:
.. math::
\mathcal{L}(\theta) = \mathrm{circ}(\theta; c_1, r_1, w_1) +
\mathrm{circ}(\theta; c_2, r_2, w_2)
where
.. math::
\mathrm{circ}(\theta; c, r, w) = \frac{1}{\sqrt{2 \pi w^2}}
\exp \left[ - \frac{(|\theta - c| - r)^2}{2 w^2} \right]
"""
import math
import time
from collections import OrderedDict
import numpy as np
from numpy.random import RandomState
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import nestle
rstate = RandomState(0)
###############################################################################
# In the following block, we define the problem. We use r = 2 and w = 0.1,
# meaning that the Gaussian is quite narrow compared to the size of the
# sphere.
r = 2.
w = 0.1
const = math.log(1. / math.sqrt(2. * math.pi * w**2))
def logcirc(theta, c):
d = np.sqrt(np.sum((theta - c)**2, axis=-1)) # |theta - c|
return const - (d - r)**2 / (2. * w**2)
def loglike(theta, c1, c2):
return np.logaddexp(logcirc(theta, c1), logcirc(theta, c2))
def prior_transform(x):
"""Defines a flat prior between -6 and 6 in all dimensions."""
return 12. * x - 6.
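# Quick illustrative check: the midpoint of the unit cube maps to the center
# of the flat prior.
assert np.all(prior_transform(np.array([0.5, 0.5])) == 0.)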
###############################################################################
# Visualize
# ---------
#
# It helps to visualize the surface in two dimensions. Here, we plot the
# likelihood evaluated on a fine grid and the sample points from nested
# sampling.
# likelihood surface in 2-d
xx, yy = np.meshgrid(np.linspace(-6., 6., 200),
np.linspace(-6., 6., 200))
c1 = np.array([-3.5, 0.])
c2 = np.array([3.5, 0.])
Z = np.exp(loglike(np.dstack((xx, yy)), c1, c2))
# nested sampling result
c1 = np.array([-3.5, 0.])
c2 = np.array([3.5, 0.])
f = lambda theta: loglike(theta, c1, c2)
res = nestle.sample(f, prior_transform, 2, method='multi', npoints=1000,
rstate=rstate)
fig = plt.figure(figsize=(14., 6.))
ax = fig.add_subplot(121, projection='3d')
ax.plot_surface(xx, yy, Z, rstride=1, cstride=1, linewidth=0, cmap='coolwarm')
ax.set_xlim(-6., 6.)
ax.set_ylim(-6., 6.)
ax.set_zlim(0., 4.)
ax.set_zlabel('L')
ax.set_title('Likelihood evaluated on fine grid')
ax = fig.add_subplot(122, projection='3d')
ax.scatter(res.samples[:,0], res.samples[:, 1], np.exp(res.logl),
marker='.', c=np.exp(res.logl), linewidths=(0.,), cmap='coolwarm')
ax.set_xlim(-6., 6.)
ax.set_ylim(-6., 6.)
ax.set_zlim(0., 4.)
ax.set_zlabel('L')
ax.set_title('Nested sampling points');
###############################################################################
# Scaling with dimension
# ----------------------
#
# Here, we demonstrate how the algorithm scales with dimension and compare
# the total evidence to the analytic answer.
npoints = 1000
def run(ndim):
"""Convenience function for running in any dimension"""
c1 = np.zeros(ndim)
c1[0] = -3.5
c2 = np.zeros(ndim)
c2[0] = 3.5
f = lambda theta: loglike(theta, c1, c2)
return nestle.sample(f, prior_transform, ndim, method='multi',
npoints=npoints, rstate=rstate)
# Run over dimensions and save time for each run.
results = OrderedDict()
for ndim in [2, 5, 10, 20]:
t0 = time.time()
results[ndim] = run(ndim)
results[ndim].time = time.time() - t0
analytic_logz = {2: -1.75,
5: -5.67,
10: -14.59,
20: -36.09}
print("D analytic logz logzerr nlike eff(%) time")
for ndim, res in results.items():
eff = 100. * res.niter/(res.ncall - npoints)
print("{:2d} {:6.2f} {:6.2f} {:4.2f} {:6d} {:5.2f} {:6.2f}"
.format(ndim, analytic_logz[ndim], res.logz, res.logzerr,
res.ncall, eff, res.time))
|
keflavich/nestle
|
examples/plot_shells.py
|
Python
|
mit
| 3,872
|
[
"Gaussian"
] |
0d4261b5dd7f78d81972b85bf1ed521dd312bcb6218326ec73b4fd0ba873ce4b
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2010 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
"""Payment Flow History Report Dialog"""
from storm.expr import And, Eq, Or
from stoqlib.database.expr import Date
from stoqlib.gui.dialogs.daterangedialog import DateRangeDialog
from stoqlib.gui.utils.printing import print_report
from stoqlib.lib.message import info
from stoqlib.lib.translation import stoqlib_gettext
from stoqlib.reporting.payment import PaymentFlowHistoryReport
_ = stoqlib_gettext
# A few comments for the payment_flow_query:
# - The first table in the FROM clause is the list of all possible dates
# (due_date and paid_date) in the results. This is done so that the subsequent
# subselect can be joined properly
# - In that same subselect, we use IS NOT NULL to avoid an empty row for
# payments that were not received yet.
# - We filter out the 'preview' and 'cancelled' statuses to not include PREVIEW and CANCELLED payments
# - payment_type = 1 are OUT_PAYMENTS and 0 are IN_PAYMENTS
payment_flow_query = """
SELECT all_payment_dates.date,
COALESCE(payments_to_pay.count, 0) as to_pay_payments,
COALESCE(payments_to_pay.to_pay, 0) as to_pay,
COALESCE(payments_paid.count, 0) as paid_payments,
COALESCE(payments_paid.paid, 0) as paid,
COALESCE(payments_to_receive.count, 0) as to_receive_payments,
COALESCE(payments_to_receive.to_receive, 0) as to_receive,
COALESCE(payments_received.count, 0) as received_payments,
COALESCE(payments_received.received, 0) as received
FROM (SELECT date(due_date) as date FROM payment
UNION SELECT date(paid_date) as date FROM payment WHERE
paid_date IS NOT NULL) as all_payment_dates
-- To pay (out payments)
LEFT JOIN (SELECT DATE(due_date) as date, count(1) as count, sum(value) as to_pay
FROM payment WHERE payment_type = 'out' AND status not in ('preview', 'cancelled')
GROUP BY DATE(due_date))
AS payments_to_pay ON (all_payment_dates.date = payments_to_pay.date)
-- Paid (out payments)
LEFT JOIN (SELECT DATE(paid_date) as date, count(1) as count, sum(value) as paid
FROM payment WHERE payment_type = 'out'
AND payment.status not in ('preview', 'cancelled')
GROUP BY DATE(paid_date))
AS payments_paid ON (all_payment_dates.date = payments_paid.date)
-- To receive (in payments)
LEFT JOIN (SELECT DATE(due_date) as date, count(1) as count, sum(value) as to_receive
FROM payment WHERE payment_type = 'in'
AND payment.status not in ('preview', 'cancelled')
GROUP BY DATE(due_date))
AS payments_to_receive ON (all_payment_dates.date = payments_to_receive.date)
-- Received (in payments)
LEFT JOIN (SELECT DATE(paid_date) as date, count(1) as count, sum(value) as received
FROM payment WHERE payment_type = 'in'
AND payment.status not in ('preview', 'cancelled')
GROUP BY DATE(paid_date))
AS payments_received ON (all_payment_dates.date = payments_received.date)
ORDER BY all_payment_dates.date;
"""
class PaymentFlowDay(object):
def __init__(self, store, row, previous_day=None):
"""Payment Flow History for a given date
:param row: A list of values from the payment_flow_query above
:param previous_day: The `previous_day <PaymentFlowDay>`. This is used
to calculate the expected and real balances for each day (based on the
previous dates).
"""
(date, to_pay_count, to_pay, paid_count, paid, to_receive_count,
to_receive, received_count, received) = row
self.history_date = date
# values
self.to_pay = to_pay
self.to_receive = to_receive
self.paid = paid
self.received = received
# counts
self.to_pay_payments = to_pay_count
self.to_receive_payments = to_receive_count
self.paid_payments = paid_count
self.received_payments = received_count
if previous_day:
self.previous_balance = previous_day.balance_real
else:
self.previous_balance = 0
        # Today's balance is the previous day's balance, plus the payments we
        # received, minus what we paid. The expected balance is for the
        # payments we should have paid/received.
self.balance_expected = self.previous_balance + to_receive - to_pay
self.balance_real = self.previous_balance + received - paid
self.store = store
def get_divergent_payments(self):
"""Returns a :class:`Payment` sequence that meet the following requirements:
* The payment due date, paid date or cancel date is the current
PaymentFlowHistory date.
* The payment was paid/received with different values (eg with
discount or surcharge).
        * The payment was scheduled to be paid/received on the current
          date, but it was not.
* The payment was not expected to be paid/received on the current date.
"""
from stoqlib.domain.payment.payment import Payment
date = self.history_date
query = And(Or(Date(Payment.due_date) == date,
Date(Payment.paid_date) == date,
Date(Payment.cancel_date) == date),
Or(Eq(Payment.paid_value, None),
Payment.value != Payment.paid_value,
Eq(Payment.paid_date, None),
Date(Payment.due_date) != Date(Payment.paid_date)))
return self.store.find(Payment, query)
@classmethod
def get_flow_history(cls, store, start, end):
"""Get the payment flow history for a given date interval
This will return a list of PaymentFlowDay, one for each date that has
payments registered and are in the interval specified.
"""
history = []
previous_entry = None
for row in store.execute(payment_flow_query).get_all():
entry = cls(store, row, previous_entry)
if entry.history_date > end:
break
# We only store entries for dates higher than the user requested, but
# we still need to create the entries from the beginning, so we
# have the real balances
if entry.history_date >= start:
history.append(entry)
previous_entry = entry
return history
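# Illustrative only (no database needed): the day-to-day balance recurrence
# used in PaymentFlowDay.__init__, on made-up (to_pay, paid, to_receive,
# received) tuples.
def _demo_balances(rows):
    previous = 0
    balances = []
    for to_pay, paid, to_receive, received in rows:
        expected = previous + to_receive - to_pay
        real = previous + received - paid
        balances.append((expected, real))
        previous = real
    return balances
# _demo_balances([(10, 10, 50, 40), (0, 5, 0, 0)]) -> [(40, 30), (30, 25)]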
class PaymentFlowHistoryDialog(DateRangeDialog):
title = _(u'Payment Flow History Dialog')
desc = _("Select a date or a range to be visualised in the report:")
size = (-1, -1)
def __init__(self, store):
"""A dialog to print the PaymentFlowHistoryReport report.
:param store: a store
"""
self.store = store
DateRangeDialog.__init__(self, title=self.title, header_text=self.desc)
#
# BasicDialog
#
def confirm(self):
DateRangeDialog.confirm(self)
start = self.retval.start
end = self.retval.end
results = PaymentFlowDay.get_flow_history(self.store, start, end)
if not results:
info(_('No payment history found.'))
return False
print_report(PaymentFlowHistoryReport, payment_histories=results)
return True
|
andrebellafronte/stoq
|
stoqlib/gui/dialogs/paymentflowhistorydialog.py
|
Python
|
gpl-2.0
| 8,070
|
[
"VisIt"
] |
ba4d76032d76c05c4d0b79d33be573a94d59ef0c4bf7b7ef24d4f2621c8c3fd1
|
import tensorflow as tf
import numpy as np
import autoencoder.Utils
class VariationalAutoencoder(object):
def __init__(self, n_input, n_hidden, optimizer = tf.train.AdamOptimizer()):
self.n_input = n_input
self.n_hidden = n_hidden
network_weights = self._initialize_weights()
self.weights = network_weights
# model
self.x = tf.placeholder(tf.float32, [None, self.n_input])
self.z_mean = tf.add(tf.matmul(self.x, self.weights['w1']), self.weights['b1'])
self.z_log_sigma_sq = tf.add(tf.matmul(self.x, self.weights['log_sigma_w1']), self.weights['log_sigma_b1'])
        # sample from Gaussian distribution
eps = tf.random_normal(tf.pack([tf.shape(self.x)[0], self.n_hidden]), 0, 1, dtype = tf.float32)
self.z = tf.add(self.z_mean, tf.mul(tf.sqrt(tf.exp(self.z_log_sigma_sq)), eps))
self.reconstruction = tf.add(tf.matmul(self.z, self.weights['w2']), self.weights['b2'])
# cost
reconstr_loss = 0.5 * tf.reduce_sum(tf.pow(tf.sub(self.reconstruction, self.x), 2.0))
latent_loss = -0.5 * tf.reduce_sum(1 + self.z_log_sigma_sq
- tf.square(self.z_mean)
- tf.exp(self.z_log_sigma_sq), 1)
self.cost = tf.reduce_mean(reconstr_loss + latent_loss)
self.optimizer = optimizer.minimize(self.cost)
init = tf.initialize_all_variables()
self.sess = tf.Session()
self.sess.run(init)
def _initialize_weights(self):
all_weights = dict()
all_weights['w1'] = tf.Variable(autoencoder.Utils.xavier_init(self.n_input, self.n_hidden))
all_weights['log_sigma_w1'] = tf.Variable(autoencoder.Utils.xavier_init(self.n_input, self.n_hidden))
all_weights['b1'] = tf.Variable(tf.zeros([self.n_hidden], dtype=tf.float32))
all_weights['log_sigma_b1'] = tf.Variable(tf.zeros([self.n_hidden], dtype=tf.float32))
all_weights['w2'] = tf.Variable(tf.zeros([self.n_hidden, self.n_input], dtype=tf.float32))
all_weights['b2'] = tf.Variable(tf.zeros([self.n_input], dtype=tf.float32))
return all_weights
def partial_fit(self, X):
cost, opt = self.sess.run((self.cost, self.optimizer), feed_dict={self.x: X})
return cost
def calc_total_cost(self, X):
return self.sess.run(self.cost, feed_dict = {self.x: X})
def transform(self, X):
return self.sess.run(self.z_mean, feed_dict={self.x: X})
    def generate(self, hidden = None):
        if hidden is None:
            # Note: the original passed self.weights["b1"] (a tf.Variable) as
            # `size`, which is not a valid numpy size, and fed z_mean, which
            # still requires the x placeholder downstream. Sampling a latent
            # vector and feeding z directly is assumed to be the intent.
            hidden = np.random.normal(size=(1, self.n_hidden))
        return self.sess.run(self.reconstruction, feed_dict={self.z: hidden})
def reconstruct(self, X):
return self.sess.run(self.reconstruction, feed_dict={self.x: X})
def getWeights(self):
return self.sess.run(self.weights['w1'])
def getBiases(self):
return self.sess.run(self.weights['b1'])
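# Illustrative usage sketch, assuming the legacy TensorFlow (<= 0.12) API this
# class is written against (tf.pack/tf.mul/tf.sub, initialize_all_variables).
# The data below is a random stand-in, not a real dataset.
if __name__ == '__main__':
    X = np.random.rand(128, 784).astype(np.float32)
    vae = VariationalAutoencoder(n_input=784, n_hidden=20)
    for epoch in range(5):
        print(epoch, vae.partial_fit(X))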
|
plowman/python-mcparseface
|
models/autoencoder/autoencoder_models/VariationalAutoencoder.py
|
Python
|
apache-2.0
| 2,980
|
[
"Gaussian"
] |
2513ae23722d66f011aaff4c4e41d260853a0b72d59f72b91b77ddb2b82d1975
|
## \file
## \ingroup tutorial_pyroot
## \notebook
## Example of function called when a mouse event occurs in a pad.
## When moving the mouse in the canvas, a second canvas shows the
## projection along X of the bin corresponding to the Y position
## of the mouse. The resulting histogram is fitted with a gaussian.
## A "dynamic" line shows the current bin position in Y.
## This more elaborated example can be used as a starting point
## to develop more powerful interactive applications exploiting CINT
## as a development engine.
##
## Note that a class is used to hold on to the canvas that display
## the selected slice.
##
## \macro_image
## \macro_code
##
## \author Rene Brun, Johann Cohen-Tanugi, Wim Lavrijsen
import sys
from ROOT import gRandom, gPad, gROOT, gVirtualX
from ROOT import kTRUE, kRed
from ROOT import TCanvas, TH2, TH2F, Double
class DynamicExec:
def __init__( self ):
self._cX = None
self._cY = None
self._old = None
def __call__( self ):
h = gPad.GetSelected();
if not h:
return
if not isinstance( h, TH2 ):
return
gPad.GetCanvas().FeedbackMode( kTRUE )
# erase old position and draw a line at current position
px = gPad.GetEventX()
py = gPad.GetEventY()
uxmin, uxmax = gPad.GetUxmin(), gPad.GetUxmax()
uymin, uymax = gPad.GetUymin(), gPad.GetUymax()
pxmin, pxmax = gPad.XtoAbsPixel( uxmin ), gPad.XtoAbsPixel( uxmax )
pymin, pymax = gPad.YtoAbsPixel( uymin ), gPad.YtoAbsPixel( uymax )
      if self._old is not None:
gVirtualX.DrawLine( pxmin, self._old[1], pxmax, self._old[1] )
gVirtualX.DrawLine( self._old[0], pymin, self._old[0], pymax )
gVirtualX.DrawLine( pxmin, py, pxmax, py )
gVirtualX.DrawLine( px, pymin, px, pymax )
self._old = px, py
upx = gPad.AbsPixeltoX( px )
x = gPad.PadtoX( upx )
upy = gPad.AbsPixeltoY( py )
y = gPad.PadtoY( upy )
padsav = gPad
# create or set the display canvases
if not self._cX:
self._cX = TCanvas( 'c2', 'Projection Canvas in X', 730, 10, 700, 500 )
else:
self._DestroyPrimitive( 'X' )
if not self._cY:
self._cY = TCanvas( 'c3', 'Projection Canvas in Y', 10, 550, 700, 500 )
else:
self._DestroyPrimitive( 'Y' )
self.DrawSlice( h, y, 'Y' )
self.DrawSlice( h, x, 'X' )
padsav.cd()
def _DestroyPrimitive( self, xy ):
proj = getattr( self, '_c'+xy ).GetPrimitive( 'Projection '+xy )
if proj:
proj.IsA().Destructor( proj )
def DrawSlice( self, histo, value, xy ):
      yx = 'Y' if xy == 'X' else 'X'
# draw slice corresponding to mouse position
canvas = getattr( self, '_c'+xy )
canvas.SetGrid()
canvas.cd()
bin = getattr( histo, 'Get%saxis' % xy )().FindBin( value )
hp = getattr( histo, 'Projection' + yx )( '', bin, bin )
hp.SetFillColor( 38 )
hp.SetName( 'Projection ' + xy )
hp.SetTitle( xy + 'Projection of bin=%d' % bin )
hp.Fit( 'gaus', 'ql' )
hp.GetFunction( 'gaus' ).SetLineColor( kRed )
hp.GetFunction( 'gaus' ).SetLineWidth( 6 )
canvas.Update()
if __name__ == '__main__':
# create a new canvas.
c1 = TCanvas('c1', 'Dynamic Slice Example', 10, 10, 700, 500 )
c1.SetFillColor( 42 )
c1.SetFrameFillColor( 33 )
# create a 2-d histogram, fill and draw it
hpxpy = TH2F( 'hpxpy', 'py vs px', 40, -4, 4, 40, -4, 4 )
hpxpy.SetStats( 0 )
x, y = Double( 0.1 ), Double( 0.101 )
for i in xrange( 50000 ):
gRandom.Rannor( x, y )
hpxpy.Fill( x, y )
hpxpy.Draw( 'COL' )
# Add a TExec object to the canvas (explicit use of __main__ is for IPython)
import __main__
__main__.slicer = DynamicExec()
c1.AddExec( 'dynamic', 'TPython::Exec( "slicer()" );' )
c1.Update()
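# --- Illustrative aside (not part of the original tutorial): the same
# AddExec pattern works for any Python callable exported through __main__;
# the callback below is hypothetical and only prints the event position.
#
# def report():
#     print('event at pixel (%d, %d)' % (gPad.GetEventX(), gPad.GetEventY()))
# __main__.report = report
# c1.AddExec( 'report', 'TPython::Exec( "report()" );' )
# # c1.DeleteExec( 'report' ) detaches the callback again (TPad API)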
|
veprbl/root
|
tutorials/pyroot/DynamicSlice.py
|
Python
|
lgpl-2.1
| 3,860
|
[
"Gaussian"
] |
b6266b4a6a085931926f23889288f89233878afb5427da3c17156dcaa8932484
|
"""
A collection of functions for analyzing tuning properties of the cells.
"""
import numpy as np
def resp_vs_attr(sims, attr, resp_type = "t_resp", rc=[0.0, 0.0], indices=None):
"""
returns the max response of every cell in
each simulation with respect to a given simulation
attribute (attr).
Parameters
----------
sims : list
list of Simulation objects
attr: str
name of the attribute
resp_type: str
response type: t_resp, w_resp
rc: array_like
neuron position indices
indices:
response indices
Returns
-------
dict
max response of all cells with
respect to attr.
"""
from collections import defaultdict
resps = defaultdict(list)
for sim in sims:
resps[attr].append(sim.get_attribute(attr))
for cell_type in sim.cell_types:
neuron = getattr(sim, cell_type)
if indices is not None:
resp = getattr(neuron, resp_type)(rc)[[indices]]
else:
resp = getattr(neuron, resp_type)(rc)
#resp = np.absolute(resp).max()
resp = max(resp, key=np.absolute)
resps[str(cell_type)].append(resp)
return resps
def resp_vs_attrA_vs_attrB(sims, attrA, attrB, resp_type = "t_resp", rc=[0.0, 0.0], indices=None):
"""
returns the max response with respect to
attributes attrA and attrB.
Parameters
----------
sims : list
list of Simulation objects
attrA: str
name of the attribute A
attrB: str
name of the attribute B
resp_type: str
response type: t_resp, w_resp
rc: array_like
neuron position indices
indices:
response indices
Returns
-------
attrA_vec: array
array with attrA values
attrB_vec: array
array with attrB values
resp: dict
2d response array (attrA x attrB) for each cell type.
"""
import data_extractor as de
attrA_vec = de.extract_unique_simulation_attrs(sims, attrA)
attrB_vec = de.extract_unique_simulation_attrs(sims, attrB)
resp={}
for cell_type in sims[0].cell_types:
resp[str(cell_type)] = np.zeros([len(attrA_vec), len(attrB_vec)])
for i, a in enumerate(attrA_vec):
sims_ext = de.simulation_extractor(sims, attrA, a)
data = resp_vs_attr(sims_ext, attrB, resp_type, rc, indices)
sorted_indices = np.argsort(data[attrB])
for cell_type in sims[0].cell_types:
resp[str(cell_type)][i,:] = np.array(data[cell_type])[sorted_indices]
return attrA_vec, attrB_vec, resp
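# --- Hypothetical usage sketch (not part of the original file); the loader,
# attribute names and cell type below are illustrative only:
#
# sims = load_simulations(...)                    # package-specific loader
# resps = resp_vs_attr(sims, "C", resp_type="t_resp", rc=[0.0, 0.0])
# C, d, resp = resp_vs_attrA_vs_attrB(sims, "C", "diameter")
# # resp["relay"] is then a len(C) x len(d) array of max responses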
|
miladh/lgn-simulator
|
tools/analysis/tuning_analysis.py
|
Python
|
gpl-3.0
| 2,629
|
[
"NEURON"
] |
3fa11700d6a8669b63d3fca00d0aefacbc1d3c1c330bb3e507aab68b7ea292e4
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
***********************************************
**espressopp.analysis.VelocityAutocorrelation**
***********************************************
.. function:: espressopp.analysis.VelocityAutocorrelation(system)
:param system: the system whose particle velocities are analyzed
:type system: espressopp.System
"""
from espressopp.esutil import cxxinit
from espressopp import pmi
from espressopp.analysis.ConfigsParticleDecomp import *
from _espressopp import analysis_VelocityAutocorrelation
class VelocityAutocorrelationLocal(ConfigsParticleDecompLocal, analysis_VelocityAutocorrelation):
def __init__(self, system):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, analysis_VelocityAutocorrelation, system)
if pmi.isController:
class VelocityAutocorrelation(ConfigsParticleDecomp):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
pmiproperty = [ 'print_progress' ],
cls = 'espressopp.analysis.VelocityAutocorrelationLocal'
)
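# --- Hypothetical usage sketch (not part of the original file): observables
# built on ConfigsParticleDecomp are typically fed configurations during the
# run and evaluated afterwards; gather()/compute() are assumed from that
# interface and the integrator/sampling names below are illustrative.
#
# vac = espressopp.analysis.VelocityAutocorrelation(system)
# for sample in range(n_samples):
#     integrator.run(steps_per_sample)
#     vac.gather()
# acf = vac.compute()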
|
capoe/espressopp.soap
|
src/analysis/VelocityAutocorrelation.py
|
Python
|
gpl-3.0
| 1,865
|
[
"ESPResSo"
] |
6c3a948c0e53dfce25baf9a2f10a120a3255e72aadd3c72c11120dd369a640cf
|
#!/usr/bin/env python -i
# preceding line should have path for Python on your machine
# split.py
# Purpose: similar to simple.py, but first the world communicator
# is split in two halves and LAMMPS is run only on one partition
# Syntax: split.py in.lammps
# in.lammps = LAMMPS input script
from __future__ import print_function
import sys
# parse command line
argv = sys.argv
if len(argv) != 2:
print("Syntax: simple.py in.lammps")
sys.exit()
infile = sys.argv[1]
me = 0
# this example *only* works with mpi4py version 2.0.0 or later
from mpi4py import MPI
comm = MPI.COMM_WORLD
me = comm.Get_rank()
nprocs = comm.Get_size()
# create two subcommunicators
if me < nprocs // 2: color = 0
else: color = 1
split = comm.Split(color,key=0)
if color == 0:
from lammps import lammps
lmp = lammps(comm=split)
# run infile one line at a time
lines = open(infile,'r').readlines()
for line in lines: lmp.command(line)
# run 10 more steps
# get coords from LAMMPS
# change coords of 1st atom
# put coords back into LAMMPS
# run a single step with changed coords
lmp.command("run 10")
x = lmp.gather_atoms("x",1,3)
epsilon = 0.1
x[0] += epsilon
lmp.scatter_atoms("x",1,3,x)
lmp.command("run 1");
f = lmp.extract_atom("f")
print("Force on 1 atom via extract_atom: ",f[0][0])
fx = lmp.extract_variable("fx","all",1)
print("Force on 1 atom via extract_variable:",fx[0])
print("Proc %d out of %d procs has" % (me,nprocs), lmp)
print("Calculation on partition 0 complete")
else:
# could run a 2nd calculation on second partition
# with different LAMMPS instance or another code
# in this case, just sleep on second partition
import time
time.sleep(2)
print("Calculation on partition 1 complete")
# shutdown mpi4py
comm.Barrier()
MPI.Finalize()
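# --- Illustrative aside (not part of the original example): the color/key
# semantics of Comm.Split in isolation. Ranks with the same color share a
# subcommunicator; key orders the ranks within it. Runnable standalone with
# e.g. "mpirun -np 4 python demo.py" (demo.py is a hypothetical file name):
#
# from mpi4py import MPI
# world = MPI.COMM_WORLD
# color = world.Get_rank() % 2                 # even ranks -> 0, odd -> 1
# sub = world.Split(color, key=world.Get_rank())
# print("world rank %d -> sub rank %d of %d" %
#       (world.Get_rank(), sub.Get_rank(), sub.Get_size()))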
|
akohlmey/lammps
|
python/examples/split.py
|
Python
|
gpl-2.0
| 1,829
|
[
"LAMMPS"
] |
5b285f2a56fd2416d718897e20a909f5edf202b6e4a664be7061a1c30d08d465
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2011 L Fiaschi, T Kroeger, M Nullmaier, C Sommer, C Straehle, U Koethe, FA Hamprecht.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are
# permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of
# conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list
# of conditions and the following disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE ABOVE COPYRIGHT HOLDERS ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE ABOVE COPYRIGHT HOLDERS OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are those of the
# authors and should not be interpreted as representing official policies, either expressed
# or implied, of their employers.
import __builtin__
from PackagesBase import Package
import os, platform
import urllib2, tarfile, shutil
import multiprocessing
###################################################################################################
class FFTW3(Package):
src_uri = 'http://fftw.org/fftw-3.2.2.tar.gz'
correctMD5sum = 'b616e5c91218cc778b5aa735fefb61ae'
workdir = 'fftw-3.2.2'
def conf_all(self):
return " --enable-shared --enable-portable-binary --disable-fortran --prefix=" + self.prefix
def configure_darwin(self):
return "./configure --disable-dependency-tracking --enable-static=no " + self.conf_all()
def configure_linux(self):
return "./configure " + self.conf_all()
###################################################################################################
class FFTW3F(Package):
src_uri = 'http://fftw.org/fftw-3.2.2.tar.gz'
correctMD5sum = 'b616e5c91218cc778b5aa735fefb61ae'
workdir = 'fftw-3.2.2'
def conf_all(self):
return " --enable-single --enable-shared --enable-portable-binary --disable-fortran --prefix=" + self.prefix
def configure_darwin(self):
return "./configure --disable-dependency-tracking --enable-static=no " + self.conf_all()
def configure_linux(self):
return "./configure " + self.conf_all()
###################################################################################################
class JpegPackage(Package):
src_uri = 'http://www.ijg.org/files/jpegsrc.v8c.tar.gz'
workdir = 'jpeg-8c'
def configure_darwin(self):
return "./configure --disable-dependency-tracking --enable-static=no --prefix=" + self.prefix
def configure_linux(self):
return "./configure --prefix=" + self.prefix
###################################################################################################
class TiffPackage(Package):
src_uri = 'http://download.osgeo.org/libtiff/tiff-3.9.4.tar.gz'
workdir ='tiff-3.9.4'
def configure_darwin(self):
return """./configure --enable-static=no \\
--disable-dependency-tracking \\
--with-apple-opengl-framework \\
--prefix=%s""" % self.prefix
def configure_linux(self):
return """./configure --prefix=%s""" % self.prefix
###################################################################################
class PngPackage(Package):
src_uri = 'http://prdownloads.sourceforge.net/libpng/libpng-1.4.5.tar.gz'
workdir = 'libpng-1.4.5'
def configure_darwin(self):
return """./configure --disable-dependency-tracking \\
--enable-static=no \\
--prefix=%s""" % self.prefix
def configure_linux(self):
return """./configure --enable-static=no \\
--prefix=%s""" % self.prefix
###################################################################################################
class SlibPackage(Package):
src_uri='http://www.hdfgroup.org/ftp/lib-external/szip/2.1/src/szip-2.1.tar.gz'
workdir = 'szip-2.1'
def configure_darwin(self):
return """./configure --disable-dependency-tracking \\
--enable-static=no \\
--prefix=%s""" % self.prefix
def configure_linux(self):
return """./configure --enable-static=no \\
--prefix=%s""" % self.prefix
###################################################################################################
class ZlibPackage(Package):
src_uri = 'http://zlib.net/zlib-1.2.5.tar.gz'
workdir = 'zlib-1.2.5'
def unpack(self):
Package.unpack(self)
def configure_darwin(self):
return """./configure --64 \\
--prefix='%s'""" % self.prefix
def configure_linux(self):
return """./configure --64 \\
--prefix='%s'""" % self.prefix
###################################################################################################
class Hdf5Package(Package):
src_uri = 'http://www.hdfgroup.org/ftp/HDF5/current/src/hdf5-1.8.7.tar.gz'
#correctMD5sum = 'df131d156634608e4a7bf26baeafc940'
workdir ='hdf5-1.8.7'
def unpack(self):
Package.unpack(self)
def configure_darwin(self):
return """./configure --disable-dependency-tracking \\
--enable-static=no \\
--prefix='%s'""" % self.prefix
def configure_linux(self):
return """./configure --enable-static=no \\
--prefix='%s'""" % self.prefix
###################################################################################################
class BoostPackage(Package):
src_uri = 'http://downloads.sourceforge.net/project/boost/boost/1.45.0/boost_1_45_0.tar.bz2'
workdir = 'boost_1_45_0'
#def unpack(self):
# pass
def configure(self):
#Package.configure(self)
if platform.system() == "Darwin":
self.oldCC = os.environ["CC"]
self.oldCXX = os.environ["CXX"]
os.environ["CC"] = gcc#+" -arch x86_64"
os.environ["CXX"] = gpp#+" -arch x86_64"
cmd = """./bootstrap.sh --prefix=%s \\
--with-python=%s \\
--with-libraries=python""" % (self.prefix, pythonExecutable)
self.system(cmd)
if platform.system() == "Darwin":
os.environ["CC"] = self.oldCC
os.environ["CXX"] = self.oldCXX
def make(self):
pass
def makeInstall(self):
self.system("./bjam install --prefix=%s" % self.prefix)
################################################################################
class PythonPackage(Package):
src_uri = 'http://www.python.org/ftp/python/2.7.1/Python-2.7.1.tar.bz2'
workdir = 'Python-2.7.1'
#patches=['patch-Mac-PythonLauncher-Makefile.in.diff']
def unpack(self):
Package.unpack(self)
def configure_darwin(self):
return """DESTDIR=%s \\
./configure --prefix=%s \\
--enable-framework=%s/Frameworks \\
""" % (self.prefix,self.prefix,self.prefix)
def configure_linux(self):
return "DESTDIR=%s ./configure --prefix=%s --enable-shared" \
% (self.prefix,self.prefix)
def make(self):
if platform.system() == 'Darwin':
self.system("find . -name Makefile | xargs -n1 sed -i '.bkp' -e \"s|PYTHONAPPSDIR=/Applications/|PYTHONAPPSDIR=%s/Applications/|g\"" % self.prefix)
self.system("make DESTDIR=%s" % self.prefix)
def makeInstall(self):
self.system("make install DESTDIR=''")
##################################################################################
class NosePackage(Package):
src_uri ='http://pkgs.fedoraproject.org/repo/pkgs/python-nose/nose-1.0.0.tar.gz/9542d4c66e04880d8144990de76e0b88/nose-1.0.0.tar.gz'
workdir = 'nose-1.0.0'
def unpack(self):
Package.unpack(self)
def configure(self):
pass
def make(self):
pass
def makeInstall(self):
self.system(pythonExecutable+" setup.py install")
###################################################################################################
class NumpyPackage(Package):
src_uri = 'http://sourceforge.net/projects/numpy/files/NumPy/1.5.1/numpy-1.5.1.tar.gz'
workdir = 'numpy-1.5.1'
def configure(self):
pass
def make(self):
self.system(pythonExecutable+" setup.py build")
def makeInstall(self):
self.system(pythonExecutable+" setup.py install")
######################################################################################
class QtPackage(Package):
src_uri = 'http://get.qt.nokia.com/qt/source/qt-everywhere-opensource-src-4.7.2.tar.gz'
correctMD5sum = '66b992f5c21145df08c99d21847f4fdb'
workdir = 'qt-everywhere-opensource-src-4.7.2'
patches = ['qtbug-15370.patch']
def unpack(self):
Package.unpack(self)
def configure(self):
macosxspecial = """ -no-framework \\
-no-sse3 -no-sse4.1 -no-sse4.2 -no-ssse3 -no-dwarf2 \\
"""
if platform.system() != "Darwin":
macosxspecial = ""
cmd = """echo 'yes' | ./configure %s \\
-opensource \\
-arch x86_64 \\
-optimized-qmake\\
-nomake examples\\
-nomake demos\\
-nomake docs\\
-nomake translations\\
-nomake tools\\
-no-multimedia -no-xmlpatterns -no-svg -no-audio-backend -no-phonon -no-phonon-backend -no-svg -no-webkit\\
-no-openssl\\
-no-declarative -no-declarative-debug\\
-no-script -no-scripttools -no-javascript-jit\\
-no-sql-sqlite -no-sql-sqlite2 -no-sql-psql -no-sql-db2 -no-sql-ibase -no-sql-mysql -no-sql-oci\\
-no-sql-odbc -no-sql-sqlite_symbian -no-sql-tds\\
-no-pch\\
-no-dbus\\
-no-cups\\
-no-nis\\
-qt-libpng\\
-fast -release -shared -no-accessibility\\
--prefix=%s""" % (macosxspecial,self.prefix,)
self.system(cmd)
def make(self, parallel = multiprocessing.cpu_count()):
self.system(make + " -j" + str(parallel))
#Also install Designer, which is needed by VTK
self.system(("cd tools/designer && ../../bin/qmake && %s -j"
+ str(parallel) + " && %s install")
% (make, make))
###########################################################################################################
class PyQtPackage(Package):
src_uri = "http://pkgs.fedoraproject.org/repo/pkgs/PyQt4/PyQt-x11-gpl-4.8.4.tar.gz/97c5dc1042feb5b3fe20baabad055af1/PyQt-x11-gpl-4.8.4.tar.gz"
correctMD5sum = '97c5dc1042feb5b3fe20baabad055af1'
workdir = 'PyQt-x11-gpl-4.8.4'
def configure_darwin(self):
return """%s configure.py \\
--confirm-license \\
--no-designer-plugin \\
-q %s/bin/qmake \\
--use-arch=x86_64""" % (pythonExecutable, self.prefix)
def configure_linux(self):
return """%s configure.py \\
--confirm-license \\
--no-designer-plugin \\
-q %s/bin/qmake """ % (pythonExecutable, self.prefix)
##########################################################################################################
class SipPackage(Package):
src_uri = 'http://pkgs.fedoraproject.org/repo/pkgs/sip/sip-4.12.3.tar.gz/d0f1fa60494db04b4d115d4c2d92f79e/sip-4.12.3.tar.gz'
correctMD5sum = 'd0f1fa60494db04b4d115d4c2d92f79e'
workdir = 'sip-4.12.3'
def configure_darwin(self):
return pythonExecutable+" configure.py --arch=x86_64 -s MacOSX10.6.sdk" # +self.prefix + "/include/sip "
def configure_linux(self):
return pythonExecutable+" configure.py" # +self.prefix + "/include/sip "
############################################################################################################
class H5pyPackage(Package):
src_uri = 'http://h5py.googlecode.com/files/h5py-1.3.1.tar.gz'
workdir = 'h5py-1.3.1'
def configure(self):
cmd = pythonExecutable+" setup.py configure --hdf5=" + self.prefix
self.system(cmd)
def make(self):
cmd = pythonExecutable+" setup.py build"
self.system(cmd)
def makeInstall(self):
cmd = pythonExecutable+" setup.py install"
self.system(cmd)
#############################################################################################################
class GreenletPackage(Package):
src_uri = 'http://pypi.python.org/packages/source/g/greenlet/greenlet-0.3.1.tar.gz'
correctMD5sum = '8d75d7f3f659e915e286e1b0fa0e1c4d'
workdir = 'greenlet-0.3.1'
def configure(self):
pass
def make(self):
cmd = pythonExecutable+" setup.py build"
self.system(cmd)
def makeInstall(self):
cmd = pythonExecutable+" setup.py install"
self.system(cmd)
#############################################################################################################
class PsutilPackage(Package):
src_uri = 'http://psutil.googlecode.com/files/psutil-0.3.0.tar.gz'
workdir = 'psutil-0.3.0'
def configure(self):
pass
def make(self):
cmd = pythonExecutable+" setup.py build"
self.system(cmd)
def makeInstall(self):
cmd = pythonExecutable+" setup.py install"
self.system(cmd)
##############################################################################################################
class PyOpenGLAccelleratePackage(Package):
src_uri = 'http://pypi.python.org/packages/source/P/PyOpenGL-accelerate/PyOpenGL-accelerate-3.0.1.tar.gz'
workdir = 'PyOpenGL-accelerate-3.0.1'
def configure(self):
pass
def make(self):
cmd = pythonExecutable+" setup.py build"
self.system(cmd)
def makeInstall(self):
cmd = pythonExecutable+" setup.py install"
self.system(cmd)
################################################################################################################
class PyOpenGLPackage(Package):
src_uri = 'http://pypi.python.org/packages/source/P/PyOpenGL/PyOpenGL-3.0.1.tar.gz'
workdir = 'PyOpenGL-3.0.1'
def configure(self):
pass
def make(self):
cmd = pythonExecutable+" setup.py build"
self.system(cmd)
def makeInstall(self):
cmd = pythonExecutable+" setup.py install"
self.system(cmd)
#####################################################################################################################
class Qimage2ndarrayPackage(Package):
src_uri = 'http://kogs-www.informatik.uni-hamburg.de/~meine/software/qimage2ndarray/dist/qimage2ndarray-1.0.tar.gz'
workdir = 'qimage2ndarray-1.0'
if platform.system() == "Darwin":
patches = ['qimage2array.patch']
def unpack(self):
Package.unpack(self)
if platform.system() == "Darwin":
self.system("sed -i '.bkp' -e 's|config.qt_inc_dir|\"%s\"|g' setup.py" % \
(self.prefix+"/include/"))
self.system("sed -i '.bkp' -e 's|config.qt_lib_dir|\"%s\"|g' setup.py" % \
(self.prefix+"/lib"))
def configure(self):
pass
def make(self):
self.system("%s setup.py build" % pythonExecutable)
def makeInstall(self):
self.system("%s setup.py install --prefix=%s" % (pythonExecutable, pythonVersionPath))
################################################################################################
class VTKGitPackage(Package):
src_uri = "git://vtk.org/VTK.git"
workdir = 'VTK'
def unpack(self):
Package.unpack(self, copyToWork=False)
def configure(self):
cmd = cmake + """\\
-DVTK_PYTHON_SETUP_ARGS=--prefix='%s'\\
-DSIP_EXECUTABLE:FILEPATH=%s/sip\\
-DSIP_INCLUDE_DIR:PATH=%s/sip\\
-DSIP_PYQT_DIR:PATH=%s/sip/PyQt4\\
-DVTK_WRAP_PYTHON_SIP:BOOL=ON\\
-DPYTHON_EXECUTABLE:FILEPATH=%s\\
-DPYTHON_INCLUDE_DIR:PATH=%s\\
-DPYTHON_LIBRARY:FILEPATH=%s\\
-DVTK_WRAP_PYTHON:BOOL=ON\\
-DVTK_WRAP_PYTHON_SIP:BOOL=ON\\
-DCMAKE_SHARED_LIBS:BOOL=ON\\
-DVTK_USE_QT:BOOL=ON\\
-DVTK_USE_QVTK_QTOPENGL:BOOL=ON\\
-DVTK_USE_SYSTEM_HDF5:BOOL=ON\\
-DCMAKE_INSTALL_PREFIX=%s \\
-DVTK_INSTALL_LIB_DIR=lib \\
-DVTK_INSTALL_INCLUDE_DIR=include \\
-DVTK_INSTALL_PACKAGE_DIR=lib/vtk \\
-DCMAKE_BUILD_TYPE=Release \\
-DBUILD_EXAMPLES=OFF \\
-DBUILD_TESTING=OFF \\
-DVTK_USE_GEOVIS=ON \\
-DVTK_USE_INFOVIS=ON \\
-DVTK_USE_CHARTS=ON \\
-DBUILD_SHARED_LIBS=ON \\
-DVTK_USE_SYSTEM_EXPAT=ON \\
-DVTK_USE_SYSTEM_FREETYPE=OFF \\
-DVTK_USE_SYSTEM_JPEG=ON \\
-DVTK_USE_SYSTEM_LIBXML2=OFF \\
-DVTK_USE_SYSTEM_PNG=ON \\
-DVTK_USE_SYSTEM_TIFF=ON \\
-DVTK_USE_SYSTEM_ZLIB=ON \\
-DVTK_USE_SYSTEM_HDF5=ON \\
-DVTK_USE_HYBRID=ON \\
-DVTK_USE_GL2PS=ON \\
-DVTK_USE_RENDERING=ON \\
-DVTK_WRAP_PYTHON=ON \\
-DVTK_WRAP_PYTHON_SIP=ON \\
-DVTK_USE_QT=ON \\
-DVTK_USE_QVTK=ON \\
-DVTK_USE_QVTK_QTOPENGL=ON \\
-DVTK_USE_QTOPENGL=ON \\
-DVTK_WRAP_CPP=ON \\
-DVTK_WRAP_UI=ON \\
-DVTK_USE_TK:BOOL=OFF \\
-DDESIRED_QT_VERSION=4 \\
../../distfiles/VTK""" % (pythonVersionPath, pythonBinaryPath, pythonIncludePath, pythonSharePath, \
pythonExecutable, pythonIncludePath, pythonLibrary, \
self.prefix)
self.system(cmd)
def makeInstall(self):
cmd = "make install"
if platform.system() != "Darwin":
#FIXME: on 'make install', cmake complains about this missing file
#why?
self.system("touch Utilities/metaIOConfig.h")
cmd = "LD_LIBRARY_PATH=%s make install" % (self.prefix + "/lib",)
self.system(cmd)
#####################################################################################################################################
class LISPackage(Package):
src_uri = 'http://www.ssisc.org/lis/dl/lis-1.2.53.tar.gz'
correctMD5sum = '275597239e7c47ab5aadeee7b7e2c6ce'
workdir = 'lis-1.2.53'
def configure_darwin(self):
return './configure --enable-omp --prefix=%s --enable-shared=yes' % (self.prefix)
def configure_linux(self):
return './configure --enable-omp --prefix=%s --enable-shared=yes' % (self.prefix)
###############################################################################################################
class SetuptoolsPackage(Package):
src_uri = "http://pypi.python.org/packages/source/s/setuptools/setuptools-0.6c11.tar.gz"
workdir = "setuptools-0.6c11"
def configure(self):
pass
def make(self):
self.system(pythonExecutable+" setup.py build")
def makeInstall(self):
self.system(pythonExecutable+" setup.py install")
###############################################################################################################
class EnthoughtBasePackage(Package):
src_uri = "http://enthought.com/repo/ets/EnthoughtBase-3.1.0.tar.gz"
workdir = "EnthoughtBase-3.1.0"
correctMD5sum = '1d8f6365d20dfd5c4232334e80b0cfdf'
patches = ['pyqt-correct-api-version.patch']
def configure(self):
pass
def make(self):
self.system(pythonExecutable+" setup.py build")
def makeInstall(self):
self.system(pythonExecutable+" setup.py install")
###############################################################################################################
class TraitsPackage(Package):
src_uri = "http://www.enthought.com/repo/ETS/Traits-3.6.0.tar.gz"
workdir = "Traits-3.6.0"
correctMD5sum = 'f20092b1de7c470f61cc95ff4f2090e2'
def configure(self):
pass
def make(self):
self.system(pythonExecutable+" setup.py build")
def makeInstall(self):
self.system(pythonExecutable+" setup.py install")
###################################################################################################
class TraitsBackendQtPackage(Package):
src_uri = "http://www.enthought.com/repo/ETS/TraitsBackendQt-3.6.0.tar.gz"
workdir = "TraitsBackendQt-3.6.0"
correctMD5sum = 'a655ae137af4d8590739618926e21893'
patches = ['enthought-no-webkit.patch', 'enthought-no-svg.patch']
def configure(self):
pass
def make(self):
self.system(pythonExecutable+" setup.py build")
def makeInstall(self):
self.system(pythonExecutable+" setup.py install")
###################################################################################################
class TraitsGUIPackage(Package):
src_uri = "http://www.enthought.com/repo/ETS/TraitsGUI-3.6.0.tar.gz"
workdir = "TraitsGUI-3.6.0"
#correctMD5sum = 'a655ae137af4d8590739618926e21893'
def configure(self):
pass
def make(self):
self.system(pythonExecutable+" setup.py build")
def makeInstall(self):
self.system(pythonExecutable+" setup.py install")
###################################################################################################
class Py2appPackage(Package):
src_uri = "http://pypi.python.org/packages/source/p/py2app/py2app-0.6.1.tar.gz"
workdir = "py2app-0.6.1"
correctMD5sum = 'c60eee8f519c93070329de9adeeb14d6'
def configure(self):
pass
def make(self):
self.system(pythonExecutable+" setup.py build")
def makeInstall(self):
self.system(pythonExecutable+" setup.py install")
###################################################################################################
class CStraehlePackage(Package):
src_uri = ''
workdir = 'cstraehl-vigranumpy'
if platform.system() == 'Darwin':
patches = ['link-svs-darwin.patch']
else:
patches = ['link-svs-linux.patch']
def __init__(self):
if not os.path.exists("cstraehle-git-url.txt"):
raise RuntimeError("You need to put the git:// url into a file called 'cstraehle-git-url.txt'")
f = open("cstraehle-git-url.txt", 'r')
l = f.readlines()[0]
CStraehlePackage.src_uri = l.strip()
Package.__init__(self)
def configure(self):
pass
def make(self):
pass
def makeInstall(self):
pass
###################################################################################################
class VigraPackage(Package):
src_uri = 'git@github.com:ukoethe/vigra.git'
workdir = 'vigra'
def configure(self):
dylibext = "dylib"
if platform.system() != "Darwin":
dylibext = "so"
cmd = """%s . \\
-DDEPENDENCY_SEARCH_PREFIX=%s \\
-DCMAKE_INSTALL_PREFIX=%s \\
-DBOOST_ROOT=%s \\
-DWITH_VIGRANUMPY=1 \\
-DCMAKE_BUILD_TYPE=Release \\
-DPYTHON_EXECUTABLE=%s \\
-DPYTHON_LIBRARY:FILEPATH=%s \\
-DPYTHON_LIBRARIES:FILEPATH=%s \\
-DPYTHON_INCLUDE_PATH:PATH=%s \\
-DPYTHON_INCLUDE_DIR:PATH=%s \\
-DLIS_INCLUDE_DIR=%s/include \\
-DLIS_LIBRARY=%s/lib/liblis.%s\\
""" % (cmake, self.prefix, self.prefix, self.prefix, \
pythonExecutable, pythonLibrary, pythonLibrary, pythonIncludePath, pythonIncludePath, \
self.prefix, self.prefix, dylibext,)
self.system(cmd)
os.system('cd work/vigra && patch --forward -p0 < ../../files/vigra_include_private.patch')
#############self.system('cd vigranumpy && cp -r ../../cstraehl-vigranumpy private')
#reconfigure now that we have added the private dir!
self.system(cmd)
def make(self, parallel = multiprocessing.cpu_count()):
self.system(make + " -j" + str(parallel))
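###################################################################################################
# --- Hypothetical sketch (not part of the original file): adding a new
# dependency follows the same recipe as the classes above: subclass Package,
# point src_uri at a tarball, and override the configure/make/install hooks.
# The package below is invented purely to show the shape of the pattern.
#
# class ExamplePackage(Package):
#     src_uri = 'http://example.org/example-1.0.tar.gz'
#     workdir = 'example-1.0'
#     def configure_darwin(self):
#         return "./configure --disable-dependency-tracking --prefix=" + self.prefix
#     def configure_linux(self):
#         return "./configure --prefix=" + self.prefix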
|
ilastik/ilastik-0.5
|
scripts/PackagesItems.py
|
Python
|
bsd-2-clause
| 25,297
|
[
"VTK"
] |
50fce19cf3e89f934f049e795fbf390d9a78eff93b4676da0cdc85066cf0705c
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
from __future__ import absolute_import, print_function
"""
This module provides classes used to enumerate surface sites
and to find adsorption sites on slabs
"""
import numpy as np
from six.moves import range
from pymatgen import Structure, Lattice, vis
import tempfile
import sys
import subprocess
import itertools
import os
from monty.serialization import loadfn
from scipy.spatial import Delaunay
import warnings
from pymatgen.core.operations import SymmOp
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.symmetry.analyzer import generate_full_symmops
from pymatgen.util.coord import in_coord_list, in_coord_list_pbc
from pymatgen.core.sites import PeriodicSite
from pymatgen.analysis.local_env import VoronoiNN
from pymatgen.core.surface import generate_all_slabs
from pymatgen.analysis.structure_matcher import StructureMatcher
from matplotlib import patches
from matplotlib.path import Path
__author__ = "Joseph Montoya"
__copyright__ = "Copyright 2016, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Joseph Montoya"
__credits__ = "Richard Tran"
__email__ = "montoyjh@lbl.gov"
__status__ = "Development"
__date__ = "December 2, 2015"
class AdsorbateSiteFinder(object):
"""
This class finds adsorbate sites on slabs and generates
adsorbate structures according to user-defined criteria.
The algorithm for finding sites is essentially as follows:
1. Determine "surface sites" by finding those within
a height threshold along the miller index of the
highest site
2. Create a network of surface sites using the Delaunay
triangulation of the surface sites
3. Assign on-top, bridge, and hollow adsorption sites
at the nodes, edges, and face centers of the Del.
Triangulation
4. Generate structures from a molecule positioned at
these sites
"""
def __init__(self, slab, selective_dynamics=False,
height=0.9, mi_vec=None):
"""
Create an AdsorbateSiteFinder object.
Args:
slab (Slab): slab object for which to find adsorbate sites
selective_dynamics (bool): flag for whether to assign
non-surface sites as fixed for selective dynamics
height (float): height criteria for selection of surface sites
mi_vec (3-D array-like): vector corresponding to the vector
concurrent with the miller index, this enables use with
slabs that have been reoriented, but the miller vector
must be supplied manually
"""
# get surface normal from miller index
if mi_vec:
self.mvec = mi_vec
else:
self.mvec = get_mi_vec(slab)
slab = self.assign_site_properties(slab, height)
if selective_dynamics:
slab = self.assign_selective_dynamics(slab)
self.slab = slab
@classmethod
def from_bulk_and_miller(cls, structure, miller_index, min_slab_size=8.0,
min_vacuum_size=10.0, max_normal_search=None,
center_slab=True, selective_dynamics=False,
undercoord_threshold=0.09):
"""
This method constructs the adsorbate site finder from a bulk
structure and a miller index, which allows the surface sites
to be determined from the difference in bulk and slab coordination,
as opposed to the height threshold.
Args:
structure (Structure): structure from which slab
input to the ASF is constructed
miller_index (3-tuple or list): miller index to be used
min_slab_size (float): min slab size for slab generation
min_vacuum_size (float): min vacuum size for slab generation
max_normal_search (int): max normal search for slab generation
center_slab (bool): whether to center slab in slab generation
selective_dynamics (bool): whether to assign surface sites
to selective dynamics
undercoord_threshold (float): threshold of "undercoordination"
to use for the assignment of surface sites. Default is
0.09, i.e. sites are designated as surface if they are
at least 9% less coordinated than their bulk counterpart
"""
# TODO: for some reason this works poorly with primitive cells
# may want to switch the coordination algorithm eventually
vnn_bulk = VoronoiNN(tol=0.05)
bulk_coords = [len(vnn_bulk.get_nn(structure, n))
for n in range(len(structure))]
struct = structure.copy(site_properties={'bulk_coordinations': bulk_coords})
slabs = generate_all_slabs(struct, max_index=max(miller_index),
min_slab_size=min_slab_size,
min_vacuum_size=min_vacuum_size,
max_normal_search=max_normal_search,
center_slab=center_slab)
slab_dict = {slab.miller_index: slab for slab in slabs}
if miller_index not in slab_dict:
raise ValueError("Miller index not in slab dict")
this_slab = slab_dict[miller_index]
vnn_surface = VoronoiNN(tol=0.05, allow_pathological=True)
surf_props, undercoords = [], []
this_mi_vec = get_mi_vec(this_slab)
mi_mags = [np.dot(this_mi_vec, site.coords) for site in this_slab]
average_mi_mag = np.average(mi_mags)
for n, site in enumerate(this_slab):
bulk_coord = this_slab.site_properties['bulk_coordinations'][n]
slab_coord = len(vnn_surface.get_nn(this_slab, n))
mi_mag = np.dot(this_mi_vec, site.coords)
undercoord = (bulk_coord - slab_coord) / bulk_coord
undercoords += [undercoord]
if undercoord > undercoord_threshold and mi_mag > average_mi_mag:
surf_props += ['surface']
else:
surf_props += ['subsurface']
new_site_properties = {'surface_properties': surf_props,
'undercoords': undercoords}
new_slab = this_slab.copy(site_properties=new_site_properties)
return cls(new_slab, selective_dynamics)
def find_surface_sites_by_height(self, slab, height=0.9, xy_tol=0.05):
"""
This method finds surface sites by determining which sites are within
a threshold value in height from the topmost site in a list of sites
Args:
slab (Slab): slab from whose sites the surface sites are selected
height (float): threshold in angstroms of distance from topmost
site in slab along the slab c-vector to include in surface
site determination
xy_tol (float): if nonzero, duplicate sites whose projections
into the miller plane coincide within this tolerance are removed.
Returns:
list of sites selected to be within a threshold of the highest
"""
# Get projection of coordinates along the miller index
m_projs = np.array([np.dot(site.coords, self.mvec)
for site in slab.sites])
# Mask based on window threshold along the miller index.
mask = (m_projs - np.amax(m_projs)) >= -height
surf_sites = [slab.sites[n] for n in np.where(mask)[0]]
if xy_tol:
# sort surface sites by height
surf_sites = [s for (h, s) in zip(m_projs[mask], surf_sites)]
surf_sites.reverse()
unique_sites, unique_perp_fracs = [], []
for site in surf_sites:
this_perp = site.coords - np.dot(site.coords, self.mvec)
this_perp_frac = slab.lattice.get_fractional_coords(this_perp)
if not in_coord_list_pbc(unique_perp_fracs, this_perp_frac):
unique_sites.append(site)
unique_perp_fracs.append(this_perp_frac)
surf_sites = unique_sites
return surf_sites
def assign_site_properties(self, slab, height=0.9):
"""
Assigns site properties.
"""
if 'surface_properties' in slab.site_properties.keys():
return slab
else:
surf_sites = self.find_surface_sites_by_height(slab, height)
surf_props = ['surface' if site in surf_sites
else 'subsurface' for site in slab.sites]
return slab.copy(
site_properties={'surface_properties': surf_props})
def get_extended_surface_mesh(self, repeat=(5, 5, 1)):
"""
Gets an extended surface mesh for to use for adsorption
site finding by constructing supercell of surface sites
Args:
repeat (3-tuple): repeat for getting extended surface mesh
"""
surf_str = Structure.from_sites(self.surface_sites)
surf_str.make_supercell(repeat)
return surf_str
@property
def surface_sites(self):
"""
convenience method to return a list of surface sites
"""
return [site for site in self.slab.sites
if site.properties['surface_properties'] == 'surface']
def subsurface_sites(self):
"""
convenience method to return list of subsurface sites
"""
return [site for site in self.slab.sites
if site.properties['surface_properties'] == 'subsurface']
def find_adsorption_sites(self, distance=2.0, put_inside=True,
symm_reduce=1e-2, near_reduce=1e-2,
positions=['ontop', 'bridge', 'hollow'],
no_obtuse_hollow=True):
"""
Finds adsorption sites according to the above algorithm. Returns
a dict of corresponding cartesian coordinates keyed by site type.
Args:
distance (float): distance from the coordinating ensemble
of atoms along the miller index for the site (i. e.
the distance from the slab itself)
put_inside (bool): whether to put the site inside the cell
symm_reduce (float): symm reduction threshold
near_reduce (float): near reduction threshold
positions (list): which positions to include in the site finding
"ontop": sites on top of surface sites
"bridge": sites at edges between surface sites in Delaunay
triangulation of surface sites in the miller plane
"hollow": sites at centers of Delaunay triangulation faces
"subsurface": subsurface positions projected into miller plane
no_obtuse_hollow (bool): flag to indicate whether to include
obtuse triangular ensembles in hollow sites
"""
ads_sites = {k: [] for k in positions}
if 'ontop' in positions:
ads_sites['ontop'] = [s.coords for s in self.surface_sites]
if 'subsurface' in positions:
# Get highest site
ref = self.slab.sites[np.argmax(self.slab.cart_coords[:, 2])]
# Project diff between highest site and subs site into miller
ss_sites = [self.mvec * np.dot(ref.coords - s.coords, self.mvec)
+ s.coords for s in self.subsurface_sites()]
ads_sites['subsurface'] = ss_sites
if 'bridge' in positions or 'hollow' in positions:
mesh = self.get_extended_surface_mesh()
sop = get_rot(self.slab)
dt = Delaunay([sop.operate(m.coords)[:2] for m in mesh])
# TODO: refactor below to properly account for >3-fold
for v in dt.simplices:
if -1 not in v:
dots = []
for i_corner, i_opp in zip(range(3), ((1, 2), (0, 2), (0, 1))):
corner, opp = v[i_corner], [v[o] for o in i_opp]
vecs = [mesh[d].coords - mesh[corner].coords for d in opp]
vecs = [vec / np.linalg.norm(vec) for vec in vecs]
dots.append(np.dot(*vecs))
# Add bridge sites at midpoints of edges of D. Tri
if 'bridge' in positions:
ads_sites["bridge"].append(
self.ensemble_center(mesh, opp))
# Prevent addition of hollow sites in obtuse triangles
obtuse = no_obtuse_hollow and (np.array(dots) < 1e-5).any()
# Add hollow sites at centers of D. Tri faces
if 'hollow' in positions and not obtuse:
ads_sites['hollow'].append(
self.ensemble_center(mesh, v))
ads_sites['all'] = sum(ads_sites.values(), [])
for key, sites in ads_sites.items():
# Pare off outer sites for bridge/hollow
if key in ['bridge', 'hollow']:
frac_coords = [self.slab.lattice.get_fractional_coords(ads_site)
for ads_site in sites]
frac_coords = [frac_coord for frac_coord in frac_coords
if (frac_coord[0] > 1 and frac_coord[0] < 4
and frac_coord[1] > 1 and frac_coord[1] < 4)]
sites = [self.slab.lattice.get_cartesian_coords(frac_coord)
for frac_coord in frac_coords]
if near_reduce:
sites = self.near_reduce(sites, threshold=near_reduce)
if put_inside:
sites = [put_coord_inside(self.slab.lattice, coord)
for coord in sites]
if symm_reduce:
sites = self.symm_reduce(sites, threshold=symm_reduce)
sites = [site + distance * self.mvec for site in sites]
ads_sites[key] = sites
return ads_sites
def symm_reduce(self, coords_set, threshold=1e-6):
"""
Reduces the set of adsorbate sites by removing
symmetrically equivalent duplicates
Args:
coords_set: coordinate set in cartesian coordinates
threshold: tolerance for distance equivalence, used
as input to in_coord_list_pbc for dupl. checking
"""
surf_sg = SpacegroupAnalyzer(self.slab, 0.1)
symm_ops = surf_sg.get_symmetry_operations()
unique_coords = []
# Convert to fractional
coords_set = [self.slab.lattice.get_fractional_coords(coords)
for coords in coords_set]
for coords in coords_set:
incoord = False
for op in symm_ops:
if in_coord_list_pbc(unique_coords, op.operate(coords),
atol=threshold):
incoord = True
break
if not incoord:
unique_coords += [coords]
# convert back to cartesian
return [self.slab.lattice.get_cartesian_coords(coords)
for coords in unique_coords]
def near_reduce(self, coords_set, threshold=1e-4):
"""
Prunes coordinate set for coordinates that are within
threshold
Args:
coords_set (Nx3 array-like): list or array of coordinates
threshold (float): threshold value for distance
"""
unique_coords = []
coords_set = [self.slab.lattice.get_fractional_coords(coords)
for coords in coords_set]
for coord in coords_set:
if not in_coord_list_pbc(unique_coords, coord, threshold):
unique_coords += [coord]
return [self.slab.lattice.get_cartesian_coords(coords)
for coords in unique_coords]
def ensemble_center(self, site_list, indices, cartesian=True):
"""
Finds the center of an ensemble of sites selected from
a list of sites. Helper method for the find_adsorption_sites
algorithm.
Args:
site_list (list of sites): list of sites
indices (list of ints): list of ints from which to select
sites from site list
cartesian (bool): whether to get average fractional or
cartesian coordinate
"""
if cartesian:
return np.average([site_list[i].coords for i in indices],
axis=0)
else:
return np.average([site_list[i].frac_coords for i in indices],
axis=0)
def add_adsorbate(self, molecule, ads_coord, repeat=None, reorient=True):
"""
Adds an adsorbate at a particular coordinate. Adsorbate
represented by a Molecule object, and is positioned relative
to the input adsorbate coordinate.
Args:
molecule (Molecule): molecule object representing the adsorbate
ads_coord (array): coordinate of adsorbate position
repeat (3-tuple or list): input for making a supercell of slab
prior to placing the adsorbate
reorient (bool): flag on whether to reorient the molecule to
have its z-axis concurrent with miller index
"""
if reorient:
# Reorient the molecule along slab m_index
sop = get_rot(self.slab)
molecule.apply_operation(sop.inverse)
struct = self.slab.copy()
if repeat:
struct.make_supercell(repeat)
if 'surface_properties' in struct.site_properties.keys():
molecule.add_site_property("surface_properties",
["adsorbate"] * molecule.num_sites)
if 'selective_dynamics' in struct.site_properties.keys():
molecule.add_site_property("selective_dynamics",
[[True, True, True]] * molecule.num_sites)
for site in molecule:
struct.append(site.specie, ads_coord + site.coords, coords_are_cartesian=True,
properties=site.properties)
return struct
def assign_selective_dynamics(self, slab):
"""
Helper function to assign selective dynamics site_properties
based on surface, subsurface site properties
Args:
slab (Slab): slab for which to assign selective dynamics
"""
sd_list = []
sd_list = [[False, False, False] if site.properties['surface_properties'] == 'subsurface'
else [True, True, True] for site in slab.sites]
new_sp = slab.site_properties
new_sp['selective_dynamics'] = sd_list
return slab.copy(site_properties=new_sp)
def generate_adsorption_structures(self, molecule, repeat=None, min_lw=5.0,
reorient=True, find_args={}):
"""
Function that generates all adsorption structures for a given
molecular adsorbate. Can take repeat argument or minimum
length/width of precursor slab as an input
Args:
molecule (Molecule): molecule corresponding to adsorbate
repeat (3-tuple or list): repeat argument for supercell generation
min_lw (float): minimum length and width of the slab, only used
if repeat is None
reorient (bool): flag on whether or not to reorient adsorbate
along the miller index
find_args (dict): dictionary of arguments to be passed to the
call to self.find_adsorption_sites, e.g. {"distance":2.0}
"""
if repeat is None:
xrep = np.ceil(min_lw / np.linalg.norm(self.slab.lattice.matrix[0]))
yrep = np.ceil(min_lw / np.linalg.norm(self.slab.lattice.matrix[1]))
repeat = [xrep, yrep, 1]
structs = []
for coords in self.find_adsorption_sites(**find_args)['all']:
structs.append(self.add_adsorbate(
molecule, coords, repeat=repeat, reorient=reorient))
return structs
def adsorb_both_surfaces(self, molecule, repeat=None, min_lw=5.0,
reorient=True, find_args={}):
"""
Function that generates all adsorption structures for a given
molecular adsorbate on both surfaces of a slab. This is useful
for calculating surface energy where both surfaces need to be
equivalent or if we want to calculate nonpolar systems.
Args:
molecule (Molecule): molecule corresponding to adsorbate
repeat (3-tuple or list): repeat argument for supercell generation
min_lw (float): minimum length and width of the slab, only used
if repeat is None
reorient (bool): flag on whether or not to reorient adsorbate
along the miller index
find_args (dict): dictionary of arguments to be passed to the
call to self.find_adsorption_sites, e.g. {"distance":2.0}
"""
# Get the adsorbed surfaces first
adslabs = self.generate_adsorption_structures(molecule, repeat=repeat,
min_lw=min_lw,
reorient=reorient,
find_args=find_args)
new_adslabs = []
for adslab in adslabs:
# Find the adsorbate sites and indices in each slab
symmetric, adsorbates, indices = False, [], []
for i, site in enumerate(adslab.sites):
if site.surface_properties == "adsorbate":
adsorbates.append(site)
indices.append(i)
# Start with the clean slab
adslab.remove_sites(indices)
slab = adslab.copy()
# For each site, we add it back to the slab along with a
# symmetrically equivalent position on the other side of
# the slab using symmetry operations
for adsorbate in adsorbates:
p2 = adslab.get_symmetric_site(adsorbate.frac_coords)
slab.append(adsorbate.specie, p2,
properties={"surface_properties": "adsorbate"})
slab.append(adsorbate.specie, adsorbate.frac_coords,
properties={"surface_properties": "adsorbate"})
new_adslabs.append(slab)
return new_adslabs
def generate_substitution_structures(self, atom, target_species=[],
sub_both_sides=False, range_tol=1e-2,
dist_from_surf=0):
"""
Function that performs substitution-type doping on the surface and
returns all possible configurations where one dopant is substituted
per surface. Can substitute one surface or both.
Args:
atom (str): atom corresponding to substitutional dopant
sub_both_sides (bool): If true, substitute an equivalent
site on the other surface
target_species (list): List of specific species to substitute
range_tol (float): Find viable substitution sites at a specific
distance from the surface +- this tolerance
dist_from_surf (float): Distance from the surface to find viable
substitution sites, defaults to 0 to substitute at the surface
"""
# Get symmetrized structure in case we want to substitute both sides
sym_slab = SpacegroupAnalyzer(self.slab).get_symmetrized_structure()
# Define a function for substituting a site
def substitute(site, i):
slab = self.slab.copy()
props = self.slab.site_properties
if sub_both_sides:
# Find an equivalent site on the other surface
eq_indices = [indices for indices in
sym_slab.equivalent_indices if i in indices][0]
for ii in eq_indices:
if "%.6f" % (sym_slab[ii].frac_coords[2]) != \
"%.6f" % (site.frac_coords[2]):
props["surface_properties"][ii] = "substitute"
slab.replace(ii, atom)
break
props["surface_properties"][i] = "substitute"
slab.replace(i, atom)
slab.add_site_property("surface_properties",
props["surface_properties"])
return slab
# Get all possible substitution sites
substituted_slabs = []
# Sort sites so that we can define a range relative to the position of the
# surface atoms, i.e. search for sites above (below) the bottom (top) surface
sorted_sites = sorted(sym_slab, key=lambda site: site.frac_coords[2])
if sorted_sites[0].surface_properties == "surface":
d = sorted_sites[0].frac_coords[2] + dist_from_surf
else:
d = sorted_sites[-1].frac_coords[2] - dist_from_surf
for i, site in enumerate(sym_slab):
if d - range_tol < site.frac_coords[2] < d + range_tol:
if target_species and site.species_string in target_species:
substituted_slabs.append(substitute(site, i))
elif not target_species:
substituted_slabs.append(substitute(site, i))
matcher = StructureMatcher()
return [s[0] for s in matcher.group_structures(substituted_slabs)]
def get_mi_vec(slab):
"""
Convenience function which returns the unit vector aligned
with the miller index.
"""
mvec = np.cross(slab.lattice.matrix[0], slab.lattice.matrix[1])
return mvec / np.linalg.norm(mvec)
def get_rot(slab):
"""
Gets the transformation to rotate the z axis into the miller index
"""
new_z = get_mi_vec(slab)
a, b, c = slab.lattice.matrix
new_x = a / np.linalg.norm(a)
new_y = np.cross(new_z, new_x)
x, y, z = np.eye(3)
rot_matrix = np.array([np.dot(*el) for el in
itertools.product([x, y, z],
[new_x, new_y, new_z])]).reshape(3, 3)
rot_matrix = np.transpose(rot_matrix)
sop = SymmOp.from_rotation_and_translation(rot_matrix)
return sop
def put_coord_inside(lattice, cart_coordinate):
"""
converts a cartesian coordinate such that it is inside the unit cell.
"""
fc = lattice.get_fractional_coords(cart_coordinate)
return lattice.get_cartesian_coords([c - np.floor(c) for c in fc])
def reorient_z(structure):
"""
reorients a structure such that the z axis is concurrent with the
normal to the A-B plane
"""
struct = structure.copy()
sop = get_rot(struct)
struct.apply_operation(sop)
return struct
# Get color dictionary
colors = loadfn(os.path.join(os.path.dirname(vis.__file__),
"ElementColorSchemes.yaml"))
color_dict = {el: [j / 256.001 for j in colors["Jmol"][el]]
for el in colors["Jmol"].keys()}
def plot_slab(slab, ax, scale=0.8, repeat=5, window=1.5,
draw_unit_cell=True, decay=0.2, adsorption_sites=True):
"""
Function that helps visualize the slab in a 2-D plot, for
convenient viewing of output of AdsorbateSiteFinder.
Args:
slab (slab): Slab object to be visualized
ax (axes): matplotlib axes with which to visualize
scale (float): radius scaling for sites
repeat (int): number of repeating unit cells to visualize
window (float): window for setting the axes limits, is essentially
a fraction of the unit cell limits
draw_unit_cell (bool): flag indicating whether or not to draw cell
decay (float): how the alpha-value decays along the z-axis
adsorption_sites (bool): whether to plot the adsorption sites found
"""
orig_slab = slab.copy()
slab = reorient_z(slab)
orig_cell = slab.lattice.matrix.copy()
if repeat:
slab.make_supercell([repeat, repeat, 1])
coords = np.array(sorted(slab.cart_coords, key=lambda x: x[2]))
sites = sorted(slab.sites, key=lambda x: x.coords[2])
alphas = 1 - decay * (np.max(coords[:, 2]) - coords[:, 2])
alphas = alphas.clip(min=0)
corner = [0, 0, slab.lattice.get_fractional_coords(coords[-1])[-1]]
corner = slab.lattice.get_cartesian_coords(corner)[:2]
verts = orig_cell[:2, :2]
lattsum = verts[0] + verts[1]
# Draw circles at sites and stack them accordingly
for n, coord in enumerate(coords):
r = sites[n].specie.atomic_radius * scale
ax.add_patch(patches.Circle(coord[:2] - lattsum * (repeat // 2),
r, color='w', zorder=2 * n))
color = color_dict[sites[n].species_string]
ax.add_patch(patches.Circle(coord[:2] - lattsum * (repeat // 2), r,
facecolor=color, alpha=alphas[n],
edgecolor='k', lw=0.3, zorder=2 * n + 1))
# Adsorption sites
if adsorption_sites:
asf = AdsorbateSiteFinder(orig_slab)
ads_sites = asf.find_adsorption_sites()['all']
sop = get_rot(orig_slab)
ads_sites = [sop.operate(ads_site)[:2].tolist()
for ads_site in ads_sites]
ax.plot(*zip(*ads_sites), color='k', marker='x',
markersize=10, mew=1, linestyle='', zorder=10000)
# Draw unit cell
if draw_unit_cell:
verts = np.insert(verts, 1, lattsum, axis=0).tolist()
verts += [[0., 0.]]
verts = [[0., 0.]] + verts
codes = [Path.MOVETO, Path.LINETO, Path.LINETO,
Path.LINETO, Path.CLOSEPOLY]
verts = [(np.array(vert) + corner).tolist() for vert in verts]
path = Path(verts, codes)
patch = patches.PathPatch(path, facecolor='none', lw=2,
alpha=0.5, zorder=2 * n + 2)
ax.add_patch(patch)
ax.set_aspect("equal")
center = corner + lattsum / 2.
extent = np.max(lattsum)
lim_array = [center - extent * window, center + extent * window]
x_lim = [ele[0] for ele in lim_array]
y_lim = [ele[1] for ele in lim_array]
ax.set_xlim(x_lim)
ax.set_ylim(y_lim)
return ax
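# --- Usage sketch (not part of the original module): minimal end-to-end use
# of AdsorbateSiteFinder via the classmethod defined above; 'POSCAR' is a
# placeholder input file.
#
# from pymatgen import Molecule
# bulk = Structure.from_file('POSCAR')
# asf = AdsorbateSiteFinder.from_bulk_and_miller(bulk, (1, 1, 1))
# sites = asf.find_adsorption_sites()   # dict with ontop/bridge/hollow/all
# h = Molecule(['H'], [[0.0, 0.0, 0.0]])
# ads_structs = asf.generate_adsorption_structures(h, find_args={'distance': 2.0})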
|
czhengsci/pymatgen
|
pymatgen/analysis/adsorption.py
|
Python
|
mit
| 30,820
|
[
"Jmol",
"pymatgen"
] |
002100887a9a7963adb0277de17e3096375ed0c67a69e2887a74f01d1447f1eb
|
#!/usr/bin/env python
########################################################################
# $HeadURL$
# File : dirac-dms-pfn-metadata
# Author : Stuart Paterson
########################################################################
"""
Retrieve metadata for a PFN given a valid DIRAC SE
"""
from __future__ import print_function
__RCSID__ = "$Id$"
import DIRAC
from DIRAC.Core.Base import Script
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
'Usage:',
' %s [option|cfgfile] ... PFN SE' % Script.scriptName,
'Arguments:',
' PFN: Physical File Name or file containing PFNs',
' SE: Valid DIRAC SE' ] ) )
Script.parseCommandLine( ignoreErrors = True )
args = Script.getPositionalArgs()
if len( args ) < 2:
Script.showHelp()
if len( args ) > 2:
print('Only the first PFN SE pair will be considered')
from DIRAC.Interfaces.API.Dirac import Dirac
dirac = Dirac()
exitCode = 0
pfn = args[0]
seName = args[1]
try:
f = open( pfn, 'r' )
pfns = f.read().splitlines()
f.close()
except:
pfns = [pfn]
for pfn in pfns:
result = dirac.getPhysicalFileMetadata( pfn, seName, printOutput = True )
if not result['OK']:
print('ERROR: ', result['Message'])
exitCode = 2
DIRAC.exit( exitCode )
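# --- Usage sketch (not part of the original script):
# command line : dirac-dms-pfn-metadata <PFN> <SE>
# the same call from the Python API exercised above:
#
# from DIRAC.Interfaces.API.Dirac import Dirac
# res = Dirac().getPhysicalFileMetadata('<pfn>', '<se>', printOutput=True)
# if not res['OK']:
#     print(res['Message'])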
|
chaen/DIRAC
|
Interfaces/scripts/dirac-dms-pfn-metadata.py
|
Python
|
gpl-3.0
| 1,446
|
[
"DIRAC"
] |
0b654296e70b7f869f0be228631aeea939dca458360c5d735016fdf94ab0c3a4
|
#!/usr/bin/env python3
#
# Reverse : Generate an indented asm code (pseudo-C) with colored syntax.
# Copyright (C) 2015 Joel
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import sys
from time import time
from reverse.lib.ast import (Ast_Branch, Ast_Goto, Ast_Loop, Ast_If_cond,
Ast_IfGoto, Ast_Ifelse, Ast_AndIf, Ast_Comment)
from reverse.lib.utils import BRANCH_NEXT, BRANCH_NEXT_JUMP, debug__
from reverse.lib.exceptions import ExcIfelse
class Endpoint():
def __init__(self, ast, unseen, l_start):
self.ast = [ast]
self.unseen = unseen
self.loop_start = [l_start]
def rendezvous(self, ast, prev, l_start):
self.ast.append(ast)
self.loop_start.append(l_start)
if prev in self.unseen:
self.unseen.remove(prev)
def get_first_addr(ast):
# Assume that there are no Ast_Comment
if isinstance(ast, list):
return ast[0].address
if isinstance(ast, Ast_Branch):
if len(ast.nodes) > 0:
return get_first_addr(ast.nodes[0])
if isinstance(ast, Ast_Ifelse):
# An Ast_Ifelse holds no instructions of its own, so use the jump inst
return ast.jump_inst.address
if isinstance(ast, Ast_Loop):
if len(ast.branch.nodes) > 0:
return get_first_addr(ast.branch.nodes[0])
if isinstance(ast, Ast_Goto):
return ast.addr_jump
if isinstance(ast, Ast_IfGoto):
return ast.orig_jump.address
if isinstance(ast, Ast_AndIf):
return ast.orig_jump.address
if isinstance(ast, Ast_If_cond):
if len(ast.br.nodes) > 0:
return get_first_addr(ast.br.nodes[0])
return -1
def get_next_addr(ast):
par = ast.parent
if par is None:
return -1
i = ast.idx_in_parent + 1
# Get the next address of the parent ast
if i == len(par.nodes):
return get_next_addr(par)
return get_first_addr(par.nodes[i])
# Returns the first address of the current loop only if the i th ast
# is the last in the parent ast.
def is_last_in_loop(ast, i):
par = ast.parent
if par is None:
return -1
is_last = i == len(ast.nodes) - 1
a = ast.parent.nodes[ast.idx_in_parent]
if isinstance(a, Ast_Loop) and is_last:
return get_first_addr(a)
if not is_last:
return -1
return is_last_in_loop(par, ast.idx_in_parent)
def remove_all_unnecessary_goto(ast):
if isinstance(ast, Ast_Branch):
# Remove all last Ast_Goto, only if the previous is not an andif
if len(ast.nodes) > 0 and isinstance(ast.nodes[-1], Ast_Goto):
if len(ast.nodes) <= 1 or not isinstance(ast.nodes[-2], Ast_AndIf):
if not ast.nodes[-1].dont_remove:
nxt = get_next_addr(ast)
if ast.nodes[-1].addr_jump == nxt:
del ast.nodes[-1]
for n in ast.nodes:
if not isinstance(n, list):
remove_all_unnecessary_goto(n)
elif isinstance(ast, Ast_Ifelse):
remove_all_unnecessary_goto(ast.br_next)
remove_all_unnecessary_goto(ast.br_next_jump)
elif isinstance(ast, Ast_Loop):
if isinstance(ast.branch.nodes[-1], Ast_Goto):
if get_first_addr(ast) == ast.branch.nodes[-1].addr_jump:
del ast.branch.nodes[-1]
remove_all_unnecessary_goto(ast.branch)
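# --- Illustrative aside (not part of the original file): a goto is
# "unnecessary" when it jumps exactly where control would fall through
# anyway. Schematically:
#
#   if cond {            if cond {
#       ...                  ...
#       goto L     ->    }
#   }                    L: ...
#   L: ...
#
# remove_all_unnecessary_goto() prunes those, except after an and-if node
# or when the goto is flagged dont_remove.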
def fix_non_consecutives(ctx, ast):
if isinstance(ast, Ast_Branch):
idx_to_add = {}
for i, n in enumerate(ast.nodes):
if isinstance(n, list):
ad = n[0].address
if ad in ctx.gph.uncond_jumps_set or ad not in ctx.gph.link_out:
continue
nxt1 = ctx.gph.link_out[ad][BRANCH_NEXT]
if i == len(ast.nodes) - 1:
loop_start = is_last_in_loop(ast, i)
if loop_start != -1:
if nxt1 != loop_start:
idx_to_add[i + 1] = nxt1
continue
nxt2 = get_next_addr(ast)
else:
nxt2 = get_first_addr(ast.nodes[i + 1])
if nxt1 != nxt2:
idx_to_add[i + 1] = nxt1
else:
fix_non_consecutives(ctx, n)
if not idx_to_add:
return
# Add from the end of the nodes list
lst = list(idx_to_add.keys())
lst.sort()
for i in reversed(lst):
ast.nodes.insert(i, Ast_Goto(idx_to_add[i]))
elif isinstance(ast, Ast_Ifelse):
fix_non_consecutives(ctx, ast.br_next)
fix_non_consecutives(ctx, ast.br_next_jump)
elif isinstance(ast, Ast_Loop):
fix_non_consecutives(ctx, ast.branch)
def search_endpoint(ctx, stack, ast, entry, l_set, l_prev_loop, l_start):
endp = __search_endpoint(ctx, stack, ast, entry, l_set, l_prev_loop, l_start)
if endp == -1:
return -1
    # Check whether the endpoint we found lies in a subloop: for an "if" the
    # endpoint cannot be inside a loop, so we return -1 in that case.
if l_prev_loop == -1:
l = ctx.gph.not_in_loop
else:
        # l_set also contains subloops; here we just want the current loop
l = ctx.gph.loops_set[(l_prev_loop, l_start)]
if endp not in l:
return -1
return endp
def __search_endpoint(ctx, stack, ast, entry, l_set, l_prev_loop, l_start):
waiting = {}
visited = set()
done = set()
stack = []
for n in ctx.gph.link_out[entry]:
stack.append((entry, n))
    while True:
while stack:
prev, ad = stack.pop(-1)
            # Don't go outside the current loop: we are searching for
            # an if-endpoint.
if l_prev_loop != -1 and ad not in l_set:
continue
# If "ad" is in last_loop_node we are sure that the path
# will loop. So don't keep it if it's a subloop.
if ad in ctx.gph.last_loop_node and \
(l_prev_loop, l_start) not in ctx.gph.last_loop_node[ad]:
continue
            # If the endpoint is a loop, it may lie at the end of that loop.
            # If the node has multiple incoming links and is not a new loop, wait.
if ad not in done:
lkin = ctx.gph.link_in[ad]
if ad == l_start or len(lkin) > 1:
unseen = get_unseen_links_in(ad, l_set, l_prev_loop, l_start)
if len(unseen) > 1:
if ad in waiting:
if prev in waiting[ad]:
waiting[ad].remove(prev)
else:
unseen.remove(prev)
waiting[ad] = unseen
continue
if ad in visited:
continue
visited.add(ad)
if ad in ctx.gph.link_out:
for n in ctx.gph.link_out[ad]:
stack.append((ad, n))
if not waiting:
return -1
if len(waiting) == 1:
ad = next(iter(waiting.keys()))
return ad
stack = []
restart = True
while restart:
restart = False
for ad in list(waiting):
if len(waiting[ad]) > 0:
continue
del waiting[ad]
done.add(ad)
stack.append((-1, ad))
            # If the stack is still empty but some nodes are still waiting,
            # check whether those paths are actually reachable. If not,
            # remove the dependency.
if not stack and waiting:
for ad in set(waiting):
for i in set(waiting[ad]):
if not ctx.gph.path_exists(entry, i):
waiting[ad].remove(i)
if len(waiting[ad]) > 0:
restart = True
else:
del waiting[ad]
if len(waiting) == 1:
ad = next(iter(waiting.keys()))
return ad
if not stack:
return -1
def get_unseen_links_in(ad, l_set, l_prev_loop, l_start):
unseen = set(ctx.gph.link_in[ad])
    # Is it the beginning of a loop?
# Remove internal links to the beginning of the loop
if (l_start, ad) in ctx.gph.loops_all:
sub_loop = ctx.gph.loops_all[(l_start, ad)]
for prev in ctx.gph.link_in[ad]:
if prev in sub_loop and prev in unseen:
unseen.remove(prev)
if l_set is None:
return unseen
# Remove external jumps which are outside the current loop
for prev in ctx.gph.link_in[ad]:
if prev not in l_set and prev in unseen:
unseen.remove(prev)
return unseen
def remove_unnecessary_goto(ast, ad):
if len(ast.nodes) > 1:
if isinstance(ast.nodes[-1], Ast_Goto) and \
ast.nodes[-1].addr_jump == ad:
ast.nodes.pop(-1)
def rm_waiting(ctx, waiting, ad):
# Get the ast which has the smallest level
min_level_idx = -1
list_ast = waiting[ad].ast
list_loop_start = waiting[ad].loop_start
for i, a in enumerate(list_ast):
if (list_loop_start[i], ad) in ctx.gph.false_loops:
continue
if min_level_idx == -1 or a.level < list_ast[min_level_idx].level:
min_level_idx = i
if min_level_idx == -1:
print("errorD: this is a bug, please report")
sys.exit(1)
ast = list_ast[min_level_idx]
    # Add a goto on each of the other asts. If they turn out to be
    # useless they will be deleted later by remove_unnecessary_goto
    # or remove_all_unnecessary_goto.
for i, a in enumerate(list_ast):
if i == min_level_idx:
continue
if len(a.nodes) == 0:
a.add(Ast_Goto(ad))
continue
        # The previous instruction does not have `ad` as its next instruction
if isinstance(a.nodes[-1], list):
prev = a.nodes[-1][0].address
if prev in ctx.gph.uncond_jumps_set:
continue
if prev in ctx.gph.link_out:
n = ctx.gph.link_out[prev][BRANCH_NEXT]
if n != ad:
a.add(Ast_Goto(n))
continue
# The previous is a goto, skip it
if isinstance(a.nodes[-1], Ast_Goto):
continue
a.add(Ast_Goto(ad))
waiting[ad].ast.clear()
del waiting[ad]
return ast
def manage_endpoint(ctx, waiting, ast, prev, ad, l_set, l_prev_loop,
l_start, ad_is_visited):
if ad not in ctx.gph.link_in or len(ctx.gph.link_in[ad]) <= 1:
return ast
    # If ad_is_visited is False, this is an anticipation of a future
    # visit to this node. In that case prev is meaningless.
if not ad_is_visited:
if ad not in waiting:
unseen = get_unseen_links_in(ad, l_set, l_prev_loop, l_start)
waiting[ad] = Endpoint(ast, unseen, l_start)
return None
if ad in waiting:
waiting[ad].rendezvous(ast, prev, l_start)
if len(waiting[ad].unseen) != 0:
return None
ast = rm_waiting(ctx, waiting, ad)
return ast
unseen = get_unseen_links_in(ad, l_set, l_prev_loop, l_start)
if len(unseen) > 1:
unseen.remove(prev)
waiting[ad] = Endpoint(ast, unseen, l_start)
return None
return ast
def generate_ast(ctx__):
global ctx
ctx = ctx__
start = time()
ast = Ast_Branch()
ast.parent = None
stack = [(ast, [], -1, ctx.entry, -1)]
visited = set()
waiting = {}
ast_head = ast
fake_br = Ast_Branch()
fake_br.level = sys.maxsize
while stack or waiting:
if not stack and waiting:
if not ctx.gph.skipped_loops_analysis:
break
for ad in set(waiting):
waiting[ad].unseen.clear()
stack.append((fake_br, [], -1, ad, -1))
ast, loops_stack, prev, curr, else_addr = stack.pop(-1)
        # Check if we are entering a false loop (see gotoinloop*)
if loops_stack:
_, _, l_start = loops_stack[-1]
else:
l_start = ctx.entry
if (l_start, curr) in ctx.gph.false_loops:
continue
blk = ctx.gph.nodes[curr]
# Exit the current loop
while loops_stack:
l_ast, l_prev_loop, l_start = loops_stack[-1]
l_set = ctx.gph.loops_all[(l_prev_loop, l_start)]
if curr not in l_set:
loops_stack.pop(-1)
ast = l_ast.parent
else:
break
if not loops_stack:
l_prev_loop = -1
l_start = ctx.entry
l_set = None
level = ast.level
if curr not in visited:
# Check if we need to stop and wait on a node
a = manage_endpoint(ctx, waiting, ast, prev, curr, l_set,
l_prev_loop, l_start, True)
if a is None:
continue
ast = a
remove_unnecessary_goto(ast, curr)
        # Check if we are entering a new loop
if (l_start, curr) in ctx.gph.loops_all:
if curr not in ctx.gctx.db.reverse_symbols:
name = "loop_0x%x" % curr
ctx.gctx.db.symbols[name] = curr
ctx.gctx.db.reverse_symbols[curr] = name
ctx.gctx.db.modified = True
level += 1
a = Ast_Loop()
a.level = level
a.parent = ast
a.idx_in_parent = len(ast.nodes)
a.branch.parent = ast
a.branch.level = level
a.branch.idx_in_parent = len(ast.nodes)
ast.add(a)
ast = a.branch
loops_stack.append((a, l_start, curr))
else_addr = -1
l_ast = a
l_set = ctx.gph.loops_all[(l_start, curr)]
l_prev_loop = l_start
l_start = curr
if (l_prev_loop, l_start) in ctx.gph.infinite_loop:
a.is_infinite = True
        # Here curr may have changed
if curr in visited:
if curr == l_start:
continue
if len(ast.nodes) > 0:
if isinstance(ast.nodes[-1], list):
prev = ast.nodes[-1][0].address
if prev not in ctx.gph.uncond_jumps_set:
ast.add(Ast_Goto(curr))
else:
ast.add(Ast_Goto(curr))
continue
visited.add(curr)
# Return instruction
if curr not in ctx.gph.link_out:
if curr != ctx.entry and curr not in ctx.gctx.db.reverse_symbols:
name = "ret_0x%x" % curr
ctx.gctx.db.symbols[name] = curr
ctx.gctx.db.reverse_symbols[curr] = name
ctx.gctx.db.modified = True
ast.add(blk)
continue
nxt = ctx.gph.link_out[curr]
if curr in ctx.gctx.dis.jmptables:
ast.add(blk)
for n in nxt:
stack.append((ast, loops_stack, curr, n, else_addr))
elif len(nxt) == 2:
# We are on a conditional jump
prefetch = blk[1] if len(blk) == 2 else None
if loops_stack:
goto_set = False
c1 = nxt[BRANCH_NEXT] not in l_set
c2 = nxt[BRANCH_NEXT_JUMP] not in l_set
if c1 and c2:
raise ExcIfelse(curr)
if c1:
exit_loop = nxt[BRANCH_NEXT]
nxt_node_in_loop = nxt[BRANCH_NEXT_JUMP]
cond_id = ctx.gctx.libarch.utils.invert_cond(blk[0])
goto_set = True
if c2:
exit_loop = nxt[BRANCH_NEXT_JUMP]
nxt_node_in_loop = nxt[BRANCH_NEXT]
cond_id = ctx.gctx.libarch.utils.get_cond(blk[0])
goto_set = True
# goto to exit a loop
if goto_set:
stack.append((ast.parent, list(loops_stack), curr,
exit_loop, else_addr))
stack.append((ast, list(loops_stack), curr,
nxt_node_in_loop, else_addr))
a = Ast_IfGoto(blk[0], cond_id, exit_loop, prefetch)
a.parent = ast
a.level = level
a.idx_in_parent = len(ast.nodes)
ast.add(a)
continue
# and-if
if ctx.gctx.print_andif:
if else_addr == nxt[BRANCH_NEXT_JUMP]:
cond_id = ctx.gctx.libarch.utils.invert_cond(blk[0])
a = Ast_AndIf(blk[0], cond_id, nxt[BRANCH_NEXT], prefetch)
a.parent = ast
a.idx_in_parent = len(ast.nodes)
ast.add(a)
ast.add(Ast_Goto(nxt[BRANCH_NEXT]))
                    # Add a fake branch: this way, in the manage function,
                    # all gotos to the else_addr will be invisible.
stack.append((fake_br, list(loops_stack), curr,
nxt[BRANCH_NEXT_JUMP], else_addr))
stack.append((ast, list(loops_stack), curr,
nxt[BRANCH_NEXT], else_addr))
continue
# and-if
if else_addr == nxt[BRANCH_NEXT]:
cond_id = ctx.gctx.libarch.utils.get_cond(blk[0])
a = Ast_AndIf(blk[0], cond_id, nxt[BRANCH_NEXT_JUMP], prefetch)
a.parent = ast
a.idx_in_parent = len(ast.nodes)
ast.add(a)
ast.add(Ast_Goto(nxt[BRANCH_NEXT_JUMP]))
stack.append((fake_br, list(loops_stack), curr,
nxt[BRANCH_NEXT], else_addr))
stack.append((ast, list(loops_stack), curr,
nxt[BRANCH_NEXT_JUMP], else_addr))
continue
# if-else
endpoint = search_endpoint(ctx, stack, ast, curr,
l_set, l_prev_loop, l_start)
ast_if = Ast_Branch()
ast_if.parent = ast
ast_if.level = level + 1
ast_if.idx_in_parent = len(ast.nodes)
ast_else = Ast_Branch()
ast_else.parent = ast
ast_else.level = level + 1
ast_else.idx_in_parent = len(ast.nodes)
else_addr = nxt[BRANCH_NEXT_JUMP]
if endpoint != -1:
if (l_start, endpoint) not in ctx.gph.false_loops:
                    # If we have already seen this address (for example the
                    # endpoint is the beginning of the current loop) we don't
                    # re-add it to the waiting list.
if endpoint not in visited:
manage_endpoint(ctx, waiting, ast, -1, endpoint, l_set,
l_prev_loop, l_start, False)
else:
endpoint = -1
stack.append((ast_if, list(loops_stack), curr,
nxt[BRANCH_NEXT], else_addr))
if endpoint == -1:
# No endpoint, so it's not useful to have an else-branch
# -> the stack will continue on `ast`
a = Ast_Ifelse(blk[0], ast_else, ast_if, else_addr, prefetch)
stack.append((ast, list(loops_stack), curr,
nxt[BRANCH_NEXT_JUMP], else_addr))
a.parent = ast
a.level = level + 1
a.idx_in_parent = len(ast.nodes)
ast.add(a)
ast.add(Ast_Goto(else_addr))
elif endpoint == else_addr:
# Branch ast_else will be empty
a = Ast_Ifelse(blk[0], ast_else, ast_if, endpoint, prefetch)
stack.append((ast, list(loops_stack), curr,
nxt[BRANCH_NEXT_JUMP], else_addr))
a.parent = ast
a.level = level + 1
a.idx_in_parent = len(ast.nodes)
ast.add(a)
ast.add(Ast_Goto(else_addr))
else:
a = Ast_Ifelse(blk[0], ast_else, ast_if, endpoint, prefetch)
stack.append((ast_else, list(loops_stack), curr,
nxt[BRANCH_NEXT_JUMP], else_addr))
a.parent = ast
a.level = level + 1
a.idx_in_parent = len(ast.nodes)
ast.add(a)
ast.add(Ast_Goto(endpoint))
else:
ast.add(blk)
stack.append((ast, loops_stack, curr,
nxt[BRANCH_NEXT], else_addr))
ast = ast_head
remove_all_unnecessary_goto(ast)
fix_non_consecutives(ctx, ast)
    elapsed = time() - start
debug__("Ast generated in %fs" % elapsed)
# Process ast
start = time()
for func in ctx.gctx.libarch.registered:
func(ctx, ast)
    elapsed = time() - start
debug__("Functions for processing ast in %fs" % elapsed)
if ctx.gctx.color:
ctx.gctx.libarch.process_ast.assign_colors(ctx, ast)
if waiting:
ast_head.nodes.insert(0, Ast_Comment(""))
ast_head.nodes.insert(0, Ast_Comment(""))
ast_head.nodes.insert(0,
Ast_Comment("WARNING: there is a bug, the output is incomplete !"))
ast_head.nodes.insert(0, Ast_Comment(""))
ast_head.nodes.insert(0, Ast_Comment(""))
return ast, False
return ast, True
|
d4nnyk/reverse
|
reverse/lib/generate_ast.py
|
Python
|
gpl-3.0
| 22,717
|
[
"VisIt"
] |
f49d289fe721ad733ff2c1f0626567323a93bfad73ef150b4933438c9e36f3b5
|
"""Util interpolation and distance calculation methods for gps_mapper."""
import cv2
import math
import numpy as np
from collections import namedtuple
EARTH_RADIUS = 6371.0088 # radius of Earth in km
ImageInfo = namedtuple('ImageInfo', 'linewidth, height, width')
Sigmas = namedtuple('Sigmas', 'sigma_x, sigma_y')
def interpolate(points, sigmas, iminfo):
"""Connects a list of points with a line and applies Gaussian blur.
Arguments:
points: list of (x, y) points that will be plotted on the image
line_width: width in pixels of line used to interpolate between points
sigmas: namedtuple of (sigma_x, sigma_y)
Gaussian parameters (higher => more blurry)
height, width: dimensions of the output image in pixels
"""
line_width, height, width = iminfo
output = np.zeros((height, width), np.uint8)
for i in range(len(points) - 1):
pt1 = points[i]
pt2 = points[i+1]
cv2.line(output, pt1, pt2, [255, 255, 255], line_width)
output = cv2.GaussianBlur(output, sigmas, 0)
return output
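# A usage sketch (hypothetical values, not from the original module): draw a
# 3-pixel-wide blurred polyline on a 100x100 canvas. Note that, as written,
# the Sigmas tuple is passed to cv2.GaussianBlur as its (odd) kernel size,
# with the actual sigma derived from it.
#
#     iminfo = ImageInfo(linewidth=3, height=100, width=100)
#     img = interpolate([(10, 10), (50, 80), (90, 20)], Sigmas(15, 15), iminfo)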
def haversine(lat1, lon1, lat2, lon2):
"""Calculates the distance between two lat-lon points in kilometers."""
lat1 = math.radians(lat1)
lat2 = math.radians(lat2)
lon1 = math.radians(lon1)
lon2 = math.radians(lon2)
delta_lat = (lat2 - lat1)
delta_lon = (lon2 - lon1)
angle = (math.sin(delta_lat / 2)**2
+ math.cos(lat1) * math.cos(lat2) * math.sin(delta_lon/2)**2)
unit_distance = 2 * math.atan2(math.sqrt(angle), math.sqrt(1 - angle))
return EARTH_RADIUS * unit_distance
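# A quick sanity check (hand-verifiable): one degree of latitude along a
# meridian subtends 2*pi*EARTH_RADIUS/360 of arc, so:
#
#     haversine(0.0, 0.0, 1.0, 0.0)  # -> ~111.195 km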
def normalized_points(points, top_left, ll_height, ll_width, iminfo):
"""
Takes a list of points (tuples of x, y coordinates) and an output image size
and returns a transformed list of points that fit in the output image.
"""
return [((x - top_left[0]) * iminfo.width / ll_width,
(y - top_left[1]) * iminfo.height / ll_height)
for (x, y) in points]
def normalize_single_point(point, top_left, ll_height, ll_width, iminfo):
"""Normalizes single point."""
return ((point[0] - top_left[0]) * iminfo.width / ll_width,
(point[1] - top_left[1]) * iminfo.height / ll_height)
def window(points, loc, angle, sigmas, iminfo):
"""Returns frame with the line of points that fit, given current location.
Takes a list of points, a location, an angle in radians,
and optionally a height and width in order to
return the angled rectangular region of the image of the specified size,
with the location specifying the bottom middle point of the image.
The image itself is the blurred interpolation of all the points
(we only construct the relevant portion of the rotated image).
Arguments:
points: list of (x, y) tuples representing points
loc: tuple of (x, y) representing
the current car location to center the
bottom of the window at in pixels
angle: angle in rad to rotate rectangular region by (counterclockwise)
sigmas: (sigma_x, sigma_y) Gaussian kernel parameters
iminfo: (line_width, height, width)
line width in pixels to interpolate between points
and dimensions of output image in pixels as a namedtuple
"""
out = []
parallel = (math.cos(angle), -math.sin(angle))
perpendicular = (math.sin(angle), math.cos(angle))
for (x, y) in points:
x_img = (x * parallel[0] + y * parallel[1] + loc[0]
- loc[0] * parallel[0] - loc[1] * parallel[1])
y_img = (x * perpendicular[0] + y * perpendicular[1] + loc[1]
- loc[0] * perpendicular[0] - loc[1] * perpendicular[1])
# when adding the points back,
# translate them to the proper location for the final image
out.append((x_img + (iminfo.width/2 - loc[0]), y_img + (iminfo.height - loc[1])))
return interpolate([(int(round(x)), int(round(y))) for (x, y) in out],
sigmas, iminfo)
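# A note on the transform above (a reading of the code, not part of the
# original docs): each point p is rotated about loc by `angle` with
# R = [[cos a, -sin a], [sin a, cos a]], i.e. p_img = R.(p - loc) + loc,
# and then translated so that loc lands on the bottom-middle pixel
# (width/2, height) of the output frame.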
def dimensions(points):
"""Returns the width and height in kilometers given lat/long points."""
x_vals = [x for (x, y) in points]
y_vals = [y for (x, y) in points]
top_left = (min(x_vals), max(y_vals))
bottom_right = (max(x_vals), min(y_vals))
x_range = haversine(top_left[0], top_left[1], bottom_right[0], top_left[1])
y_range = haversine(top_left[0], top_left[1], top_left[0], bottom_right[1])
return y_range, x_range
def corners(flipped_points):
"""
Takes a set of lat-lon points that have been
flipped to image array coordinates and finds
    the top left and bottom right corner coordinates.
"""
x_vals = [x for (x, y) in flipped_points]
y_vals = [y for (x, y) in flipped_points]
top_left = (min(x_vals), min(y_vals))
bottom_right = (max(x_vals), max(y_vals))
return top_left, bottom_right
|
gtagency/buzzmobile
|
buzzmobile/process/gps_mapper/interpolate.py
|
Python
|
mit
| 4,933
|
[
"Gaussian"
] |
c813f4f9e4f6e46a3ef45314217f255adcb1043a55bcfd5f697a431b29f3a616
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
import numpy as np
from numpy.testing import (
assert_,
assert_almost_equal,
assert_array_equal,
assert_array_almost_equal,
assert_equal,
assert_raises,
)
from nose.plugins.attrib import attr
from MDAnalysisTests import make_Universe
from MDAnalysisTests.datafiles import (
COORDINATES_XYZ, COORDINATES_TRR,
GRO, TRR,
GRO_velocity, PDB_xvf, TRR_xvf
)
import MDAnalysis
from MDAnalysis import NoDataError
def assert_not_view(arr):
    assert_(arr.flags['OWNDATA'])
def assert_correct_errormessage(func, var):
errmsg = "Timestep does not contain {}".format(var)
try:
func[0](*func[1:])
except NoDataError as e:
assert_(errmsg in e.args[0])
else:
raise AssertionError
class TestAtomGroupTrajAccess(object):
"""
For AtomGroup and Atom access:
if present:
- check return type
- check dtype of array
- check not a view of original (should always be copy!)
- check the actual values returned
if not present in trajectory:
- check we get a NoDataError
- check the error message of NDE
For AtomGroup and Atom setting:
if present:
- check AtomGroup value is updated
- check value in master Timestep object is updated
if not present, check we get proper NoDataError on setting
"""
@staticmethod
def _check_atomgroup_positions_access(u, pos):
ag = u.atoms[10:20]
ag_pos = ag.positions
assert_(isinstance(ag_pos, np.ndarray))
assert_(ag_pos.dtype == np.float32)
assert_not_view(ag_pos)
assert_array_equal(ag_pos, u.trajectory.ts.positions[10:20])
@staticmethod
def _check_atomgroup_velocities_access(u, vel):
ag = u.atoms[10:20]
if vel:
ag_vel = ag.velocities
assert_(isinstance(ag_vel, np.ndarray))
assert_(ag_vel.dtype == np.float32)
assert_not_view(ag_vel)
assert_array_equal(ag_vel, u.trajectory.ts.velocities[10:20])
else:
assert_raises(NoDataError, getattr, ag, 'velocities')
assert_correct_errormessage((getattr, ag, 'velocities'), 'velocities')
@staticmethod
def _check_atomgroup_forces_access(u, force):
ag = u.atoms[10:20]
if force:
ag_for = ag.forces
assert_(isinstance(ag_for, np.ndarray))
assert_(ag_for.dtype == np.float32)
assert_not_view(ag_for)
assert_array_equal(ag_for, u.trajectory.ts.forces[10:20])
else:
assert_raises(NoDataError, getattr, ag, 'forces')
assert_correct_errormessage((getattr, ag, 'forces'), 'forces')
@staticmethod
def _check_atom_position_access(u, pos):
at = u.atoms[55]
at_pos = at.position
assert_(isinstance(at_pos, np.ndarray))
assert_(at_pos.dtype == np.float32)
assert_not_view(at_pos)
assert_array_equal(at_pos, u.trajectory.ts.positions[55])
@staticmethod
def _check_atom_velocity_access(u, vel):
at = u.atoms[55]
if vel:
at_vel = at.velocity
assert_(isinstance(at_vel, np.ndarray))
assert_(at_vel.dtype == np.float32)
assert_not_view(at_vel)
assert_array_equal(at_vel, u.trajectory.ts.velocities[55])
else:
assert_raises(NoDataError, getattr, at, 'velocity')
assert_correct_errormessage((getattr, at, 'velocity'), 'velocities')
@staticmethod
def _check_atom_force_access(u, force):
at = u.atoms[55]
if force:
at_for = at.force
assert_(isinstance(at_for, np.ndarray))
assert_(at_for.dtype == np.float32)
assert_not_view(at_for)
assert_array_equal(at_for, u.trajectory.ts.forces[55])
else:
assert_raises(NoDataError, getattr, at, 'force')
assert_correct_errormessage((getattr, at, 'force'), 'forces')
@staticmethod
def _check_atomgroup_positions_setting(u, pos):
ag = u.atoms[[101, 107, 109]]
new = np.array([[72.4, 64.5, 74.7],
[124.6, 15.6, -1.11],
[25.2, -66.6, 0]])
ag.positions = new
assert_array_almost_equal(ag.positions, new, decimal=5)
assert_array_almost_equal(u.trajectory.ts.positions[[101, 107, 109]], new, decimal=5)
@staticmethod
def _check_atomgroup_velocities_setting(u, vel):
ag = u.atoms[[101, 107, 109]]
new = np.array([[72.4, 64.5, 74.7],
[124.6, 15.6, -1.11],
[25.2, -66.6, 0]]) + 0.1
if vel:
ag.velocities = new
assert_array_almost_equal(ag.velocities, new, decimal=5)
assert_array_almost_equal(u.trajectory.ts.velocities[[101, 107, 109]], new, decimal=5)
else:
assert_raises(NoDataError, setattr, ag, 'velocities', new)
assert_correct_errormessage((setattr, ag, 'velocities', new), 'velocities')
@staticmethod
def _check_atomgroup_forces_setting(u, force):
ag = u.atoms[[101, 107, 109]]
new = np.array([[72.4, 64.5, 74.7],
[124.6, 15.6, -1.11],
[25.2, -66.6, 0]]) + 0.2
if force:
ag.forces = new
assert_array_almost_equal(ag.forces, new, decimal=5)
assert_array_almost_equal(u.trajectory.ts.forces[[101, 107, 109]], new, decimal=5)
else:
assert_raises(NoDataError, setattr, ag, 'forces', new)
assert_correct_errormessage((setattr, ag, 'forces', new), 'forces')
@staticmethod
def _check_atom_position_setting(u, pos):
at = u.atoms[94]
new = np.array([58.3, -10.1, 0.001])
at.position = new
assert_array_almost_equal(at.position, new, decimal=5)
assert_array_almost_equal(u.trajectory.ts.positions[94], new, decimal=5)
@staticmethod
def _check_atom_velocity_setting(u, vel):
at = u.atoms[94]
new = np.array([58.3, -10.1, 0.001]) + 0.1
if vel:
at.velocity = new
assert_array_almost_equal(at.velocity, new, decimal=5)
assert_array_almost_equal(u.trajectory.ts.velocities[94], new, decimal=5)
else:
assert_raises(NoDataError, setattr, at, 'velocity', new)
assert_correct_errormessage((setattr, at, 'velocity', new), 'velocities')
@staticmethod
def _check_atom_force_setting(u, force):
at = u.atoms[94]
new = np.array([58.3, -10.1, 0.001]) + 0.2
if force:
at.force = new
assert_array_almost_equal(at.force, new, decimal=5)
assert_array_almost_equal(u.trajectory.ts.forces[94], new, decimal=5)
else:
assert_raises(NoDataError, setattr, at, 'force', new)
assert_correct_errormessage((setattr, at, 'force', new), 'forces')
def test_all(self):
# all combinations of which trajectory attributes we have
# positions is always present
for pos, vel, force in (
(True, False, False),
(True, True, False),
(True, False, True),
(True, True, True),
):
u = make_Universe(trajectory=pos, velocities=vel, forces=force)
# AtomGroup access
yield self._check_atomgroup_positions_access, u, pos
yield self._check_atomgroup_velocities_access, u, vel
yield self._check_atomgroup_forces_access, u, force
# Atom access
yield self._check_atom_position_access, u, pos
yield self._check_atom_velocity_access, u, vel
yield self._check_atom_force_access, u, force
# AtomGroup setting
yield self._check_atomgroup_positions_setting, u, pos
yield self._check_atomgroup_velocities_setting, u, vel
yield self._check_atomgroup_forces_setting, u, force
# Atom setting
yield self._check_atom_position_setting, u, pos
yield self._check_atom_velocity_setting, u, vel
yield self._check_atom_force_setting, u, force
class TestAtom_ForceVelocity(object):
def setUp(self):
self.u = MDAnalysis.Universe(PDB_xvf, TRR_xvf)
self.a = self.u.atoms[0]
def tearDown(self):
del self.u
del self.a
def test_atom_force_get(self):
assert_equal(self.a.force, self.u.atoms.forces[0])
def test_atom_velocity_get(self):
assert_equal(self.a.velocity, self.u.atoms.velocities[0])
def test_atom_force_set(self):
ref = np.arange(3)
self.a.force = ref
assert_equal(self.a.force, ref)
assert_equal(self.u.atoms.forces[0], ref)
def test_atom_velocity_set(self):
ref = np.arange(3)
self.a.velocity = ref
assert_equal(self.a.velocity, ref)
assert_equal(self.u.atoms.velocities[0], ref)
def test_pos_iteration(self):
ag = self.u.atoms[[0]]
val = np.array([self.a.position for ts in self.u.trajectory])
ref = np.array([ag.positions[0] for ts in self.u.trajectory])
assert_array_equal(val, ref)
def test_vel_iteration(self):
ag = self.u.atoms[[0]]
val = np.array([self.a.velocity for ts in self.u.trajectory])
ref = np.array([ag.velocities[0] for ts in self.u.trajectory])
assert_array_equal(val, ref)
def test_for_iteration(self):
ag = self.u.atoms[[0]]
val = np.array([self.a.force for ts in self.u.trajectory])
ref = np.array([ag.forces[0] for ts in self.u.trajectory])
assert_array_equal(val, ref)
class TestGROVelocities(object):
def setUp(self):
        # reference velocities for the full 6-atom test case:
self.reference_velocities = np.array(
[[-101.227, -0.57999998, 0.43400002],
[8.08500004, 3.19099998, -7.79099989],
[-9.04500008, -26.46899986, 13.17999935],
[2.51899981, 3.1400001, -1.73399997],
[-10.64100075, -11.34899998, 0.257],
[19.42700005, -8.21600056, -0.24399999]], dtype=np.float32)
self.prec = 3
def testParse_velocities(self):
        # read the velocities from the GRO_velocity file and compare the
        # AtomGroup and individual Atom velocities parsed with the reference values:
u = MDAnalysis.Universe(GRO_velocity)
all_atoms = u.select_atoms('all')
        # check for read-in and unit conversion for .gro file velocities for the entire AtomGroup:
assert_almost_equal(all_atoms.velocities, self.reference_velocities, self.prec,
err_msg="problem reading .gro file velocities")
        # likewise for each individual atom (to be robust -- in case someone alters the individual atom property code):
assert_almost_equal(all_atoms[0].velocity, self.reference_velocities[0], self.prec,
err_msg="problem reading .gro file velocities")
assert_almost_equal(all_atoms[1].velocity, self.reference_velocities[1], self.prec,
err_msg="problem reading .gro file velocities")
assert_almost_equal(all_atoms[2].velocity, self.reference_velocities[2], self.prec,
err_msg="problem reading .gro file velocities")
assert_almost_equal(all_atoms[3].velocity, self.reference_velocities[3], self.prec,
err_msg="problem reading .gro file velocities")
assert_almost_equal(all_atoms[4].velocity, self.reference_velocities[4], self.prec,
err_msg="problem reading .gro file velocities")
assert_almost_equal(all_atoms[5].velocity, self.reference_velocities[5], self.prec,
err_msg="problem reading .gro file velocities")
class TestTRRForces(object):
def setUp(self):
self.universe = MDAnalysis.Universe(PDB_xvf, TRR_xvf)
# extracted protein forces with g_traj into cobrotoxin_protein_forces.xvg.bz2
# and manually averaged over 918 atoms and 3 time steps
# native units: kJ/(mol*nm)
self.reference_mean_protein_force_native = np.array(
[3.4609879271822823, -0.63302345167392804, -1.0587882545813336], dtype=np.float32)
# MDAnalysis units of kJ/(mol*A)
self.reference_mean_protein_force = self.reference_mean_protein_force_native / 10
self.prec = 6
def tearDown(self):
del self.universe
@attr('slow')
def testForces(self):
protein = self.universe.select_atoms("protein")
assert_equal(len(protein), 918)
mean_F = np.mean([protein.forces.mean(axis=0) for ts in self.universe.trajectory], axis=0)
assert_almost_equal(mean_F, self.reference_mean_protein_force, self.prec,
err_msg="mean force on protein over whole trajectory does not match")
class TestTRRForcesNativeUnits(TestTRRForces):
def setUp(self):
super(TestTRRForcesNativeUnits, self).setUp()
# get universe without conversion
self.universe = MDAnalysis.Universe(PDB_xvf, TRR_xvf, convert_units=False)
# native Gromacs TRR units kJ/(mol*nm)
self.reference_mean_protein_force = self.reference_mean_protein_force_native
class TestAtomGroupVelocities(object):
"""Tests of velocity-related functions in AtomGroup"""
def setUp(self):
self.universe = MDAnalysis.Universe(GRO, TRR)
self.ag = self.universe.select_atoms("bynum 12:42")
def tearDown(self):
del self.ag
del self.universe
@attr('slow')
def test_get_velocities(self):
v = self.ag.velocities
assert_(np.any(np.abs(v) > 1e-6), "velocities should be non-zero")
@attr('slow')
def test_velocities(self):
ag = self.universe.atoms[42:45]
ref_v = np.array([
[-3.61757946, -4.9867239, 2.46281552],
[2.57792854, 3.25411797, -0.75065529],
[13.91627216, 30.17778587, -12.16669178]])
v = ag.velocities
assert_almost_equal(v, ref_v, err_msg="velocities were not read correctly")
@attr('slow')
def test_set_velocities(self):
ag = self.ag
v = ag.velocities - 2.7271
ag.velocities = v
assert_almost_equal(ag.velocities, v,
err_msg="messages were not set to new value")
class TestAtomGroupForces(object):
"""Tests of velocity-related functions in AtomGroup"""
def setUp(self):
self.universe = MDAnalysis.Universe(COORDINATES_XYZ, COORDINATES_TRR)
self.ag = self.universe.atoms[1:4]
def tearDown(self):
del self.universe
@attr('slow')
def test_get_forces(self):
v = self.ag.forces
assert_(np.any(np.abs(v) > 1e-6), "forces should be non-zero")
@attr('slow')
def test_forces(self):
ag = self.universe.atoms[1:4]
ref_v = np.arange(9).reshape(3, 3) * .01 + .03
v = ag.forces
assert_almost_equal(v, ref_v, err_msg="forces were not read correctly")
@attr('slow')
def test_set_forces(self):
ag = self.ag
v = ag.forces - 2.7271
ag.forces = v
assert_almost_equal(ag.forces, v,
err_msg="messages were not set to new value")
|
alejob/mdanalysis
|
testsuite/MDAnalysisTests/core/test_group_traj_access.py
|
Python
|
gpl-2.0
| 16,491
|
[
"Gromacs",
"MDAnalysis"
] |
9c89d194d9acaf3a961ee365fb2828a09261c87f8baa7b64009b666633064a46
|
# https://www.hackerrank.com/challenges/bon-appetit
# Anna and Brian ate n dishes, but anna didn't eat from the kth one
# because of an allergy, so that one doesn't go into tab splitting.
n, k = map(int, raw_input().split())
# The list of costs for the n dishes.
costs = map(int, raw_input().split())
# What Anna should ideally pay is the total of the dishes minus
# the one she didn't eat from, divided by two.
ideal = (sum(costs) - costs[k]) / 2.0
# The actual amount she paid.
paid = int(raw_input())
# If she paid the ideal amount, then Bon Appetit!
if paid == ideal:
print "Bon Appetit"
# She either overpaid or underpaid, so print the difference.
else:
print int(abs(paid - ideal))
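# Worked example: with n=4, k=1, costs=[3, 10, 2, 9] and paid=12, the
# ideal share is (3 + 10 + 2 + 9 - 10) / 2 = 7, so the program prints
# int(abs(12 - 7)) = 5.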
|
zubie7a/Algorithms
|
HackerRank/Algorithms/02_Implementation/07_Bon_Appetit.py
|
Python
|
mit
| 690
|
[
"Brian"
] |
3bd1167b43911e73c6a62ad182895a2d19723d3a301d7663404b0e45c6c4c389
|
from __future__ import absolute_import, division, print_function
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from warnings import warn
import types
import copy
import traceback
import inspect
from future.builtins import zip
from . import (UnrecognizedFormatError, InvalidRegistrationError,
DuplicateRegistrationError, ArgumentOverrideWarning,
FormatIdentificationWarning)
from .util import open_file, open_files
_formats = {}
_sniffers = {}
_aliases = {}
_empty_file_format = '<emptyfile>'
# We create a class and instantiate it dynamically so that exceptions are more
# obvious and so that only one object exists without copying this line.
FileSentinel = type('FileSentinel', (object, ), {})()
def _override_kwargs(kw, fmt_kw, warn_user):
for key in kw:
if key in fmt_kw and fmt_kw[key] != kw[key] and warn_user:
warn('Best guess was: %s=%s, continuing with user supplied: %s' % (
key, str(fmt_kw[key]), str(kw[key])
), ArgumentOverrideWarning)
fmt_kw[key] = kw[key]
return fmt_kw
def register_sniffer(format):
"""Return a decorator for a sniffer function.
A decorator factory for sniffer functions. Sniffers may only be registered
to simple formats. Sniffers for compound formats are automatically
generated from their component simple formats.
A sniffer function should have at least the following signature:
``<format_name>_sniffer(fh)``. `fh` is **always** an open filehandle.
This decorator provides the ability to use filepaths in the same argument
position as `fh`. They will automatically be opened and closed.
**The sniffer must not close the filehandle**, cleanup will be
handled external to the sniffer and is not its concern.
`**kwargs` are not passed to a sniffer, and a sniffer must not use them.
The job of a sniffer is to determine if a file appears to be in the given
format and to 'sniff' out any kwargs that would be of use to a reader
function.
The sniffer **must** return a tuple of (True, <kwargs dict>) if it believes
`fh` is a given `format`. Otherwise it should return (False, {}).
.. note:: Failure to adhere to the above interface specified for a sniffer
will result in unintended side-effects.
The sniffer may determine membership of a file in as many or as few
lines of the file as it deems necessary.
Parameters
----------
format : str
A format name which a decorated sniffer will be bound to.
Returns
-------
function
A decorator to be used on a sniffer. The decorator will raise a
``skbio.io.DuplicateRegistrationError`` if there already exists a
*sniffer* bound to the `format`.
See Also
--------
skbio.io.sniff
"""
def decorator(sniffer):
if format in _sniffers:
raise DuplicateRegistrationError(msg="'%s' already has a sniffer."
% format)
def wrapped_sniffer(fp, mode='U', **kwargs):
with open_file(fp, mode) as fh:
# The reason we do a copy is because we need the sniffer to not
                # mutate the original file while guessing the format. The
# naive solution would be to seek to 0 at the end, but that
# would break an explicit offset provided by the user. Instead
                # we create a shallow copy, which works out of the box for
                # file-like objects, but does not work for real files. Instead
# the name attribute is reused in open for a new filehandle.
# Using seek and tell is not viable because in real files tell
# reflects the position of the read-ahead buffer and not the
# true offset of the iterator.
if hasattr(fh, 'name'):
cfh = open(fh.name, fh.mode)
else:
cfh = copy.copy(fh)
cfh.seek(0)
try:
return sniffer(cfh, **kwargs)
except Exception:
warn("'%s' has encountered a problem.\n"
"Please send the following to our issue tracker at\n"
"https://github.com/biocore/scikit-bio/issues\n\n"
"%s" % (sniffer.__name__, traceback.format_exc()),
FormatIdentificationWarning)
return False, {}
finally:
cfh.close()
wrapped_sniffer.__doc__ = sniffer.__doc__
wrapped_sniffer.__name__ = sniffer.__name__
_sniffers[format] = wrapped_sniffer
return wrapped_sniffer
return decorator
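# A minimal sketch of the sniffer contract (illustrative only; the format
# name and the '>' heuristic are made up, so this stays commented out):
#
#     @register_sniffer('hypothetical-format')
#     def hypothetical_sniffer(fh):
#         # Must return (True, kwargs) or (False, {}) and must not close fh.
#         return (fh.readline().startswith('>'), {})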
def register_reader(format, cls=None):
"""Return a decorator for a reader function.
A decorator factory for reader functions.
A reader function should have at least the following signature:
``<format_name>_to_<class_name_or_generator>(fh)``. `fh` is **always** an
open filehandle. This decorator provides the ability to use filepaths in
the same argument position as `fh`. They will automatically be opened and
closed.
**The reader must not close the filehandle**, cleanup will be
handled external to the reader and is not its concern. This is true even
in the case of generators.
Any additional `**kwargs` will be passed to the reader and may
be used if necessary.
The reader **must** return an instance of `cls` if `cls` is not None.
Otherwise the reader must return a generator. The generator need not deal
with closing the `fh`. That is already handled by this decorator.
.. note:: Failure to adhere to the above interface specified for a reader
will result in unintended side-effects.
Parameters
----------
format : str
A format name which a decorated reader will be bound to.
cls : type, optional
The class which a decorated reader will be bound to. When `cls` is None
the reader will be bound as returning a generator.
Default is None.
Returns
-------
function
A decorator to be used on a reader. The decorator will raise a
``skbio.io.DuplicateRegistrationError`` if there already exists a
*reader* bound to the same permutation of `fmt` and `cls`.
See Also
--------
skbio.io.read
"""
def decorator(reader):
format_class = _formats.setdefault(format, {}).setdefault(cls, {})
if 'reader' in format_class:
raise DuplicateRegistrationError('reader', format, cls)
file_args = []
reader_spec = inspect.getargspec(reader)
if reader_spec.defaults is not None:
# Concept from http://stackoverflow.com/a/12627202/579416
for key, default in zip(
reader_spec.args[-len(reader_spec.defaults):],
reader_spec.defaults):
if default is FileSentinel:
file_args.append(key)
# We wrap the reader so that basic file handling can be managed
# externally from the business logic.
if cls is None:
def wrapped_reader(fp, mode='U', mutate_fh=False, **kwargs):
file_keys = []
files = [fp]
for file_arg in file_args:
if file_arg in kwargs:
if kwargs[file_arg] is not None:
file_keys.append(file_arg)
files.append(kwargs[file_arg])
else:
kwargs[file_arg] = None
with open_files(files, mode) as fhs:
try:
for key, fh in zip(file_keys, fhs[1:]):
kwargs[key] = fh
generator = reader(fhs[0], **kwargs)
if not isinstance(generator, types.GeneratorType):
                            # Raise an exception to be handled on the next line,
                            # because although the reader executed without error,
                            # it is not a generator.
raise Exception()
# If an exception is thrown at this point, it cannot
                    # be a generator. If there was a `yield` statement, then
# Python would have returned a generator regardless of the
# content. This does not preclude the generator from
# throwing exceptions.
except Exception:
raise InvalidRegistrationError("'%s' is not a "
"generator." %
reader.__name__)
while True:
yield next(generator)
else:
# When an object is instantiated we don't need to worry about the
# original position at every step, only at the end.
def wrapped_reader(fp, mode='U', mutate_fh=False, **kwargs):
file_keys = []
files = [fp]
for file_arg in file_args:
if file_arg in kwargs:
if kwargs[file_arg] is not None:
file_keys.append(file_arg)
files.append(kwargs[file_arg])
else:
kwargs[file_arg] = None
with open_files(files, mode) as fhs:
for key, fh in zip(file_keys, fhs[1:]):
kwargs[key] = fh
return reader(fhs[0], **kwargs)
wrapped_reader.__doc__ = reader.__doc__
wrapped_reader.__name__ = reader.__name__
format_class['reader'] = wrapped_reader
return wrapped_reader
return decorator
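# A minimal sketch of the reader contract (illustrative only; SomeClass and
# the format name are hypothetical, so this stays commented out):
#
#     @register_reader('hypothetical-format', SomeClass)
#     def hypothetical_to_someclass(fh):
#         # The decorator opens/closes fh; just build and return the instance.
#         return SomeClass(fh.read())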
def register_writer(format, cls=None):
"""Return a decorator for a writer function.
A decorator factory for writer functions.
A writer function should have at least the following signature:
``<class_name_or_generator>_to_<format_name>(obj, fh)``. `fh` is **always**
an open filehandle. This decorator provides the ability to use filepaths in
the same argument position as `fh`. They will automatically be opened and
closed.
    **The writer must not close the filehandle**, cleanup will be
    handled external to the writer and is not its concern.
Any additional `**kwargs` will be passed to the writer and may be used if
necessary.
The writer must not return a value. Instead it should only mutate the `fh`
    in a way consistent with its purpose.
If the writer accepts a generator, it should exhaust the generator to
ensure that the potentially open filehandle backing said generator is
closed.
.. note:: Failure to adhere to the above interface specified for a writer
will result in unintended side-effects.
Parameters
----------
format : str
A format name which a decorated writer will be bound to.
cls : type, optional
The class which a decorated writer will be bound to. If `cls` is None
the writer will be bound as expecting a generator.
Default is None.
Returns
-------
function
A decorator to be used on a writer. The decorator will raise a
``skbio.io.DuplicateRegistrationError`` if there already exists a
*writer* bound to the same permutation of `fmt` and `cls`.
See Also
--------
skbio.io.write
skbio.io.get_writer
"""
def decorator(writer):
format_class = _formats.setdefault(format, {}).setdefault(cls, {})
if 'writer' in format_class:
raise DuplicateRegistrationError('writer', format, cls)
file_args = []
writer_spec = inspect.getargspec(writer)
if writer_spec.defaults is not None:
# Concept from http://stackoverflow.com/a/12627202/579416
for key, default in zip(
writer_spec.args[-len(writer_spec.defaults):],
writer_spec.defaults):
if default is FileSentinel:
file_args.append(key)
# We wrap the writer so that basic file handling can be managed
# externally from the business logic.
def wrapped_writer(obj, fp, mode='w', **kwargs):
file_keys = []
files = [fp]
for file_arg in file_args:
if file_arg in kwargs:
if kwargs[file_arg] is not None:
file_keys.append(file_arg)
files.append(kwargs[file_arg])
else:
kwargs[file_arg] = None
with open_files(files, mode) as fhs:
for key, fh in zip(file_keys, fhs[1:]):
kwargs[key] = fh
writer(obj, fhs[0], **kwargs)
wrapped_writer.__doc__ = writer.__doc__
wrapped_writer.__name__ = writer.__name__
format_class['writer'] = wrapped_writer
return wrapped_writer
return decorator
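# The matching writer sketch (illustrative only; SomeClass is hypothetical).
# Per the contract above it mutates fh and returns nothing:
#
#     @register_writer('hypothetical-format', SomeClass)
#     def someclass_to_hypothetical(obj, fh):
#         fh.write(str(obj))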
def list_read_formats(cls):
"""Return a list of available read formats for a given `cls` type.
Parameters
----------
cls : type
The class which will be used to determine what read formats exist for
an instance of `cls`.
Returns
-------
list
A list of available read formats for an instance of `cls`. List may be
empty.
See Also
--------
skbio.io.register_reader
"""
return _rw_list_formats('reader', cls)
def list_write_formats(cls):
"""Return a list of available write formats for a given `cls` instance.
Parameters
----------
cls : type
The class which will be used to determine what write formats exist for
an instance of `cls`.
Returns
-------
list
A list of available write formats for an instance of `cls`. List may be
empty.
See Also
--------
skbio.io.register_writer
"""
return _rw_list_formats('writer', cls)
def _rw_list_formats(name, cls):
formats = []
for fmt in _formats:
if cls in _formats[fmt] and name in _formats[fmt][cls]:
formats.append(fmt)
return formats
def get_sniffer(format):
"""Return a sniffer for a format.
Parameters
----------
format : str
A format string which has a registered sniffer.
Returns
-------
function or None
Returns a sniffer function if one exists for the given `fmt`.
Otherwise it will return None.
See Also
--------
skbio.io.register_sniffer
"""
return _sniffers.get(format, None)
def get_reader(format, cls=None):
"""Return a reader for a format.
Parameters
----------
format : str
A registered format string.
cls : type, optional
The class which the reader will return an instance of. If `cls` is
None, the reader will return a generator.
Default is None.
Returns
-------
function or None
Returns a reader function if one exists for a given `fmt` and `cls`.
Otherwise it will return None.
See Also
--------
skbio.io.register_reader
"""
return _rw_getter('reader', format, cls)
def get_writer(format, cls=None):
"""Return a writer for a format.
Parameters
----------
format : str
A registered format string.
cls : type, optional
The class which the writer will expect an instance of. If `cls` is
None, the writer will expect a generator that is identical to what
is returned by ``get_reader(<some_format>, None)``.
Default is None.
Returns
-------
function or None
Returns a writer function if one exists for a given `fmt` and `cls`.
Otherwise it will return None.
See Also
--------
skbio.io.register_writer
skbio.io.get_reader
"""
return _rw_getter('writer', format, cls)
def _rw_getter(name, fmt, cls):
if fmt in _formats:
if cls in _formats[fmt] and name in _formats[fmt][cls]:
return _formats[fmt][cls][name]
return None
def sniff(fp, cls=None, mode='U'):
"""Attempt to guess the format of a file and return format str and kwargs.
Parameters
----------
fp : filepath or filehandle
The provided file to guess the format of. Filepaths are automatically
closed; filehandles are the responsibility of the caller.
cls : type, optional
A provided class that restricts the search for the format. Only formats
which have a registered reader or writer for the given `cls` will be
tested.
Default is None.
Returns
-------
(str, kwargs)
A format name and kwargs for the corresponding reader.
Raises
------
UnrecognizedFormatError
This occurs when the format is not 'claimed' by any registered sniffer
or when the format is ambiguous and has been 'claimed' by more than one
sniffer.
See Also
--------
skbio.io.register_sniffer
"""
possibles = []
for fmt in _sniffers:
if cls is not None and fmt != _empty_file_format and (
fmt not in _formats or cls not in _formats[fmt]):
continue
format_sniffer = _sniffers[fmt]
is_format, fmt_kwargs = format_sniffer(fp, mode=mode)
if is_format:
possibles.append(fmt)
kwargs = fmt_kwargs
if not possibles:
raise UnrecognizedFormatError("Cannot guess the format for %s."
% str(fp))
if len(possibles) > 1:
raise UnrecognizedFormatError("File format is ambiguous, may be"
" one of %s." % str(possibles))
return possibles[0], kwargs
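# A usage sketch (the file path and class are hypothetical): guess the
# format, then forward the sniffed kwargs to the reader.
#
#     fmt, kwargs = sniff('seqs.txt')
#     obj = read('seqs.txt', format=fmt, into=SomeClass, **kwargs)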
def read(fp, format=None, into=None, verify=True, mode='U', **kwargs):
"""Read a supported skbio file format into an instance or a generator.
This function is able to reference and execute all *registered* read
operations in skbio.
Parameters
----------
fp : filepath or filehandle
The location to read the given `format` `into`. Filepaths are
automatically closed when read; filehandles are the responsibility
of the caller. In the case of a generator, a filepath will be closed
when ``StopIteration`` is raised; filehandles are still the
responsibility of the caller.
format : str, optional
The format must be a format name with a reader for the given
`into` class. If a `format` is not provided or is None, all
        registered sniffers for the provided `into` class will be evaluated to
attempt to guess the format.
Default is None.
into : type, optional
A class which has a registered reader for a given `format`. If `into`
is not provided or is None, read will return a generator.
Default is None.
verify : bool, optional
Whether or not to confirm the format of a file if `format` is provided.
Will raise a ``skbio.io.FormatIdentificationWarning`` if the sniffer of
`format` returns False.
Default is True.
mode : str, optional
The read mode. This is passed to `open(fp, mode)` internally.
Default is 'U'
kwargs : dict, optional
Will be passed directly to the appropriate reader.
Returns
-------
object or generator
If `into` is not None, an instance of the `into` class will be
provided with internal state consistent with the provided file.
If `into` is None, a generator will be returned.
Raises
------
ValueError
Raised when `format` and `into` are both None.
skbio.io.UnrecognizedFormatError
Raised when a reader could not be found for a given `format` or the
format could not be guessed.
skbio.io.FormatIdentificationWarning
Raised when `verify` is True and the sniffer of a `format` provided a
kwarg value that did not match the user's kwarg value.
See Also
--------
skbio.io.register_reader
skbio.io.register_sniffer
"""
if format is None and into is None:
raise ValueError("`format` and `into` cannot both be None.")
if format is None:
format, fmt_kwargs = sniff(fp, cls=into, mode=mode)
kwargs = _override_kwargs(kwargs, fmt_kwargs, verify)
elif verify:
sniffer = get_sniffer(format)
if sniffer is not None:
is_format, fmt_kwargs = sniffer(fp)
if not is_format:
warn("%s could not be positively identified as %s file." %
(str(fp), format),
FormatIdentificationWarning)
else:
kwargs = _override_kwargs(kwargs, fmt_kwargs, True)
reader = get_reader(format, into)
if reader is None:
raise UnrecognizedFormatError("Cannot read %s into %s, no reader "
"found." % (format, into.__name__
if into is not None
else 'generator'))
return reader(fp, mode=mode, **kwargs)
def write(obj, format, into, mode='w', **kwargs):
"""Write a supported skbio file format from an instance or a generator.
This function is able to reference and execute all *registered* write
operations in skbio.
Parameters
----------
obj : object
The object must have a registered writer for a provided `format`.
format : str
The format must be a registered format name with a writer for the given
`obj`.
into : filepath or filehandle
The location to write the given `format` from `obj` into. Filepaths are
automatically closed when written; filehandles are the responsibility
of the caller.
mode : str, optional
The write mode. This is passed to `open(fp, mode)` internally.
Default is 'w'.
kwargs : dict, optional
Will be passed directly to the appropriate writer.
Raises
------
skbio.io.UnrecognizedFormatError
Raised when a writer could not be found for the given `format` and
`obj`.
See Also
--------
skbio.io.register_writer
"""
cls = None
if not isinstance(obj, types.GeneratorType):
cls = obj.__class__
writer = get_writer(format, cls)
if writer is None:
raise UnrecognizedFormatError("Cannot write %s into %s, no %s writer "
"found." % (format, str(into),
'generator' if cls is None
else str(cls)))
writer(obj, into, mode=mode, **kwargs)
# This is meant to be a handy indicator to the user that they have done
# something wrong.
@register_sniffer(_empty_file_format)
def empty_file_sniffer(fh):
for line in fh:
if line.strip():
return False, {}
return True, {}
def initialize_oop_interface():
classes = set()
# Find each potential class
for fmt in _formats:
for cls in _formats[fmt]:
classes.add(cls)
# Add readers and writers for each class
for cls in classes:
if cls is not None:
_apply_read(cls)
_apply_write(cls)
def _apply_read(cls):
"""Add read method if any formats have a registered reader for `cls`."""
skbio_io_read = globals()['read']
read_formats = list_read_formats(cls)
if read_formats:
@classmethod
def read(cls, fp, format=None, **kwargs):
return skbio_io_read(fp, into=cls, format=format, **kwargs)
read.__func__.__doc__ = _read_docstring % (
cls.__name__,
_formats_for_docs(read_formats),
cls.__name__,
cls.__name__,
cls.__name__,
_import_paths(read_formats)
)
cls.read = read
def _apply_write(cls):
"""Add write method if any formats have a registered writer for `cls`."""
skbio_io_write = globals()['write']
write_formats = list_write_formats(cls)
if write_formats:
if not hasattr(cls, 'default_write_format'):
raise NotImplementedError(
"Classes with registered writers must provide a "
"`default_write_format`. Please add `default_write_format` to"
" '%s'." % cls.__name__)
def write(self, fp, format=cls.default_write_format, **kwargs):
skbio_io_write(self, into=fp, format=format, **kwargs)
write.__doc__ = _write_docstring % (
cls.__name__,
_formats_for_docs(write_formats),
cls.__name__,
cls.default_write_format,
_import_paths(write_formats)
)
cls.write = write
def _import_paths(formats):
lines = []
for fmt in formats:
lines.append("skbio.io." + fmt)
return '\n'.join(lines)
def _formats_for_docs(formats):
lines = []
for fmt in formats:
lines.append("- ``'%s'`` (:mod:`skbio.io.%s`)" % (fmt, fmt))
return '\n'.join(lines)
_read_docstring = """Create a new ``%s`` instance from a file.
This is a convenience method for :mod:`skbio.io.read`. For more
information about the I/O system in scikit-bio, please see
:mod:`skbio.io`.
Supported file formats include:
%s
Parameters
----------
fp : filepath or filehandle
The location to read the given `format`. Filepaths are
automatically closed when read; filehandles are the
responsibility of the caller.
format : str, optional
The format must be a format name with a reader for ``%s``.
If a `format` is not provided or is None, it will attempt to
guess the format.
kwargs : dict, optional
Keyword arguments passed to :mod:`skbio.io.read` and the file
format reader for ``%s``.
Returns
-------
%s
A new instance.
See Also
--------
write
skbio.io.read
%s
"""
_write_docstring = """Write an instance of ``%s`` to a file.
This is a convenience method for :mod:`skbio.io.write`. For more
information about the I/O system in scikit-bio, please see
:mod:`skbio.io`.
Supported file formats include:
%s
Parameters
----------
fp : filepath or filehandle
The location to write the given `format` into. Filepaths are
automatically closed when written; filehandles are the
responsibility of the caller.
format : str
The format must be a registered format name with a writer for
``%s``.
Default is `'%s'`.
kwargs : dict, optional
Keyword arguments passed to :mod:`skbio.io.write` and the
file format writer.
See Also
--------
read
skbio.io.write
%s
"""
|
Kleptobismol/scikit-bio
|
skbio/io/_registry.py
|
Python
|
bsd-3-clause
| 27,288
|
[
"scikit-bio"
] |
25f46ab8c6d4ecc3373dfbe044566c23ce32b42ba678e12d613542d38c251885
|
# encoding: utf-8
import re
TLDS = [
"ac", "ad", "ae", "af", "ag", "ai", "al", "am", "an", "ao", "aq", "ar",
"as", "at", "au", "aw", "ax", "az", "ba", "bb", "bd", "be", "bf", "bg",
"bh", "bi", "bj", "bl", "bm", "bn", "bo", "bq", "br", "bs", "bt", "bv",
"bw", "by", "bz", "ca", "cc", "cd", "cf", "cg", "ch", "ci", "ck", "cl",
"cm", "cn", "co", "cr", "cu", "cv", "cw", "cx", "cy", "cz", "de", "dj",
"dk", "dm", "do", "dz", "ec", "ee", "eg", "eh", "er", "es", "et", "eu",
"fi", "fj", "fk", "fm", "fo", "fr", "ga", "gb", "gd", "ge", "gf", "gg",
"gh", "gi", "gl", "gm", "gn", "gp", "gq", "gr", "gs", "gt", "gu", "gw",
"gy", "hk", "hm", "hn", "hr", "ht", "hu", "id", "ie", "il", "im", "in",
"io", "iq", "ir", "is", "it", "je", "jm", "jo", "jp", "ke", "kg", "kh",
"ki", "km", "kn", "kp", "kr", "kw", "ky", "kz", "la", "lb", "lc", "li",
"lk", "lr", "ls", "lt", "lu", "lv", "ly", "ma", "mc", "md", "me", "mf",
"mg", "mh", "mk", "ml", "mm", "mn", "mo", "mp", "mq", "mr", "ms", "mt",
"mu", "mv", "mw", "mx", "my", "mz", "na", "nc", "ne", "nf", "ng", "ni",
"nl", "no", "np", "nr", "nu", "nz", "om", "pa", "pe", "pf", "pg", "ph",
"pk", "pl", "pm", "pn", "pr", "ps", "pt", "pw", "py", "qa", "re", "ro",
"rs", "ru", "rw", "sa", "sb", "sc", "sd", "se", "sg", "sh", "si", "sj",
"sk", "sl", "sm", "sn", "so", "sr", "ss", "st", "su", "sv", "sx", "sy",
"sz", "tc", "td", "tf", "tg", "th", "tj", "tk", "tl", "tm", "tn", "to",
"tp", "tr", "tt", "tv", "tw", "tz", "ua", "ug", "uk", "um", "us", "uy",
"uz", "va", "vc", "ve", "vg", "vi", "vn", "vu", "wf", "ws", "ye", "yt",
"za", "zm", "zw", "ελ", "бел", "мкд", "мон", "рф", "срб", "укр", "қаз",
"հայ", "الاردن", "الجزائر", "السعودية", "المغرب", "امارات", "ایران", "بھارت",
"تونس", "سودان", "سورية", "عراق", "عمان", "فلسطين", "قطر", "مصر",
"مليسيا", "پاکستان", "भारत", "বাংলা", "ভারত", "ਭਾਰਤ", "ભારત",
"இந்தியா", "இலங்கை", "சிங்கப்பூர்", "భారత్", "ලංකා", "ไทย",
"გე", "中国", "中國", "台湾", "台灣", "新加坡", "澳門", "香港", "한국", "neric:",
"abb", "abbott", "abogado", "academy", "accenture", "accountant",
"accountants", "aco", "active", "actor", "ads", "adult", "aeg", "aero",
"afl", "agency", "aig", "airforce", "airtel", "allfinanz", "alsace",
"amsterdam", "android", "apartments", "app", "aquarelle", "archi", "army",
"arpa", "asia", "associates", "attorney", "auction", "audio", "auto",
"autos", "axa", "azure", "band", "bank", "bar", "barcelona", "barclaycard",
"barclays", "bargains", "bauhaus", "bayern", "bbc", "bbva", "bcn", "beer",
"bentley", "berlin", "best", "bet", "bharti", "bible", "bid", "bike",
"bing", "bingo", "bio", "biz", "black", "blackfriday", "bloomberg", "blue",
"bmw", "bnl", "bnpparibas", "boats", "bond", "boo", "boots", "boutique",
"bradesco", "bridgestone", "broker", "brother", "brussels", "budapest",
"build", "builders", "business", "buzz", "bzh", "cab", "cafe", "cal",
"camera", "camp", "cancerresearch", "canon", "capetown", "capital",
"caravan", "cards", "care", "career", "careers", "cars", "cartier",
"casa", "cash", "casino", "cat", "catering", "cba", "cbn", "ceb", "center",
"ceo", "cern", "cfa", "cfd", "chanel", "channel", "chat", "cheap",
"chloe", "christmas", "chrome", "church", "cisco", "citic", "city",
"claims", "cleaning", "click", "clinic", "clothing", "cloud", "club",
"coach", "codes", "coffee", "college", "cologne", "com", "commbank",
"community", "company", "computer", "condos", "construction", "consulting",
"contractors", "cooking", "cool", "coop", "corsica", "country", "coupons",
"courses", "credit", "creditcard", "cricket", "crown", "crs", "cruises",
"cuisinella", "cymru", "cyou", "dabur", "dad", "dance", "date", "dating",
"datsun", "day", "dclk", "deals", "degree", "delivery", "delta",
"democrat", "dental", "dentist", "desi", "design", "dev", "diamonds",
"diet", "digital", "direct", "directory", "discount", "dnp", "docs",
"dog", "doha", "domains", "doosan", "download", "drive", "durban", "dvag",
"earth", "eat", "edu", "education", "email", "emerck", "energy",
"engineer", "engineering", "enterprises", "epson", "equipment", "erni",
"esq", "estate", "eurovision", "eus", "events", "everbank", "exchange",
"expert", "exposed", "express", "fage", "fail", "faith", "family", "fan",
"fans", "farm", "fashion", "feedback", "film", "finance", "financial",
"firmdale", "fish", "fishing", "fit", "fitness", "flights", "florist",
"flowers", "flsmidth", "fly", "foo", "football", "forex", "forsale",
"forum", "foundation", "frl", "frogans", "fund", "furniture", "futbol",
"fyi", "gal", "gallery", "game", "garden", "gbiz", "gdn", "gent",
"genting", "ggee", "gift", "gifts", "gives", "giving", "glass", "gle",
"global", "globo", "gmail", "gmo", "gmx", "gold", "goldpoint", "golf",
"goo", "goog", "google", "gop", "gov", "graphics", "gratis", "green",
"gripe", "group", "guge", "guide", "guitars", "guru", "hamburg", "hangout",
"haus", "healthcare", "help", "here", "hermes", "hiphop", "hitachi", "hiv",
"hockey", "holdings", "holiday", "homedepot", "homes", "honda", "horse",
"host", "hosting", "hoteles", "hotmail", "house", "how", "hsbc", "ibm",
"icbc", "ice", "icu", "ifm", "iinet", "immo", "immobilien", "industries",
"infiniti", "info", "ing", "ink", "institute", "insure", "int",
"international", "investments", "ipiranga", "irish", "ist", "istanbul",
"itau", "iwc", "java", "jcb", "jetzt", "jewelry", "jlc", "jll", "jobs",
"joburg", "jprs", "juegos", "kaufen", "kddi", "kim", "kitchen", "kiwi",
"koeln", "komatsu", "krd", "kred", "kyoto", "lacaixa", "lancaster", "land",
"lasalle", "lat", "latrobe", "law", "lawyer", "lds", "lease", "leclerc",
"legal", "lexus", "lgbt", "liaison", "lidl", "life", "lighting", "limited",
"limo", "link", "live", "lixil", "loan", "loans", "lol", "london", "lotte",
"lotto", "love", "ltda", "lupin", "luxe", "luxury", "madrid", "maif",
"maison", "man", "management", "mango", "market", "marketing", "markets",
"marriott", "mba", "media", "meet", "melbourne", "meme", "memorial", "men",
"menu", "miami", "microsoft", "mil", "mini", "mma", "mobi", "moda", "moe",
"mom", "monash", "money", "montblanc", "mormon", "mortgage", "moscow",
"motorcycles", "mov", "movie", "movistar", "mtn", "mtpc", "museum",
"nadex", "nagoya", "name", "navy", "nec", "net", "netbank", "network",
"neustar", "new", "news", "nexus", "ngo", "nhk", "nico", "ninja", "nissan",
"nokia", "nra", "nrw", "ntt", "nyc", "office", "okinawa", "omega", "one",
"ong", "onl", "online", "ooo", "oracle", "orange", "org", "organic",
"osaka", "otsuka", "ovh", "page", "panerai", "paris", "partners", "parts",
"party", "pet", "pharmacy", "philips", "photo", "photography", "photos",
"physio", "piaget", "pics", "pictet", "pictures", "pink", "pizza", "place",
"play", "plumbing", "plus", "pohl", "poker", "porn", "post", "praxi",
"press", "pro", "prod", "productions", "prof", "properties", "property",
"pub", "qpon", "quebec", "racing", "realtor", "realty", "recipes", "red",
"redstone", "rehab", "reise", "reisen", "reit", "ren", "rent", "rentals",
"repair", "report", "republican", "rest", "restaurant", "review",
"reviews", "rich", "ricoh", "rio", "rip", "rocks", "rodeo", "rsvp", "ruhr",
"run", "ryukyu", "saarland", "sakura", "sale", "samsung", "sandvik",
"sandvikcoromant", "sanofi", "sap", "sarl", "saxo", "sca", "scb",
"schmidt", "scholarships", "school", "schule", "schwarz", "science",
"scor", "scot", "seat", "seek", "sener", "services", "sew", "sex", "sexy",
"shiksha", "shoes", "show", "shriram", "singles", "site", "ski", "sky",
"skype", "sncf", "soccer", "social", "software", "sohu", "solar",
"solutions", "sony", "soy", "space", "spiegel", "spreadbetting", "srl",
"starhub", "statoil", "studio", "study", "style", "sucks", "supplies",
"supply", "support", "surf", "surgery", "suzuki", "swatch", "swiss",
"sydney", "systems", "taipei", "tatamotors", "tatar", "tattoo", "tax",
"taxi", "team", "tech", "technology", "tel", "telefonica", "temasek",
"tennis", "thd", "theater", "tickets", "tienda", "tips", "tires", "tirol",
"today", "tokyo", "tools", "top", "toray", "toshiba", "tours", "town",
"toyota", "toys", "trade", "trading", "training", "travel", "trust", "tui",
"ubs", "university", "uno", "uol", "vacations", "vegas", "ventures",
"vermögensberater", "vermögensberatung", "versicherung", "vet", "viajes",
"video", "villas", "vin", "vision", "vista", "vistaprint", "vlaanderen",
"vodka", "vote", "voting", "voto", "voyage", "wales", "walter", "wang",
"watch", "webcam", "website", "wed", "wedding", "weir", "whoswho", "wien",
"wiki", "williamhill", "win", "windows", "wine", "wme", "work", "works",
"world", "wtc", "wtf", "xbox", "xerox", "xin", "xperia", "xxx", "xyz",
"yachts", "yandex", "yodobashi", "yoga", "yokohama", "youtube", "zip",
"zone", "zuerich", "дети", "ком", "москва", "онлайн", "орг", "рус", "сайт",
"קום", "بازار", "شبكة", "كوم", "موقع", "कॉम", "नेट", "संगठन", "คอม",
"みんな", "グーグル", "コム", "世界", "中信", "中文网", "企业", "佛山", "信息",
"健康", "八卦", "公司", "公益", "商城", "商店", "商标", "在线", "大拿", "娱乐",
"工行", "广东", "慈善", "我爱你", "手机", "政务", "政府", "新闻", "时尚", "机构",
"淡马锡", "游戏", "点看", "移动", "组织机构", "网址", "网店", "网络", "谷歌", "集团",
"飞利浦", "餐厅", "닷넷", "닷컴", "삼성", "onion"]
URL_REGEXP = re.compile(r'(?i)((?:https?://|www\.)*(?:[\w+\-_]+[.])(?:' + r'\b|'.join(TLDS) + r'\b|(?:[0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5]))+(?:[:\w+\/]?[a-z0-9!\*\'\(\);:&=\+\$/%#\[\]\-_\.,~?])*)', re.UNICODE)
def calc_expected_status_length(status, short_url_length=23):
replaced_chars = 0
status_length = len(status)
match = re.findall(URL_REGEXP, status)
if len(match) >= 1:
replaced_chars = len(''.join(match))
status_length = status_length - replaced_chars + (short_url_length * len(match))
return status_length
def is_url(text):
if re.findall(URL_REGEXP, text):
return True
else:
return False
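# --- Hedged usage sketch (added for illustration; not part of the original
# module). The sample strings below are assumptions made up for the demo.
# Each detected URL counts as `short_url_length` characters (23 by default),
# mirroring how Twitter wraps links with t.co.
if __name__ == '__main__':
    print(calc_expected_status_length('Check this: https://example.com/some/path and more'))
    print(is_url('www.example.org'))  # True
    print(is_url('no links here'))    # False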
| shichao-an/python-twitter | twitter/twitter_utils.py | Python | apache-2.0 | 10,774 | ["CASINO", "MOE"] | cdc05e53d8ce9bb3ab2ce7d4cd4c5c9ba730794a1ca0ceb31327944c15bc1106 |
import datetime as dt
import os
import re
import sys
import pytest
from click.testing import CliRunner
from freezegun import freeze_time
from khal.cli import main_ikhal, main_khal
from .utils import _get_ics_filepath, _get_text
class CustomCliRunner(CliRunner):
def __init__(self, config_file, db=None, calendars=None,
xdg_data_home=None, xdg_config_home=None, tmpdir=None, **kwargs):
self.config_file = config_file
self.db = db
self.calendars = calendars
self.xdg_data_home = xdg_data_home
self.xdg_config_home = xdg_config_home
self.tmpdir = tmpdir
super().__init__(**kwargs)
def invoke(self, cli, args=None, *a, **kw):
args = ['-c', str(self.config_file)] + (args or [])
return super().invoke(cli, args, *a, **kw)
@pytest.fixture
def runner(tmpdir, monkeypatch):
db = tmpdir.join('khal.db')
calendar = tmpdir.mkdir('calendar')
calendar2 = tmpdir.mkdir('calendar2')
calendar3 = tmpdir.mkdir('calendar3')
xdg_data_home = tmpdir.join('vdirs')
xdg_config_home = tmpdir.join('.config')
config_file = xdg_config_home.join('khal').join('config')
# TODO create a vdir config on disk and let vdirsyncer actually read it
monkeypatch.setattr('vdirsyncer.cli.config.load_config', lambda: Config())
monkeypatch.setattr('xdg.BaseDirectory.xdg_data_home', str(xdg_data_home))
monkeypatch.setattr('xdg.BaseDirectory.xdg_config_home', str(xdg_config_home))
monkeypatch.setattr('xdg.BaseDirectory.xdg_config_dirs', [str(xdg_config_home)])
def inner(print_new=False, default_calendar=True, days=2, **kwargs):
if default_calendar:
default_calendar = 'default_calendar = one'
else:
default_calendar = ''
if not os.path.exists(str(xdg_config_home.join('khal'))):
os.makedirs(str(xdg_config_home.join('khal')))
config_file.write(config_template.format(
delta=str(days) + 'd',
calpath=str(calendar), calpath2=str(calendar2), calpath3=str(calendar3),
default_calendar=default_calendar,
print_new=print_new,
dbpath=str(db), **kwargs))
runner = CustomCliRunner(
config_file=config_file, db=db, calendars={"one": calendar},
xdg_data_home=xdg_data_home, xdg_config_home=xdg_config_home,
tmpdir=tmpdir,
)
return runner
return inner
config_template = '''
[calendars]
[[one]]
path = {calpath}
color = dark blue
[[two]]
path = {calpath2}
color = dark green
[[three]]
path = {calpath3}
[locale]
local_timezone = Europe/Berlin
default_timezone = Europe/Berlin
timeformat = %H:%M
dateformat = %d.%m.
longdateformat = %d.%m.%Y
datetimeformat = %d.%m. %H:%M
longdatetimeformat = %d.%m.%Y %H:%M
firstweekday = 0
[default]
{default_calendar}
timedelta = {delta}
print_new = {print_new}
[sqlite]
path = {dbpath}
'''
def test_direct_modification(runner):
runner = runner()
result = runner.invoke(main_khal, ['list'])
assert result.output == 'No events\n'
assert not result.exception
cal_dt = _get_text('event_dt_simple')
event = runner.calendars['one'].join('test.ics')
event.write(cal_dt)
format = '{start-end-time-style}: {title}'
args = ['list', '--format', format, '--day-format', '', '09.04.2014']
result = runner.invoke(main_khal, args)
assert not result.exception
assert result.output == '09:30-10:30: An Event\n'
os.remove(str(event))
result = runner.invoke(main_khal, ['list'])
assert not result.exception
assert result.output == 'No events\n'
def test_simple(runner):
runner = runner(days=2)
result = runner.invoke(main_khal, ['list'])
assert not result.exception
assert result.output == 'No events\n'
now = dt.datetime.now().strftime('%d.%m.%Y')
result = runner.invoke(
main_khal, f'new {now} 18:00 myevent'.split())
assert result.output == ''
assert not result.exception
result = runner.invoke(main_khal, ['list'])
print(result.output)
assert 'myevent' in result.output
assert '18:00' in result.output
# test show_all_days default value
assert 'Tomorrow:' not in result.output
assert not result.exception
def test_simple_color(runner):
runner = runner(days=2)
now = dt.datetime.now().strftime('%d.%m.%Y')
result = runner.invoke(main_khal, f'new {now} 18:00 myevent'.split())
assert result.output == ''
assert not result.exception
result = runner.invoke(main_khal, ['list'], color=True)
assert not result.exception
assert '\x1b[34m' in result.output
def test_days(runner):
runner = runner(days=9)
when = (dt.datetime.now() + dt.timedelta(days=7)).strftime('%d.%m.%Y')
result = runner.invoke(main_khal, f'new {when} 18:00 nextweek'.split())
assert result.output == ''
assert not result.exception
when = (dt.datetime.now() + dt.timedelta(days=30)).strftime('%d.%m.%Y')
result = runner.invoke(main_khal, f'new {when} 18:00 nextmonth'.split())
assert result.output == ''
assert not result.exception
result = runner.invoke(main_khal, ['list'])
assert 'nextweek' in result.output
assert 'nextmonth' not in result.output
assert '18:00' in result.output
assert not result.exception
def test_notstarted(runner):
with freeze_time('2015-6-1 15:00'):
runner = runner(days=2)
for command in [
'new 30.5.2015 5.6.2015 long event',
'new 2.6.2015 4.6.2015 two day event',
'new 1.6.2015 14:00 18:00 four hour event',
'new 1.6.2015 16:00 17:00 one hour event',
'new 2.6.2015 10:00 13:00 three hour event',
]:
result = runner.invoke(main_khal, command.split())
assert not result.exception
result = runner.invoke(main_khal, 'list now'.split())
assert result.output == \
"""Today, 01.06.2015
↔ long event
14:00-18:00 four hour event
16:00-17:00 one hour event
Tomorrow, 02.06.2015
↔ long event
↦ two day event
10:00-13:00 three hour event
Wednesday, 03.06.2015
↔ long event
↔ two day event
"""
assert not result.exception
result = runner.invoke(main_khal, 'list now --notstarted'.split())
assert result.output == \
"""Today, 01.06.2015
16:00-17:00 one hour event
Tomorrow, 02.06.2015
↦ two day event
10:00-13:00 three hour event
Wednesday, 03.06.2015
↔ two day event
"""
assert not result.exception
result = runner.invoke(main_khal, 'list now --once'.split())
assert result.output == \
"""Today, 01.06.2015
↔ long event
14:00-18:00 four hour event
16:00-17:00 one hour event
Tomorrow, 02.06.2015
↦ two day event
10:00-13:00 three hour event
"""
assert not result.exception
result = runner.invoke(main_khal, 'list now --once --notstarted'.split())
assert result.output == \
"""Today, 01.06.2015
16:00-17:00 one hour event
Tomorrow, 02.06.2015
↦ two day event
10:00-13:00 three hour event
"""
assert not result.exception
def test_calendar(runner):
with freeze_time('2015-6-1'):
runner = runner(days=0)
result = runner.invoke(main_khal, ['calendar'])
assert not result.exception
assert result.exit_code == 0
output = '\n'.join([
" Mo Tu We Th Fr Sa Su No events",
"Jun 1 2 3 4 5 6 7 ",
" 8 9 10 11 12 13 14 ",
" 15 16 17 18 19 20 21 ",
" 22 23 24 25 26 27 28 ",
"Jul 29 30 1 2 3 4 5 ",
" 6 7 8 9 10 11 12 ",
" 13 14 15 16 17 18 19 ",
" 20 21 22 23 24 25 26 ",
"Aug 27 28 29 30 31 1 2 ",
" 3 4 5 6 7 8 9 ",
" 10 11 12 13 14 15 16 ",
" 17 18 19 20 21 22 23 ",
" 24 25 26 27 28 29 30 ",
"Sep 31 1 2 3 4 5 6 ",
"",
])
assert result.output == output
def test_long_calendar(runner):
with freeze_time('2015-6-1'):
runner = runner(days=100)
result = runner.invoke(main_khal, ['calendar'])
assert not result.exception
assert result.exit_code == 0
output = '\n'.join([
" Mo Tu We Th Fr Sa Su No events",
"Jun 1 2 3 4 5 6 7 ",
" 8 9 10 11 12 13 14 ",
" 15 16 17 18 19 20 21 ",
" 22 23 24 25 26 27 28 ",
"Jul 29 30 1 2 3 4 5 ",
" 6 7 8 9 10 11 12 ",
" 13 14 15 16 17 18 19 ",
" 20 21 22 23 24 25 26 ",
"Aug 27 28 29 30 31 1 2 ",
" 3 4 5 6 7 8 9 ",
" 10 11 12 13 14 15 16 ",
" 17 18 19 20 21 22 23 ",
" 24 25 26 27 28 29 30 ",
"Sep 31 1 2 3 4 5 6 ",
" 7 8 9 10 11 12 13 ",
" 14 15 16 17 18 19 20 ",
" 21 22 23 24 25 26 27 ",
"Oct 28 29 30 1 2 3 4 ",
"",
])
assert result.output == output
def test_default_command_empty(runner):
runner = runner(days=2)
result = runner.invoke(main_khal)
assert result.exception
assert result.exit_code == 2
assert result.output.startswith('Usage: ')
def test_invalid_calendar(runner):
runner = runner(days=2)
result = runner.invoke(
main_khal, ['new'] + '-a one 18:00 myevent'.split())
assert not result.exception
result = runner.invoke(
main_khal, ['new'] + '-a inexistent 18:00 myevent'.split())
assert result.exception
assert result.exit_code == 2
assert 'Unknown calendar ' in result.output
def test_attach_calendar(runner):
runner = runner(days=2)
result = runner.invoke(main_khal, ['printcalendars'])
assert set(result.output.split('\n')[:3]) == {'one', 'two', 'three'}
assert not result.exception
result = runner.invoke(main_khal, ['printcalendars', '-a', 'one'])
assert result.output == 'one\n'
assert not result.exception
result = runner.invoke(main_khal, ['printcalendars', '-d', 'one'])
assert set(result.output.split('\n')[:2]) == {'two', 'three'}
assert not result.exception
@pytest.mark.parametrize('contents', [
'',
'BEGIN:VCALENDAR\nBEGIN:VTODO\nEND:VTODO\nEND:VCALENDAR\n'
])
def test_no_vevent(runner, tmpdir, contents):
runner = runner(days=2)
broken_item = runner.calendars['one'].join('broken_item.ics')
broken_item.write(contents.encode('utf-8'), mode='wb')
result = runner.invoke(main_khal, ['list'])
assert not result.exception
assert 'No events' in result.output
def test_printformats(runner):
runner = runner(days=2)
result = runner.invoke(main_khal, ['printformats'])
assert '\n'.join(['longdatetimeformat: 21.12.2013 21:45',
'datetimeformat: 21.12. 21:45',
'longdateformat: 21.12.2013',
'dateformat: 21.12.',
'timeformat: 21:45',
'']) == result.output
assert not result.exception
# "see #810"
@pytest.mark.xfail
def test_repeating(runner):
runner = runner(days=2)
now = dt.datetime.now().strftime('%d.%m.%Y')
end_date = dt.datetime.now() + dt.timedelta(days=10)
result = runner.invoke(
main_khal, (f"new {now} 18:00 myevent -r weekly -u "
f"{end_date.strftime('%d.%m.%Y')}").split())
assert not result.exception
assert result.output == ''
def test_at(runner):
runner = runner(days=2)
now = dt.datetime.now().strftime('%d.%m.%Y')
end_date = dt.datetime.now() + dt.timedelta(days=10)
result = runner.invoke(
main_khal,
f"new {now} {end_date.strftime('%d.%m.%Y')} 18:00 myevent".split())
args = ['--color', 'at', '--format', '{start-time}{title}', '--day-format', '', '18:30']
result = runner.invoke(main_khal, args)
assert not result.exception
assert result.output.startswith('myevent')
def test_at_day_format(runner):
runner = runner(days=2)
now = dt.datetime.now().strftime('%d.%m.%Y')
end_date = dt.datetime.now() + dt.timedelta(days=10)
result = runner.invoke(
main_khal,
f"new {now} {end_date.strftime('%d.%m.%Y')} 18:00 myevent".split())
args = ['--color', 'at', '--format', '{start-time}{title}', '--day-format', '{name}', '18:30']
result = runner.invoke(main_khal, args)
assert not result.exception
assert result.output.startswith('Today\x1b[0m\nmyevent')
def test_list(runner):
runner = runner(days=2)
now = dt.datetime.now().strftime('%d.%m.%Y')
result = runner.invoke(
main_khal,
f'new {now} 18:00 myevent'.split())
format = '{red}{start-end-time-style}{reset} {title} :: {description}'
args = ['--color', 'list', '--format', format, '--day-format', 'header', '18:30']
result = runner.invoke(main_khal, args)
expected = 'header\x1b[0m\n\x1b[31m18:00-19:00\x1b[0m myevent :: \x1b[0m\n'
assert not result.exception
assert result.output.startswith(expected)
def test_search(runner):
runner = runner(days=2)
now = dt.datetime.now().strftime('%d.%m.%Y')
result = runner.invoke(main_khal, f'new {now} 18:00 myevent'.split())
format = '{red}{start-end-time-style}{reset} {title} :: {description}'
result = runner.invoke(main_khal, ['--color', 'search', '--format', format, 'myevent'])
assert not result.exception
assert result.output.startswith('\x1b[34m\x1b[31m18:00')
def test_no_default_new(runner):
runner = runner(default_calendar=False)
result = runner.invoke(main_khal, 'new 18:00 beer'.split())
assert ("Error: Invalid value: No default calendar is configured, "
"please provide one explicitly.") in result.output
assert result.exit_code == 2
def test_import(runner, monkeypatch):
runner = runner()
result = runner.invoke(main_khal, 'import -a one -a two import file.ics'.split())
assert result.exception
assert result.exit_code == 2
assert 'Can\'t use "--include-calendar" / "-a" more than once' in result.output
class FakeImport():
args, kwargs = None, None
def clean(self):
self.args, self.kwargs = None, None
def import_ics(self, *args, **kwargs):
print('saving args')
print(args)
self.args = args
self.kwargs = kwargs
fake = FakeImport()
monkeypatch.setattr('khal.controllers.import_ics', fake.import_ics)
# as we are not actually parsing the file we want to import, we can use
# any readable file at all, therefore re-using the configuration file
result = runner.invoke(main_khal, f'import -a one {runner.config_file}'.split())
assert not result.exception
assert {cal['name'] for cal in fake.args[0].calendars} == {'one'}
fake.clean()
result = runner.invoke(main_khal, f'import {runner.config_file}'.split())
assert not result.exception
assert {cal['name'] for cal in fake.args[0].calendars} == {'one', 'two', 'three'}
def test_import_proper(runner):
runner = runner()
result = runner.invoke(main_khal, ['import', _get_ics_filepath('cal_d')], input='0\ny\n')
assert result.output.startswith('09.04.-09.04. An Event')
assert not result.exception
result = runner.invoke(main_khal, ['search', 'Event'])
assert result.output == '09.04.-09.04. An Event\n'
def test_import_proper_invalid_timezone(runner):
runner = runner()
result = runner.invoke(
main_khal, ['import', _get_ics_filepath('invalid_tzoffset')], input='0\ny\n')
assert result.output.startswith(
'warning: Invalid timezone offset encountered, timezone information may be wrong')
assert not result.exception
result = runner.invoke(main_khal, ['search', 'Event'])
assert result.output.startswith(
'warning: Invalid timezone offset encountered, timezone information may be wrong')
assert '02.12. 08:00-02.12. 09:30 Some event' in result.output
def test_import_invalid_choice_and_prefix(runner):
runner = runner()
result = runner.invoke(main_khal, ['import', _get_ics_filepath('cal_d')], input='9\nth\ny\n')
assert result.output.startswith('09.04.-09.04. An Event')
assert result.output.find('invalid choice') == 125
assert not result.exception
result = runner.invoke(main_khal, ['search', 'Event'])
assert result.output == '09.04.-09.04. An Event\n'
def test_import_from_stdin(runner, monkeypatch):
ics_data = 'This is some really fake icalendar data'
class FakeImport():
args, kwargs = None, None
call_count = 0
def clean(self):
self.args, self.kwargs = None, None
def import_ics(self, *args, **kwargs):
print('saving args')
print(args)
self.call_count += 1
self.args = args
self.kwargs = kwargs
importer = FakeImport()
monkeypatch.setattr('khal.controllers.import_ics', importer.import_ics)
runner = runner()
result = runner.invoke(main_khal, ['import'], input=ics_data)
assert not result.exception
assert importer.call_count == 1
assert importer.kwargs['ics'] == ics_data
def test_interactive_command(runner, monkeypatch):
runner = runner(days=2)
token = "hooray"
def fake_ui(*a, **kw):
print(token)
sys.exit(0)
monkeypatch.setattr('khal.ui.start_pane', fake_ui)
result = runner.invoke(main_ikhal, ['-a', 'one'])
assert not result.exception
assert result.output.strip() == token
result = runner.invoke(main_khal, ['interactive', '-a', 'one'])
assert not result.exception
assert result.output.strip() == token
def test_color_option(runner):
runner = runner(days=2)
result = runner.invoke(main_khal, ['--no-color', 'list'])
assert result.output == 'No events\n'
result = runner.invoke(main_khal, ['--color', 'list'])
assert 'No events' in result.output
assert result.output != 'No events\n'
def choices(dateformat=0, timeformat=0,
parse_vdirsyncer_conf=True,
create_vdir=False,
default_calendar='',
write_config=True):
"""helper function to generate input for testing `configure`"""
confirm = {True: 'y', False: 'n'}
out = [
str(dateformat), str(timeformat),
confirm[parse_vdirsyncer_conf],
]
if not parse_vdirsyncer_conf:
out.append(confirm[create_vdir])
out.append(default_calendar)
out.append(confirm[write_config])
out.append('')
return '\n'.join(out)
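# Worked example (added note, not part of the original tests): with all
# defaults, choices() returns '0\n0\ny\n\ny\n' -- i.e. dateformat 0,
# timeformat 0, parse the vdirsyncer config, accept the default-calendar
# prompt with an empty line, and confirm writing the config file.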
class Config():
"""helper class for mocking vdirsyncer's config objects"""
# TODO create a vdir config on disk and let vdirsyncer actually read it
storages = {
'home_calendar_local': {
'type': 'filesystem',
'instance_name': 'home_calendar_local',
'path': '~/.local/share/calendars/home/',
'fileext': '.ics',
},
'events_local': {
'type': 'filesystem',
'instance_name': 'events_local',
'path': '~/.local/share/calendars/events/',
'fileext': '.ics',
},
'home_calendar_remote': {
'type': 'caldav',
'url': 'https://some.url/caldav',
'username': 'foo',
'password.fetch': ['command', 'get_secret'],
'instance_name': 'home_calendar_remote',
},
'home_contacts_remote': {
'type': 'carddav',
'url': 'https://another.url/caldav',
'username': 'bar',
'password.fetch': ['command', 'get_secret'],
'instance_name': 'home_contacts_remote',
},
'home_contacts_local': {
'type': 'filesystem',
'instance_name': 'home_contacts_local',
'path': '~/.local/share/contacts/',
'fileext': '.vcf',
},
'events_remote': {
'type': 'http',
'instance_name': 'events_remote',
'url': 'http://list.of/events/',
},
}
def test_configure_command(runner):
runner_factory = runner
runner = runner()
runner.config_file.remove()
result = runner.invoke(main_khal, ['configure'], input=choices())
assert f'Successfully wrote configuration to {runner.config_file}' in result.output
assert result.exit_code == 0
with open(str(runner.config_file)) as f:
actual_config = ''.join(f.readlines())
assert actual_config == '''[calendars]
[[events_local]]
path = ~/.local/share/calendars/events/*
type = discover
[[home_calendar_local]]
path = ~/.local/share/calendars/home/*
type = discover
[[home_contacts_local]]
path = ~/.local/share/contacts/*
type = discover
[locale]
timeformat = %H:%M
dateformat = %Y-%m-%d
longdateformat = %Y-%m-%d
datetimeformat = %Y-%m-%d %H:%M
longdatetimeformat = %Y-%m-%d %H:%M
[default]
default_calendar = events_local
'''
# if aborting, no config file should be written
runner = runner_factory()
assert os.path.exists(str(runner.config_file))
runner.config_file.remove()
assert not os.path.exists(str(runner.config_file))
result = runner.invoke(main_khal, ['configure'], input=choices(write_config=False))
assert 'aborted' in result.output
assert result.exit_code == 1
def test_print_ics_command(runner):
runner = runner()
# Input is empty and loading from stdin
result = runner.invoke(main_khal, ['printics', '-'])
assert result.exception
# Non existing file
result = runner.invoke(main_khal, ['printics', 'nonexisting_file'])
assert result.exception
assert re.search(r'''Error: Invalid value for "?'?\[?(ICS|ics)\]?'?"?: '''
r'''('nonexisting_file': No such file or directory\n|'''
r'Could not open file:)', result.output)
# Run on test files
result = runner.invoke(main_khal, ['printics', _get_ics_filepath('cal_d')])
assert not result.exception
result = runner.invoke(main_khal, ['printics', _get_ics_filepath('cal_dt_two_tz')])
assert not result.exception
# Test with some nice format strings
form = '{uid}\t{title}\t{description}\t{start}\t{start-long}\t{start-date}' \
'\t{start-date-long}\t{start-time}\t{end}\t{end-long}\t{end-date}' \
'\t{end-date-long}\t{end-time}\t{repeat-symbol}\t{description}' \
'\t{description-separator}\t{location}\t{calendar}' \
'\t{calendar-color}\t{start-style}\t{to-style}\t{end-style}' \
'\t{start-end-time-style}\t{end-necessary}\t{end-necessary-long}'
result = runner.invoke(main_khal, [
'printics', '-f', form, _get_ics_filepath('cal_dt_two_tz')])
assert not result.exception
assert 25 == len(result.output.split('\t'))
result = runner.invoke(main_khal, [
'printics', '-f', form, _get_ics_filepath('cal_dt_two_tz')])
assert not result.exception
assert 25 == len(result.output.split('\t'))
def test_printics_read_from_stdin(runner):
runner = runner(command='printics')
result = runner.invoke(main_khal, ['printics'], input=_get_text('cal_d'))
assert not result.exception
assert '1 events found in stdin input\n09.04.-09.04. An Event\n' in result.output
def test_configure_command_config_exists(runner):
runner = runner()
result = runner.invoke(main_khal, ['configure'], input=choices())
assert 'Found an existing' in result.output
assert result.exit_code == 1
def test_configure_command_create_vdir(runner):
runner = runner()
runner.config_file.remove()
runner.xdg_config_home.remove()
result = runner.invoke(
main_khal, ['configure'],
input=choices(parse_vdirsyncer_conf=False, create_vdir=True),
)
assert f'Successfully wrote configuration to {str(runner.config_file)}' in result.output
assert result.exit_code == 0
with open(str(runner.config_file)) as f:
actual_config = ''.join(f.readlines())
assert actual_config == f'''[calendars]
[[private]]
path = {str(runner.xdg_data_home)}/khal/calendars/private
type = calendar
[locale]
timeformat = %H:%M
dateformat = %Y-%m-%d
longdateformat = %Y-%m-%d
datetimeformat = %Y-%m-%d %H:%M
longdatetimeformat = %Y-%m-%d %H:%M
[default]
default_calendar = private
'''
# running configure again, should yield another vdir path, as the old
# one still exists
runner.config_file.remove()
result = runner.invoke(
main_khal, ['configure'],
input=choices(parse_vdirsyncer_conf=False, create_vdir=True),
)
assert f'Successfully wrote configuration to {str(runner.config_file)}' in result.output
assert result.exit_code == 0
with open(str(runner.config_file)) as f:
actual_config = ''.join(f.readlines())
assert f'{runner.xdg_data_home}/khal/calendars/private1' in actual_config
def cleanup(paths):
"""reset permissions of all files and folders in `paths` to 644 resp. 755"""
for path in paths:
if os.path.exists(path):
os.chmod(str(path), 0o755)
for dirpath, _dirnames, filenames in os.walk(path):
os.chmod(str(dirpath), 0o755)
for filename in filenames:
os.chmod(str(os.path.join(dirpath, filename)), 0o644)
def test_configure_command_cannot_write_config_file(runner):
runner = runner()
runner.config_file.remove()
os.chmod(str(runner.xdg_config_home), 0o555)
result = runner.invoke(main_khal, ['configure'], input=choices())
assert result.exit_code == 1
# make sure pytest can clean up behind us
cleanup([runner.xdg_config_home])
def test_configure_command_cannot_create_vdir(runner):
runner = runner()
runner.config_file.remove()
os.mkdir(str(runner.xdg_data_home), mode=0o555)
result = runner.invoke(
main_khal, ['configure'],
input=choices(parse_vdirsyncer_conf=False, create_vdir=True),
)
assert 'Exiting' in result.output
assert result.exit_code == 1
# make sure pytest can clean up behind us
cleanup([runner.xdg_data_home])
def test_configure_no_vdir(runner):
runner = runner()
runner.config_file.remove()
result = runner.invoke(
main_khal, ['configure'],
input=choices(parse_vdirsyncer_conf=False, create_vdir=False),
)
assert 'khal will not be usable like this' in result.output
assert result.exit_code == 0
assert not result.exception
def test_edit(runner):
runner = runner()
result = runner.invoke(main_khal, ['list'])
assert not result.exception
assert result.output == 'No events\n'
for name in ['event_dt_simple', 'event_d_15']:
cal_dt = _get_text(name)
event = runner.calendars['one'].join(f'{name}.ics')
event.write(cal_dt)
format = '{start-end-time-style}: {title}'
result = runner.invoke(
main_khal, ['edit', '--show-past', 'Event'], input='s\nGreat Event\nn\nn\n')
assert not result.exception
args = ['list', '--format', format, '--day-format', '', '09.04.2014']
result = runner.invoke(main_khal, args)
assert '09:30-10:30: Great Event' in result.output
assert not result.exception
args = ['list', '--format', format, '--day-format', '', '09.04.2015']
result = runner.invoke(main_khal, args)
assert ': An Event' in result.output
assert not result.exception
def test_new(runner):
runner = runner(print_new='path')
result = runner.invoke(main_khal, 'new 13.03.2016 3d Visit'.split())
assert not result.exception
assert result.output.endswith('.ics\n')
assert result.output.startswith(str(runner.tmpdir))
@freeze_time('2015-6-1 8:00')
def test_new_interactive(runner):
runner = runner(print_new='path')
result = runner.invoke(
main_khal, 'new -i'.split(),
'Another event\n13:00 17:00\n\nNone\nn\n'
)
assert not result.exception
assert result.exit_code == 0
def test_debug(runner):
runner = runner()
result = runner.invoke(main_khal, ['-v', 'debug', 'printformats'])
assert result.output.startswith('debug: khal 0.')
assert 'using the config file at' in result.output
assert 'debug: Using config:\ndebug: [calendars]' in result.output
assert not result.exception
@freeze_time('2015-6-1 8:00')
def test_new_interactive_extensive(runner):
runner = runner(print_new='path', default_calendar=False)
result = runner.invoke(
main_khal, 'new -i 15:00 15:30'.split(),
'?\ninvalid\ntwo\n'
'Unicce Name\n'
'\n'
'Europe/London\n'
'bar\n'
'l\non a boat\n'
'p\nweekly\n'
'1.1.2018\n'
'a\n30m\n'
'c\nwork\n'
'n\n'
)
assert not result.exception
assert result.exit_code == 0
@freeze_time('2015-6-1 8:00')
def test_issue_1056(runner):
"""if an ansi escape sequence is contained in the output, we can't parse it
properly"""
runner = runner(print_new='path', default_calendar=False)
result = runner.invoke(
main_khal, 'new -i'.split(),
'two\n'
'new event\n'
'now\n'
'Europe/London\n'
'None\n'
't\n' # edit datetime range
'\n'
'n\n'
)
assert 'error parsing range' not in result.output
assert not result.exception
assert result.exit_code == 0
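# --- Hedged sketch (added for illustration; not part of the original test
# suite). It shows the fixture pattern used throughout this file: `runner`
# is a factory, so each test calls it first (optionally overriding config
# template values) and then drives khal through the resulting CliRunner.
def test_example_usage(runner):
    runner = runner(days=2)                      # render config, build CustomCliRunner
    result = runner.invoke(main_khal, ['list'])  # runs `khal -c <config> list`
    assert not result.exception
    assert result.output == 'No events\n'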
| pimutils/khal | tests/cli_test.py | Python | mit | 29,697 | ["VisIt"] | 18f66a0184c805a884e3f6e19e61c172ca62ef558c6ec809edf7b2bb6192149a |
# ##########################################################################
#
# Example to submit a Marlin job to ILCDirac as a User job.
#
# To submit,
# python suboverlay.py
#
# A. Miyamoto, 1-July-2019
#
# ###########################################################################
from DIRAC.Core.Base import Script
from DIRAC import gLogger, S_OK, S_ERROR
# ######################################
class _Params():
def __init__(self):
self.isLocal = False
self.numberOfEvents = 0
self.inputFile = "lfn:/ilc/user/a/amiyamot/testjob/2019-07/ddsim_example.slcio"
self.outputFilePrefix = ""
self.outputDir = ""
self.doOverlay = True
def setLocal( self, opt ):
self.isLocal = True
gLogger.info("Script is executed locally")
return S_OK()
def setNumberOfEvents( self, opt ):
self.numberOfEvents = int(opt)
gLogger.info("Number of events is %d" % self.numberOfEvents)
return S_OK()
def setInputFile( self, opt ):
self.inputFile = opt
gLogger.info("Input file is %s" % self.inputFile)
return S_OK()
def setOutputFilePrefix( self, opt ):
self.outputFilePrefix = opt
gLogger.info("Output file prefix is %s" % self.outputFilePrefix)
return S_OK()
def setOutputDir( self, opt ):
self.outputDir = opt
gLogger.info("Output file is written at %s" % self.outputDir)
return S_OK()
def setDoOverlay( self, opt ):
self.doOverlay = True
gLogger.info("Output file is written at %s" % self.outputDir)
# gLogger.warning("Do overlay background is requested, but this function is not implemente yet.")
return S_OK()
def registerSwitches(self):
Script.registerSwitch('l','local', 'If given, execute locally', self.setLocal )
# Script.registerSwitch('n:','number_of_events:', 'Number of events to simulate', self.setNumberOfEvents )
Script.registerSwitch('i:', 'InputFile:', 'Input file name', self.setInputFile)
Script.registerSwitch('f:', 'OutputFilePrefix:', 'Output file prefix', self.setOutputFilePrefix)
Script.registerSwitch('w:', 'WriteDir:', 'Output directory. No output, if not given', self.setOutputDir)
# Script.registerSwitch('O', 'Overlay', 'Overlay background data ', self.setDoOverlay)
msg = '%s [options]\n' % Script.scriptName
msg += 'Function: Submit a job for overlay reconstruction'
Script.setUsageMessage(msg)
# ######################################
# global variables to hold command line parameters
# ######################################
_clip = _Params()
_clip.registerSwitches()
Script.parseCommandLine()
from ILCDIRAC.Interfaces.API.NewInterface.UserJob import UserJob
from ILCDIRAC.Interfaces.API.NewInterface.Applications import Marlin, OverlayInput
from ILCDIRAC.Interfaces.API.DiracILC import DiracILC
# ######################################
def subOverlay():
# Decide parameters for a job
outputSE = "KEK-SRM"
isLocal = _clip.isLocal
nbevts = 50 if _clip.numberOfEvents == 0 else _clip.numberOfEvents
nbevts = 0 # To analyze all input events
outputFilePrefix="overlay_example" if _clip.outputFilePrefix == "" else _clip.outputFilePrefix
outputDir = _clip.outputDir
inputFile = _clip.inputFile
if inputFile == "":
gLogger.error("Input file for ddsim does not given.")
exit(-1)
recfile = outputFilePrefix + ".rec.slcio"
dstfile = outputFilePrefix + ".dst.slcio"
detector_model = "ILD_l5_o1_v02"
key = detector_model.split('_')
sim_detectorModel = "_".join([key[0], key[1], key[3]])
# Create DIRAC objects for job submission
dIlc = DiracILC()
job = UserJob()
job.setJobGroup( "myoverlayjob" )
job.setName( "myoverlay" )
job.setOutputSandbox(['*.log', '*.sh', '*.py', '*.xml'])
job.setILDConfig("v02-00-02")
# job.setInputSandbox(["a6-parameters.sin", "P2f_qqbar.sin"])
# job.setDestination(["LCG.KEK.jp", "LCG.DESY-HH.de"]) # job submission destination
# job.setBannedSites([]) # a list of sites not to submit job
# job.setCPUTime( cputime_limit_in_seconds_by_dirac_units )
# Create Overlay application
ovldata = [
{"ProcessorName":"BgOverlayWW", "evttype":"aa_lowpt_WW", "ProdID":10237, "expBG":0.211, "subdir":"000"},
{"ProcessorName":"BgOverlayWB", "evttype":"aa_lowpt_WB", "ProdID":10241, "expBG":0.24605, "subdir":"000"},
{"ProcessorName":"BgOverlayBW", "evttype":"aa_lowpt_BW", "ProdID":10239, "expBG":0.243873, "subdir":"000"},
{"ProcessorName":"BgOverlayBB", "evttype":"aa_lowpt_BB", "ProdID":10235, "expBG":0.35063, "subdir":"000"},
{"ProcessorName":"PairBgOverlay", "evttype":"seeablepairs", "ProdID":10233, "expBG":1.0, "subdir":"100"}
]
BXOverlay = 1
NbSigEvtsPerJob = 100
numberOfSignalEvents = NbSigEvtsPerJob
basebkgpath = "/ilc/prod/ilc/mc-opt-3/ild/sim/500-TDR_ws"
energy = "500"
for ovl in ovldata:
print "### OverlayInput ... "+ovl["ProcessorName"]
ovlapp = OverlayInput()
ovlpath = "%s/%s/%s/v02-00-01/%8.8d/%s" % \
( basebkgpath, ovl["evttype"], sim_detectorModel, ovl["ProdID"] , ovl["subdir"] )
print " OverlayPath ... " + ovlpath
ovlapp.setMachine("ilc_dbd")
# ovlapp.setEnergy(energy)
# ovlapp.setDetectorModel(sim_detectorModel)
ovlapp.setProcessorName(ovl["ProcessorName"])
ovlapp.setBkgEvtType(ovl["evttype"])
ovlapp.setPathToFiles(ovlpath)
ovlapp.setGGToHadInt(ovl["expBG"])
ovlapp.setBXOverlay(BXOverlay)
ovlapp.setNbSigEvtsPerJob(NbSigEvtsPerJob)
ovlapp.setNumberOfSignalEventsPerJob(numberOfSignalEvents)
res = job.append(ovlapp)
if not res['OK']:
print(res['Message'])
exit(1)
# Create Marlin application
marlin = Marlin()
marlin.setVersion("ILCSoft-02-00-02_gcc49")
marlin.setDetectorModel(detector_model)
marlin.setSteeringFile("MarlinStdReco.xml")
marlin.setInputFile(inputFile)
marlin.setNumberOfEvents(nbevts)
marlin.setOutputDstFile(dstfile)
marlin.setOutputRecFile(recfile)
extraCLIArguments = " --constant.DetectorModel=%s " % detector_model
extraCLIArguments += " --constant.RunOverlay=true --constant.CMSEnergy=%s " % str(energy)
extraCLIArguments += " --global.Verbosity=MESSAGE "
marlin.setExtraCLIArguments( extraCLIArguments )
job.append(marlin)
if outputDir != "":
job.setOutputData( [dstfile, recfile], OutputPath = outputDir, OutputSE = outputSE )
if isLocal:
job.submit(dIlc, mode="local")
else:
job.submit(dIlc)
# ######################################
if __name__ == "__main__":
subOverlay()
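# --- Hedged sketch (added for illustration; not part of the original
# script). DIRAC calls return result dicts following the S_OK/S_ERROR
# convention; a hypothetical helper for checking a submission result:
def _check_submission(res):
    """Return the job ID(s) on success, raise on failure (illustrative only)."""
    if not res['OK']:
        raise RuntimeError("Submission failed: %s" % res['Message'])
    return res['Value']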
| akiyamiyamoto/Tutorial | grid/ilcdirac/overlay/suboverlay.py | Python | gpl-3.0 | 6,857 | ["DIRAC"] | ec6aed651b599ec08ca5b74738519e740353eb5b7c25804acc000baf516e3a4c |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Module to test fitting routines
"""
import os.path
import pytest
import numpy as np
from numpy import linalg
from numpy.testing import assert_allclose, assert_almost_equal
from unittest import mock
from . import irafutil
from astropy.modeling import models
from astropy.modeling.core import Fittable2DModel, Parameter
from astropy.modeling.fitting import *
from astropy.utils import NumpyRNGContext
from astropy.utils.data import get_pkg_data_filename
from .utils import ignore_non_integer_warning
from astropy.stats import sigma_clip
from astropy.utils.exceptions import AstropyUserWarning
from astropy.modeling.fitting import populate_entry_points
import warnings
try:
from scipy import optimize
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
try:
from pkg_resources import EntryPoint
HAS_PKG = True
except ImportError:
HAS_PKG = False
fitters = [SimplexLSQFitter, SLSQPLSQFitter]
_RANDOM_SEED = 0x1337
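# --- Hedged illustration (added; not part of the original test module).
# The fitter call convention exercised throughout these tests: instantiate
# a fitter, then call it with (model, x, y[, z]) to get a fitted copy of
# the model back. This helper is hypothetical and exists only as an example.
def _example_linear_fit():
    x = np.arange(10)
    y = 3.0 * x + 1.0
    fitter = LinearLSQFitter()
    fitted = fitter(models.Polynomial1D(degree=1), x, y)
    return fitted.parameters  # approximately [1.0, 3.0], i.e. (c0, c1)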
class TestPolynomial2D:
"""Tests for 2D polynomail fitting."""
def setup_class(self):
self.model = models.Polynomial2D(2)
self.y, self.x = np.mgrid[:5, :5]
def poly2(x, y):
return 1 + 2 * x + 3 * x ** 2 + 4 * y + 5 * y ** 2 + 6 * x * y
self.z = poly2(self.x, self.y)
self.fitter = LinearLSQFitter()
def test_poly2D_fitting(self):
v = self.model.fit_deriv(x=self.x, y=self.y)
p = linalg.lstsq(v, self.z.flatten(), rcond=-1)[0]
new_model = self.fitter(self.model, self.x, self.y, self.z)
assert_allclose(new_model.parameters, p)
def test_eval(self):
new_model = self.fitter(self.model, self.x, self.y, self.z)
assert_allclose(new_model(self.x, self.y), self.z)
@pytest.mark.skipif('not HAS_SCIPY')
def test_polynomial2D_nonlinear_fitting(self):
self.model.parameters = [.6, 1.8, 2.9, 3.7, 4.9, 6.7]
nlfitter = LevMarLSQFitter()
new_model = nlfitter(self.model, self.x, self.y, self.z)
assert_allclose(new_model.parameters, [1, 2, 3, 4, 5, 6])
class TestICheb2D:
"""
Tests 2D Chebyshev polynomial fitting
Create a 2D polynomial (z) using Polynomial2DModel and default coefficients
Fit z using an ICheb2D model
Evaluate the ICheb2D polynomial and compare with the initial z
"""
def setup_class(self):
self.pmodel = models.Polynomial2D(2)
self.y, self.x = np.mgrid[:5, :5]
self.z = self.pmodel(self.x, self.y)
self.cheb2 = models.Chebyshev2D(2, 2)
self.fitter = LinearLSQFitter()
def test_default_params(self):
self.cheb2.parameters = np.arange(9)
p = np.array([1344., 1772., 400., 1860., 2448., 552., 432., 568.,
128.])
z = self.cheb2(self.x, self.y)
model = self.fitter(self.cheb2, self.x, self.y, z)
assert_almost_equal(model.parameters, p)
def test_poly2D_cheb2D(self):
model = self.fitter(self.cheb2, self.x, self.y, self.z)
z1 = model(self.x, self.y)
assert_almost_equal(self.z, z1)
@pytest.mark.skipif('not HAS_SCIPY')
def test_chebyshev2D_nonlinear_fitting(self):
cheb2d = models.Chebyshev2D(2, 2)
cheb2d.parameters = np.arange(9)
z = cheb2d(self.x, self.y)
cheb2d.parameters = [0.1, .6, 1.8, 2.9, 3.7, 4.9, 6.7, 7.5, 8.9]
nlfitter = LevMarLSQFitter()
model = nlfitter(cheb2d, self.x, self.y, z)
assert_allclose(model.parameters, [0, 1, 2, 3, 4, 5, 6, 7, 8],
atol=10**-9)
@pytest.mark.skipif('not HAS_SCIPY')
def test_chebyshev2D_nonlinear_fitting_with_weights(self):
cheb2d = models.Chebyshev2D(2, 2)
cheb2d.parameters = np.arange(9)
z = cheb2d(self.x, self.y)
cheb2d.parameters = [0.1, .6, 1.8, 2.9, 3.7, 4.9, 6.7, 7.5, 8.9]
nlfitter = LevMarLSQFitter()
weights = np.ones_like(self.y)
model = nlfitter(cheb2d, self.x, self.y, z, weights=weights)
assert_allclose(model.parameters, [0, 1, 2, 3, 4, 5, 6, 7, 8],
atol=10**-9)
@pytest.mark.skipif('not HAS_SCIPY')
class TestJointFitter:
"""
Tests the joint fitting routine using 2 gaussian models
"""
def setup_class(self):
"""
Create 2 gaussian models and some data with noise.
Create a fitter for the two models keeping the amplitude parameter
common for the two models.
"""
self.g1 = models.Gaussian1D(10, mean=14.9, stddev=.3)
self.g2 = models.Gaussian1D(10, mean=13, stddev=.4)
self.jf = JointFitter([self.g1, self.g2],
{self.g1: ['amplitude'],
self.g2: ['amplitude']}, [9.8])
self.x = np.arange(10, 20, .1)
y1 = self.g1(self.x)
y2 = self.g2(self.x)
with NumpyRNGContext(_RANDOM_SEED):
n = np.random.randn(100)
self.ny1 = y1 + 2 * n
self.ny2 = y2 + 2 * n
self.jf(self.x, self.ny1, self.x, self.ny2)
def test_joint_parameter(self):
"""
Tests that the amplitude of the two models is the same
"""
assert_allclose(self.jf.fitparams[0], self.g1.parameters[0])
assert_allclose(self.jf.fitparams[0], self.g2.parameters[0])
def test_joint_fitter(self):
"""
Tests the fitting routine with similar procedure.
Compares the fitted parameters.
"""
p1 = [14.9, .3]
p2 = [13, .4]
A = 9.8
p = np.r_[A, p1, p2]
def model(A, p, x):
return A * np.exp(-0.5 / p[1] ** 2 * (x - p[0]) ** 2)
def errfunc(p, x1, y1, x2, y2):
return np.ravel(np.r_[model(p[0], p[1:3], x1) - y1,
model(p[0], p[3:], x2) - y2])
coeff, _ = optimize.leastsq(errfunc, p,
args=(self.x, self.ny1, self.x, self.ny2))
assert_allclose(coeff, self.jf.fitparams, rtol=10 ** (-2))
class TestLinearLSQFitter:
def test_compound_model_raises_error(self):
"""Test that if an user tries to use a compound model, raises an error"""
with pytest.raises(ValueError) as excinfo:
init_model1 = models.Polynomial1D(degree=2, c0=[1, 1], n_models=2)
init_model2 = models.Polynomial1D(degree=2, c0=[1, 1], n_models=2)
init_model_comp = init_model1 + init_model2
x = np.arange(10)
y = init_model_comp(x, model_set_axis=False)
fitter = LinearLSQFitter()
fitted_model = fitter(init_model_comp, x, y)
assert "Model must be simple, not compound" in str(excinfo.value)
def test_chebyshev1D(self):
"""Tests fitting a 1D Chebyshev polynomial to some real world data."""
test_file = get_pkg_data_filename(os.path.join('data',
'idcompspec.fits'))
with open(test_file) as f:
lines = f.read()
reclist = lines.split('begin')
record = irafutil.IdentifyRecord(reclist[1])
coeffs = record.coeff
order = int(record.fields['order'])
initial_model = models.Chebyshev1D(order - 1,
domain=record.get_range())
fitter = LinearLSQFitter()
fitted_model = fitter(initial_model, record.x, record.z)
assert_allclose(fitted_model.parameters, np.array(coeffs),
rtol=10e-2)
def test_linear_fit_model_set(self):
"""Tests fitting multiple models simultaneously."""
init_model = models.Polynomial1D(degree=2, c0=[1, 1], n_models=2)
x = np.arange(10)
y_expected = init_model(x, model_set_axis=False)
assert y_expected.shape == (2, 10)
# Add a bit of random noise
with NumpyRNGContext(_RANDOM_SEED):
y = y_expected + np.random.normal(0, 0.01, size=y_expected.shape)
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y)
assert_allclose(fitted_model(x, model_set_axis=False), y_expected,
rtol=1e-1)
def test_linear_fit_2d_model_set(self):
"""Tests fitted multiple 2-D models simultaneously."""
init_model = models.Polynomial2D(degree=2, c0_0=[1, 1], n_models=2)
x = np.arange(10)
y = np.arange(10)
z_expected = init_model(x, y, model_set_axis=False)
assert z_expected.shape == (2, 10)
# Add a bit of random noise
with NumpyRNGContext(_RANDOM_SEED):
z = z_expected + np.random.normal(0, 0.01, size=z_expected.shape)
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y, z)
assert_allclose(fitted_model(x, y, model_set_axis=False), z_expected,
rtol=1e-1)
def test_linear_fit_fixed_parameter(self):
"""
Tests fitting a polynomial model with a fixed parameter (issue #6135).
"""
init_model = models.Polynomial1D(degree=2, c1=1)
init_model.c1.fixed = True
x = np.arange(10)
y = 2 + x + 0.5*x*x
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y)
assert_allclose(fitted_model.parameters, [2., 1., 0.5], atol=1e-14)
def test_linear_fit_model_set_fixed_parameter(self):
"""
Tests fitting a polynomial model set with a fixed parameter (#6135).
"""
init_model = models.Polynomial1D(degree=2, c1=[1, -2], n_models=2)
init_model.c1.fixed = True
x = np.arange(10)
yy = np.array([2 + x + 0.5*x*x, -2*x])
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, yy)
assert_allclose(fitted_model.c0, [2., 0.], atol=1e-14)
assert_allclose(fitted_model.c1, [1., -2.], atol=1e-14)
assert_allclose(fitted_model.c2, [0.5, 0.], atol=1e-14)
def test_linear_fit_2d_model_set_fixed_parameters(self):
"""
Tests fitting a 2d polynomial model set with fixed parameters (#6135).
"""
init_model = models.Polynomial2D(degree=2, c1_0=[1, 2], c0_1=[-0.5, 1],
n_models=2,
fixed={'c1_0': True, 'c0_1': True})
x, y = np.mgrid[0:5, 0:5]
zz = np.array([1+x-0.5*y+0.1*x*x, 2*x+y-0.2*y*y])
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y, zz)
assert_allclose(fitted_model(x, y, model_set_axis=False), zz,
atol=1e-14)
def test_linear_fit_model_set_masked_values(self):
"""
Tests model set fitting with masked value(s) (#4824, #6819).
"""
# NB. For single models, there is an equivalent doctest.
init_model = models.Polynomial1D(degree=1, n_models=2)
x = np.arange(10)
y = np.ma.masked_array([2*x+1, x-2], mask=np.zeros_like([x, x]))
y[0, 7] = 100. # throw off fit coefficients if unmasked
y.mask[0, 7] = True
y[1, 1:3] = -100.
y.mask[1, 1:3] = True
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y)
assert_allclose(fitted_model.c0, [1., -2.], atol=1e-14)
assert_allclose(fitted_model.c1, [2., 1.], atol=1e-14)
def test_linear_fit_2d_model_set_masked_values(self):
"""
Tests 2D model set fitting with masked value(s) (#4824, #6819).
"""
init_model = models.Polynomial2D(1, n_models=2)
x, y = np.mgrid[0:5, 0:5]
z = np.ma.masked_array([2*x+3*y+1, x-0.5*y-2],
mask=np.zeros_like([x, x]))
z[0, 3, 1] = -1000. # throw off fit coefficients if unmasked
z.mask[0, 3, 1] = True
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y, z)
assert_allclose(fitted_model.c0_0, [1., -2.], atol=1e-14)
assert_allclose(fitted_model.c1_0, [2., 1.], atol=1e-14)
assert_allclose(fitted_model.c0_1, [3., -0.5], atol=1e-14)
@pytest.mark.skipif('not HAS_SCIPY')
class TestNonLinearFitters:
"""Tests non-linear least squares fitting and the SLSQP algorithm."""
def setup_class(self):
self.initial_values = [100, 5, 1]
self.xdata = np.arange(0, 10, 0.1)
sigma = 4. * np.ones_like(self.xdata)
with NumpyRNGContext(_RANDOM_SEED):
yerror = np.random.normal(0, sigma)
def func(p, x):
return p[0] * np.exp(-0.5 / p[2] ** 2 * (x - p[1]) ** 2)
self.ydata = func(self.initial_values, self.xdata) + yerror
self.gauss = models.Gaussian1D(100, 5, stddev=1)
def test_estimated_vs_analytic_deriv(self):
"""
Runs `LevMarLSQFitter` with estimated and analytic derivatives of a
`Gaussian1D`.
"""
fitter = LevMarLSQFitter()
model = fitter(self.gauss, self.xdata, self.ydata)
g1e = models.Gaussian1D(100, 5.0, stddev=1)
efitter = LevMarLSQFitter()
emodel = efitter(g1e, self.xdata, self.ydata, estimate_jacobian=True)
assert_allclose(model.parameters, emodel.parameters, rtol=10 ** (-3))
def test_estimated_vs_analytic_deriv_with_weights(self):
"""
Runs `LevMarLSQFitter` with estimated and analytic derivatives of a
`Gaussian1D`.
"""
weights = 1.0 / (self.ydata / 10.)
fitter = LevMarLSQFitter()
model = fitter(self.gauss, self.xdata, self.ydata, weights=weights)
g1e = models.Gaussian1D(100, 5.0, stddev=1)
efitter = LevMarLSQFitter()
emodel = efitter(g1e, self.xdata, self.ydata, weights=weights, estimate_jacobian=True)
assert_allclose(model.parameters, emodel.parameters, rtol=10 ** (-3))
def test_with_optimize(self):
"""
Tests results from `LevMarLSQFitter` against `scipy.optimize.leastsq`.
"""
fitter = LevMarLSQFitter()
model = fitter(self.gauss, self.xdata, self.ydata,
estimate_jacobian=True)
def func(p, x):
return p[0] * np.exp(-0.5 / p[2] ** 2 * (x - p[1]) ** 2)
def errfunc(p, x, y):
return func(p, x) - y
result = optimize.leastsq(errfunc, self.initial_values,
args=(self.xdata, self.ydata))
assert_allclose(model.parameters, result[0], rtol=10 ** (-3))
def test_with_weights(self):
"""
Tests results from `LevMarLSQFitter` with weights.
"""
# part 1: weights are equal to 1
fitter = LevMarLSQFitter()
model = fitter(self.gauss, self.xdata, self.ydata,
estimate_jacobian=True)
withw = fitter(self.gauss, self.xdata, self.ydata,
estimate_jacobian=True, weights=np.ones_like(self.xdata))
assert_allclose(model.parameters, withw.parameters, rtol=10 ** (-4))
# part 2: weights are 0 or 1 (effectively, they are a mask)
weights = np.zeros_like(self.xdata)
weights[::2] = 1.
mask = weights >= 1.
model = fitter(self.gauss, self.xdata[mask], self.ydata[mask],
estimate_jacobian=True)
withw = fitter(self.gauss, self.xdata, self.ydata,
estimate_jacobian=True, weights=weights)
assert_allclose(model.parameters, withw.parameters, rtol=10 ** (-4))
@pytest.mark.parametrize('fitter_class', fitters)
def test_fitter_against_LevMar(self, fitter_class):
"""Tests results from non-linear fitters against `LevMarLSQFitter`."""
levmar = LevMarLSQFitter()
fitter = fitter_class()
with ignore_non_integer_warning():
new_model = fitter(self.gauss, self.xdata, self.ydata)
model = levmar(self.gauss, self.xdata, self.ydata)
assert_allclose(model.parameters, new_model.parameters,
rtol=10 ** (-4))
def test_LSQ_SLSQP_with_constraints(self):
"""
Runs `LevMarLSQFitter` and `SLSQPLSQFitter` on a model with
constraints.
"""
g1 = models.Gaussian1D(100, 5, stddev=1)
g1.mean.fixed = True
fitter = LevMarLSQFitter()
fslsqp = SLSQPLSQFitter()
with ignore_non_integer_warning():
slsqp_model = fslsqp(g1, self.xdata, self.ydata)
model = fitter(g1, self.xdata, self.ydata)
assert_allclose(model.parameters, slsqp_model.parameters,
rtol=10 ** (-4))
def test_simplex_lsq_fitter(self):
"""A basic test for the `SimplexLSQ` fitter."""
class Rosenbrock(Fittable2DModel):
a = Parameter()
b = Parameter()
@staticmethod
def evaluate(x, y, a, b):
return (a - x) ** 2 + b * (y - x ** 2) ** 2
x = y = np.linspace(-3.0, 3.0, 100)
with NumpyRNGContext(_RANDOM_SEED):
z = Rosenbrock.evaluate(x, y, 1.0, 100.0)
z += np.random.normal(0., 0.1, size=z.shape)
fitter = SimplexLSQFitter()
r_i = Rosenbrock(1, 100)
r_f = fitter(r_i, x, y, z)
assert_allclose(r_f.parameters, [1.0, 100.0], rtol=1e-2)
def test_param_cov(self):
"""
Tests that the 'param_cov' fit_info entry gets the right answer for
*linear* least squares, where the answer is exact
"""
a = 2
b = 100
with NumpyRNGContext(_RANDOM_SEED):
x = np.linspace(0, 1, 100)
# y scatter is amplitude ~1 to make sure covariance is
# non-negligible
y = x*a + b + np.random.randn(len(x))
# first compute the ordinary least squares covariance matrix
X = np.matrix(np.vstack([x, np.ones(len(x))]).T)
beta = np.linalg.inv(X.T * X) * X.T * np.matrix(y).T
s2 = np.sum((y - (X * beta).A.ravel())**2) / (len(y) - len(beta))
olscov = np.linalg.inv(X.T * X) * s2
# now do the non-linear least squares fit
mod = models.Linear1D(a, b)
fitter = LevMarLSQFitter()
fmod = fitter(mod, x, y)
assert_allclose(fmod.parameters, beta.A.ravel())
assert_allclose(olscov, fitter.fit_info['param_cov'])
@pytest.mark.skipif('not HAS_PKG')
class TestEntryPoint:
"""Tests population of fitting with entry point fitters"""
def setup_class(self):
self.exception_not_thrown = Exception("The test should not have gotten here. There was no exception thrown")
def successfulimport(self):
# This should work
class goodclass(Fitter):
__name__ = "GoodClass"
return goodclass
def raiseimporterror(self):
# This should fail as it raises an Import Error
raise ImportError
def returnbadfunc(self):
def badfunc():
# This should import but it should fail type check
pass
return badfunc
def returnbadclass(self):
# This should import but it should fail the subclass type check
class badclass:
pass
return badclass
def test_working(self):
"""This should work fine"""
mock_entry_working = mock.create_autospec(EntryPoint)
mock_entry_working.name = "Working"
mock_entry_working.load = self.successfulimport
populate_entry_points([mock_entry_working])
def test_import_error(self):
"""This raises an import error on load to test that it is handled correctly"""
with warnings.catch_warnings():
warnings.filterwarnings('error')
try:
mock_entry_importerror = mock.create_autospec(EntryPoint)
mock_entry_importerror.name = "IErr"
mock_entry_importerror.load = self.raiseimporterror
populate_entry_points([mock_entry_importerror])
except AstropyUserWarning as w:
if "ImportError" in w.args[0]: # any error for this case should have this in it.
pass
else:
raise w
else:
raise self.exception_not_thrown
def test_bad_func(self):
"""This returns a function which fails the type check"""
with warnings.catch_warnings():
warnings.filterwarnings('error')
try:
mock_entry_badfunc = mock.create_autospec(EntryPoint)
mock_entry_badfunc.name = "BadFunc"
mock_entry_badfunc.load = self.returnbadfunc
populate_entry_points([mock_entry_badfunc])
except AstropyUserWarning as w:
if "Class" in w.args[0]: # any error for this case should have this in it.
pass
else:
raise w
else:
raise self.exception_not_thrown
def test_bad_class(self):
"""This returns a class which doesn't inherient from fitter """
with warnings.catch_warnings():
warnings.filterwarnings('error')
try:
mock_entry_badclass = mock.create_autospec(EntryPoint)
mock_entry_badclass.name = "BadClass"
mock_entry_badclass.load = self.returnbadclass
populate_entry_points([mock_entry_badclass])
except AstropyUserWarning as w:
if 'modeling.Fitter' in w.args[0]: # any error for this case should have this in it.
pass
else:
raise w
else:
raise self.exception_not_thrown
@pytest.mark.skipif('not HAS_SCIPY')
class Test1DFittingWithOutlierRemoval:
def setup_class(self):
self.x = np.linspace(-5., 5., 200)
self.model_params = (3.0, 1.3, 0.8)
def func(p, x):
return p[0]*np.exp(-0.5*(x - p[1])**2/p[2]**2)
self.y = func(self.model_params, self.x)
def test_with_fitters_and_sigma_clip(self):
import scipy.stats as stats
np.random.seed(0)
c = stats.bernoulli.rvs(0.25, size=self.x.shape)
self.y += (np.random.normal(0., 0.2, self.x.shape) +
c*np.random.normal(3.0, 5.0, self.x.shape))
g_init = models.Gaussian1D(amplitude=1., mean=0, stddev=1.)
# test with Levenberg-Marquardt Least Squares fitter
fit = FittingWithOutlierRemoval(LevMarLSQFitter(), sigma_clip,
niter=3, sigma=3.0)
fitted_model, _ = fit(g_init, self.x, self.y)
assert_allclose(fitted_model.parameters, self.model_params, rtol=1e-1)
# test with Sequential Least Squares Programming fitter
fit = FittingWithOutlierRemoval(SLSQPLSQFitter(), sigma_clip,
niter=3, sigma=3.0)
fitted_model, _ = fit(g_init, self.x, self.y)
assert_allclose(fitted_model.parameters, self.model_params, rtol=1e-1)
# test with Simplex LSQ fitter
fit = FittingWithOutlierRemoval(SimplexLSQFitter(), sigma_clip,
niter=3, sigma=3.0)
fitted_model, _ = fit(g_init, self.x, self.y)
assert_allclose(fitted_model.parameters, self.model_params, atol=1e-1)
@pytest.mark.skipif('not HAS_SCIPY')
class Test2DFittingWithOutlierRemoval:
def setup_class(self):
self.y, self.x = np.mgrid[-3:3:128j, -3:3:128j]
self.model_params = (3.0, 1.0, 0.0, 0.8, 0.8)
def Gaussian_2D(p, pos):
return p[0]*np.exp(-0.5*(pos[0] - p[2])**2 / p[4]**2 -
0.5*(pos[1] - p[1])**2 / p[3]**2)
self.z = Gaussian_2D(self.model_params, np.array([self.y, self.x]))
    def initial_guess(self, data, pos):
        """Compute the intensity-weighted centroid of the data as the
        initial guess for the center position."""
        y = pos[0]
        x = pos[1]
wx = x * data
wy = y * data
total_intensity = np.sum(data)
x_mean = np.sum(wx) / total_intensity
y_mean = np.sum(wy) / total_intensity
x_to_pixel = x[0].size / (x[x[0].size - 1][x[0].size - 1] - x[0][0])
y_to_pixel = y[0].size / (y[y[0].size - 1][y[0].size - 1] - y[0][0])
x_pos = np.around(x_mean * x_to_pixel + x[0].size / 2.).astype(int)
y_pos = np.around(y_mean * y_to_pixel + y[0].size / 2.).astype(int)
amplitude = data[y_pos][x_pos]
return amplitude, x_mean, y_mean
def test_with_fitters_and_sigma_clip(self):
import scipy.stats as stats
np.random.seed(0)
c = stats.bernoulli.rvs(0.25, size=self.z.shape)
self.z += (np.random.normal(0., 0.2, self.z.shape) +
c*np.random.normal(self.z, 2.0, self.z.shape))
guess = self.initial_guess(self.z, np.array([self.y, self.x]))
g2_init = models.Gaussian2D(amplitude=guess[0], x_mean=guess[1],
y_mean=guess[2], x_stddev=0.75,
y_stddev=1.25)
# test with Levenberg-Marquardt Least Squares fitter
fit = FittingWithOutlierRemoval(LevMarLSQFitter(), sigma_clip,
niter=3, sigma=3.)
fitted_model, _ = fit(g2_init, self.x, self.y, self.z)
assert_allclose(fitted_model.parameters[0:5], self.model_params,
atol=1e-1)
# test with Sequential Least Squares Programming fitter
fit = FittingWithOutlierRemoval(SLSQPLSQFitter(), sigma_clip, niter=3,
sigma=3.)
fitted_model, _ = fit(g2_init, self.x, self.y, self.z)
assert_allclose(fitted_model.parameters[0:5], self.model_params,
atol=1e-1)
# test with Simplex LSQ fitter
fit = FittingWithOutlierRemoval(SimplexLSQFitter(), sigma_clip,
niter=3, sigma=3.)
fitted_model, _ = fit(g2_init, self.x, self.y, self.z)
assert_allclose(fitted_model.parameters[0:5], self.model_params,
atol=1e-1)
def test_1d_set_fitting_with_outlier_removal():
"""Test model set fitting with outlier removal (issue #6819)"""
poly_set = models.Polynomial1D(2, n_models=2)
fitter = FittingWithOutlierRemoval(LinearLSQFitter(),
sigma_clip, sigma=2.5, niter=3,
cenfunc=np.ma.mean, stdfunc=np.ma.std)
x = np.arange(10)
y = np.array([2.5*x - 4, 2*x*x + x + 10])
y[1,5] = -1000 # outlier
poly_set, filt_y = fitter(poly_set, x, y)
assert_allclose(poly_set.c0, [-4., 10.], atol=1e-14)
assert_allclose(poly_set.c1, [2.5, 1.], atol=1e-14)
assert_allclose(poly_set.c2, [0., 2.], atol=1e-14)
def test_2d_set_axis_2_fitting_with_outlier_removal():
"""Test fitting 2D model set (axis 2) with outlier removal (issue #6819)"""
poly_set = models.Polynomial2D(1, n_models=2, model_set_axis=2)
fitter = FittingWithOutlierRemoval(LinearLSQFitter(),
sigma_clip, sigma=2.5, niter=3,
cenfunc=np.ma.mean, stdfunc=np.ma.std)
y, x = np.mgrid[0:5, 0:5]
z = np.rollaxis(np.array([x+y, 1-0.1*x+0.2*y]), 0, 3)
z[3,3:5,0] = 100. # outliers
poly_set, filt_z = fitter(poly_set, x, y, z)
assert_allclose(poly_set.c0_0, [[[0., 1.]]], atol=1e-14)
assert_allclose(poly_set.c1_0, [[[1., -0.1]]], atol=1e-14)
assert_allclose(poly_set.c0_1, [[[1., 0.2]]], atol=1e-14)
@pytest.mark.skipif('not HAS_SCIPY')
class TestWeightedFittingWithOutlierRemoval:
"""Issue #7020 """
def setup_class(self):
# values of x,y not important as we fit y(x,y) = p0 model here
self.y, self.x = np.mgrid[0:20, 0:20]
self.z = np.mod(self.x + self.y, 2) * 2 - 1 # -1,1 chessboard
self.weights = np.mod(self.x + self.y, 2) * 2 + 1 # 1,3 chessboard
self.z[0,0] = 1000.0 # outlier
self.z[0,1] = 1000.0 # outlier
self.x1d = self.x.flatten()
self.z1d = self.z.flatten()
self.weights1d = self.weights.flatten()
def test_1d_without_weights_without_sigma_clip(self):
model = models.Polynomial1D(0)
fitter = LinearLSQFitter()
fit = fitter(model, self.x1d, self.z1d)
assert_allclose(fit.parameters[0], self.z1d.mean(), atol=10**(-2))
def test_1d_without_weights_with_sigma_clip(self):
model = models.Polynomial1D(0)
fitter = FittingWithOutlierRemoval(LinearLSQFitter(), sigma_clip,
niter=3, sigma=3.)
fit, mask = fitter(model, self.x1d, self.z1d)
assert((~mask).sum() == self.z1d.size - 2)
assert(mask[0] and mask[1])
assert_allclose(fit.parameters[0], 0.0, atol=10**(-2)) # with removed outliers mean is 0.0
def test_1d_with_weights_without_sigma_clip(self):
model = models.Polynomial1D(0)
fitter = LinearLSQFitter()
fit = fitter(model, self.x1d, self.z1d, weights=self.weights1d)
assert(fit.parameters[0] > 1.0) # outliers pulled it high
def test_1d_with_weights_with_sigma_clip(self):
"""smoke test for #7020 - fails without fitting.py patch because weights does not propagate"""
model = models.Polynomial1D(0)
fitter = FittingWithOutlierRemoval(LinearLSQFitter(), sigma_clip,
niter=3, sigma=3.)
fit, filtered = fitter(model, self.x1d, self.z1d, weights=self.weights1d)
assert(fit.parameters[0] > 10**(-2)) # weights pulled it > 0
assert(fit.parameters[0] < 1.0) # outliers didn't pull it out of [-1:1] because they had been removed
def test_1d_set_with_common_weights_with_sigma_clip(self):
"""added for #6819 (1D model set with weights in common)"""
model = models.Polynomial1D(0, n_models=2)
fitter = FittingWithOutlierRemoval(LinearLSQFitter(), sigma_clip,
niter=3, sigma=3.)
z1d = np.array([self.z1d, self.z1d])
fit, filtered = fitter(model, self.x1d, z1d, weights=self.weights1d)
assert_allclose(fit.parameters, [0.8, 0.8], atol=1e-14)
def test_2d_without_weights_without_sigma_clip(self):
model = models.Polynomial2D(0)
fitter = LinearLSQFitter()
fit = fitter(model, self.x, self.y, self.z)
assert_allclose(fit.parameters[0], self.z.mean(), atol=10**(-2))
def test_2d_without_weights_with_sigma_clip(self):
model = models.Polynomial2D(0)
fitter = FittingWithOutlierRemoval(LinearLSQFitter(), sigma_clip,
niter=3, sigma=3.)
fit, mask = fitter(model, self.x, self.y, self.z)
assert((~mask).sum() == self.z.size - 2)
assert(mask[0,0] and mask[0,1])
assert_allclose(fit.parameters[0], 0.0, atol=10**(-2))
def test_2d_with_weights_without_sigma_clip(self):
model = models.Polynomial2D(0)
fitter = LevMarLSQFitter() # LinearLSQFitter doesn't handle weights properly in 2D
fit = fitter(model, self.x, self.y, self.z, weights=self.weights)
assert(fit.parameters[0] > 1.0) # outliers pulled it high
def test_2d_with_weights_with_sigma_clip(self):
"""smoke test for #7020 - fails without fitting.py patch because weights does not propagate"""
model = models.Polynomial2D(0)
fitter = FittingWithOutlierRemoval(LevMarLSQFitter(), sigma_clip,
niter=3, sigma=3.)
fit, filtered = fitter(model, self.x, self.y, self.z, weights=self.weights)
assert(fit.parameters[0] > 10**(-2)) # weights pulled it > 0
assert(fit.parameters[0] < 1.0) # outliers didn't pull it out of [-1:1] because they had been removed
@pytest.mark.skipif('not HAS_SCIPY')
def test_fitters_with_weights():
"""Issue #5737 """
Xin, Yin = np.mgrid[0:21, 0:21]
fitter = LevMarLSQFitter()
with NumpyRNGContext(_RANDOM_SEED):
zsig = np.random.normal(0, 0.01, size=Xin.shape)
# Non-linear model
g2 = models.Gaussian2D(10, 10, 9, 2, 3)
z = g2(Xin, Yin)
gmod = fitter(models.Gaussian2D(15, 7, 8, 1.3, 1.2), Xin, Yin, z + zsig)
assert_allclose(gmod.parameters, g2.parameters, atol=10 ** (-2))
# Linear model
p2 = models.Polynomial2D(3)
p2.parameters = np.arange(10)/1.2
z = p2(Xin, Yin)
pmod = fitter(models.Polynomial2D(3), Xin, Yin, z + zsig)
assert_allclose(pmod.parameters, p2.parameters, atol=10 ** (-2))
@pytest.mark.skipif('not HAS_SCIPY')
def test_fitters_interface():
"""
Test that **kwargs work with all optimizers.
This is a basic smoke test.
"""
levmar = LevMarLSQFitter()
slsqp = SLSQPLSQFitter()
simplex = SimplexLSQFitter()
kwargs = {'maxiter': 77, 'verblevel': 1, 'epsilon': 1e-2, 'acc': 1e-6}
simplex_kwargs = {'maxiter': 77, 'verblevel': 1, 'acc': 1e-6}
model = models.Gaussian1D(10, 4, .3)
x = np.arange(21)
y = model(x)
slsqp_model = slsqp(model, x, y, **kwargs)
simplex_model = simplex(model, x, y, **simplex_kwargs)
kwargs.pop('verblevel')
lm_model = levmar(model, x, y, **kwargs)
|
bsipocz/astropy
|
astropy/modeling/tests/test_fitters.py
|
Python
|
bsd-3-clause
| 33,395
|
[
"Gaussian"
] |
b40cd26ebdaae06bd092211580e3c32bc8b0831569e23b573f51c8526cabf148
|
#! /usr/bin/python
#Guruprasad Ananda
#MAQ mapper for SOLiD colourspace-reads
import sys, os, zipfile, tempfile, subprocess
def stop_err( msg ):
sys.stderr.write( "%s\n" % msg )
sys.exit()
def __main__():
out_fname = sys.argv[1].strip()
out_f2 = open(sys.argv[2].strip(),'r+')
ref_fname = sys.argv[3].strip()
f3_read_fname = sys.argv[4].strip()
f3_qual_fname = sys.argv[5].strip()
paired = sys.argv[6]
if paired == 'yes':
r3_read_fname = sys.argv[7].strip()
r3_qual_fname = sys.argv[8].strip()
min_mapqual = int(sys.argv[9].strip())
max_mismatch = int(sys.argv[10].strip())
out_f3name = sys.argv[11].strip()
subprocess_dict = {}
ref_csfa = tempfile.NamedTemporaryFile()
ref_bfa = tempfile.NamedTemporaryFile()
ref_csbfa = tempfile.NamedTemporaryFile()
cmd2_1 = 'maq fasta2csfa %s > %s 2>&1' %(ref_fname,ref_csfa.name)
cmd2_2 = 'maq fasta2bfa %s %s 2>&1' %(ref_csfa.name,ref_csbfa.name)
cmd2_3 = 'maq fasta2bfa %s %s 2>&1' %(ref_fname,ref_bfa.name)
try:
os.system(cmd2_1)
os.system(cmd2_2)
os.system(cmd2_3)
except Exception, erf:
stop_err(str(erf)+"Error processing reference sequence")
if paired == 'yes': #paired end reads
tmpf = tempfile.NamedTemporaryFile() #forward reads
tmpr = tempfile.NamedTemporaryFile() #reverse reads
tmps = tempfile.NamedTemporaryFile() #single reads
tmpffastq = tempfile.NamedTemporaryFile()
tmprfastq = tempfile.NamedTemporaryFile()
tmpsfastq = tempfile.NamedTemporaryFile()
cmd1 = "solid2fastq_modified.pl 'yes' %s %s %s %s %s %s %s 2>&1" %(tmpf.name,tmpr.name,tmps.name,f3_read_fname,f3_qual_fname,r3_read_fname,r3_qual_fname)
try:
os.system(cmd1)
os.system('zcat -f %s >> %s' %(tmpf.name,tmpffastq.name))
os.system('zcat -f %s >> %s' %(tmpr.name,tmprfastq.name))
os.system('zcat -f %s >> %s' %(tmps.name,tmpsfastq.name))
except Exception, eq:
stop_err("Error converting data to fastq format." + str(eq))
#make a temp directory where the split fastq files will be stored
try:
split_dir = tempfile.mkdtemp()
split_file_prefix_f = tempfile.mktemp(dir=split_dir)
split_file_prefix_r = tempfile.mktemp(dir=split_dir)
splitcmd_f = 'split -a 2 -l %d %s %s' %(32000000,tmpffastq.name,split_file_prefix_f) #32M lines correspond to 8M reads
splitcmd_r = 'split -a 2 -l %d %s %s' %(32000000,tmprfastq.name,split_file_prefix_r) #32M lines correspond to 8M reads
os.system(splitcmd_f)
os.system(splitcmd_r)
os.chdir(split_dir)
ii = 0
for fastq in os.listdir(split_dir):
if not fastq.startswith(split_file_prefix_f.split("/")[-1]):
continue
                fastq_r = split_file_prefix_r + fastq.split(split_file_prefix_f.split("/")[-1])[1] #find the reverse strand fastq corresponding to forward strand fastq
tmpbfq_f = tempfile.NamedTemporaryFile()
tmpbfq_r = tempfile.NamedTemporaryFile()
cmd3 = 'maq fastq2bfq %s %s 2>&1; maq fastq2bfq %s %s 2>&1; maq map -c %s.csmap %s %s %s 1>/dev/null 2>&1; maq mapview %s.csmap > %s.txt' %(fastq,tmpbfq_f.name,fastq_r,tmpbfq_r.name,fastq,ref_csbfa.name,tmpbfq_f.name,tmpbfq_r.name,fastq,fastq)
subprocess_dict['sp'+str(ii+1)] = subprocess.Popen([cmd3],shell=True,stdout=subprocess.PIPE)
ii += 1
while True:
all_done = True
for j,k in enumerate(subprocess_dict.keys()):
if subprocess_dict['sp'+str(j+1)].wait() != 0:
err = subprocess_dict['sp'+str(j+1)].communicate()[1]
if err != None:
stop_err("Mapping error: %s" %err)
all_done = False
if all_done:
break
cmdout = "for map in *.txt; do cat $map >> %s; done" %(out_fname)
os.system(cmdout)
tmpcsmap = tempfile.NamedTemporaryFile()
cmd_cat_csmap = "for csmap in *.csmap; do cat $csmap >> %s; done" %(tmpcsmap.name)
os.system(cmd_cat_csmap)
tmppileup = tempfile.NamedTemporaryFile()
cmdpileup = "maq pileup -m %s -q %s %s %s > %s" %(max_mismatch,min_mapqual,ref_bfa.name,tmpcsmap.name,tmppileup.name)
os.system(cmdpileup)
tmppileup.seek(0)
print >> out_f2, "#chr\tposition\tref_nt\tcoverage\tSNP_count\tA_count\tT_count\tG_count\tC_count"
for line in file(tmppileup.name):
elems = line.strip().split()
ref_nt = elems[2].capitalize()
read_nt = elems[4]
coverage = int(elems[3])
a,t,g,c = 0,0,0,0
ref_nt_count = 0
for ch in read_nt:
ch = ch.capitalize()
if ch not in ['A','T','G','C',',','.']:
continue
if ch in [',','.']:
ch = ref_nt
ref_nt_count += 1
try:
nt_ind = ['A','T','G','C'].index(ch)
if nt_ind == 0:
a+=1
elif nt_ind == 1:
t+=1
elif nt_ind == 2:
g+=1
else:
c+=1
except ValueError, we:
print >>sys.stderr, we
print >> out_f2, "%s\t%s\t%s\t%s\t%s\t%s" %("\t".join(elems[:4]),coverage-ref_nt_count,a,t,g,c)
except Exception, er2:
stop_err("Encountered error while mapping: %s" %(str(er2)))
else: #single end reads
tmpf = tempfile.NamedTemporaryFile()
tmpfastq = tempfile.NamedTemporaryFile()
cmd1 = "solid2fastq_modified.pl 'no' %s %s %s %s %s %s %s 2>&1" %(tmpf.name,None,None,f3_read_fname,f3_qual_fname,None,None)
try:
os.system(cmd1)
os.system('zcat -f %s >> %s' %(tmpf.name,tmpfastq.name))
tmpf.close()
except:
stop_err("Error converting data to fastq format.")
#make a temp directory where the split fastq files will be stored
try:
split_dir = tempfile.mkdtemp()
split_file_prefix = tempfile.mktemp(dir=split_dir)
splitcmd = 'split -a 2 -l %d %s %s' %(32000000,tmpfastq.name,split_file_prefix) #32M lines correspond to 8M reads
os.system(splitcmd)
os.chdir(split_dir)
for i,fastq in enumerate(os.listdir(split_dir)):
tmpbfq = tempfile.NamedTemporaryFile()
cmd3 = 'maq fastq2bfq %s %s 2>&1; maq map -c %s.csmap %s %s 1>/dev/null 2>&1; maq mapview %s.csmap > %s.txt' %(fastq,tmpbfq.name,fastq,ref_csbfa.name,tmpbfq.name,fastq,fastq)
subprocess_dict['sp'+str(i+1)] = subprocess.Popen([cmd3],shell=True,stdout=subprocess.PIPE)
while True:
all_done = True
for j,k in enumerate(subprocess_dict.keys()):
if subprocess_dict['sp'+str(j+1)].wait() != 0:
err = subprocess_dict['sp'+str(j+1)].communicate()[1]
if err != None:
stop_err("Mapping error: %s" %err)
all_done = False
if all_done:
break
cmdout = "for map in *.txt; do cat $map >> %s; done" %(out_fname)
os.system(cmdout)
tmpcsmap = tempfile.NamedTemporaryFile()
cmd_cat_csmap = "for csmap in *.csmap; do cat $csmap >> %s; done" %(tmpcsmap.name)
os.system(cmd_cat_csmap)
tmppileup = tempfile.NamedTemporaryFile()
cmdpileup = "maq pileup -m %s -q %s %s %s > %s" %(max_mismatch,min_mapqual,ref_bfa.name,tmpcsmap.name,tmppileup.name)
os.system(cmdpileup)
tmppileup.seek(0)
print >> out_f2, "#chr\tposition\tref_nt\tcoverage\tSNP_count\tA_count\tT_count\tG_count\tC_count"
for line in file(tmppileup.name):
elems = line.strip().split()
ref_nt = elems[2].capitalize()
read_nt = elems[4]
coverage = int(elems[3])
a,t,g,c = 0,0,0,0
ref_nt_count = 0
for ch in read_nt:
ch = ch.capitalize()
if ch not in ['A','T','G','C',',','.']:
continue
if ch in [',','.']:
ch = ref_nt
ref_nt_count += 1
try:
nt_ind = ['A','T','G','C'].index(ch)
if nt_ind == 0:
a+=1
elif nt_ind == 1:
t+=1
elif nt_ind == 2:
g+=1
else:
c+=1
except:
pass
print >> out_f2, "%s\t%s\t%s\t%s\t%s\t%s" %("\t".join(elems[:4]),coverage-ref_nt_count,a,t,g,c)
except Exception, er2:
stop_err("Encountered error while mapping: %s" %(str(er2)))
#Build custom track from pileup
chr_list=[]
out_f2.seek(0)
fcov = tempfile.NamedTemporaryFile()
fout_a = tempfile.NamedTemporaryFile()
fout_t = tempfile.NamedTemporaryFile()
fout_g = tempfile.NamedTemporaryFile()
fout_c = tempfile.NamedTemporaryFile()
fcov.write('''track type=wiggle_0 name="Coverage track" description="Coverage track (from Galaxy)" color=0,0,0 visibility=2\n''')
fout_a.write('''track type=wiggle_0 name="Track A" description="Track A (from Galaxy)" color=255,0,0 visibility=2\n''')
fout_t.write('''track type=wiggle_0 name="Track T" description="Track T (from Galaxy)" color=0,255,0 visibility=2\n''')
fout_g.write('''track type=wiggle_0 name="Track G" description="Track G (from Galaxy)" color=0,0,255 visibility=2\n''')
fout_c.write('''track type=wiggle_0 name="Track C" description="Track C (from Galaxy)" color=255,0,255 visibility=2\n''')
for line in out_f2:
if line.startswith("#"):
continue
elems = line.split()
chr = elems[0]
if chr not in chr_list:
chr_list.append(chr)
if not (chr.startswith('chr') or chr.startswith('scaffold')):
chr = 'chr'
header = "variableStep chrom=%s" %(chr)
fcov.write("%s\n" %(header))
fout_a.write("%s\n" %(header))
fout_t.write("%s\n" %(header))
fout_g.write("%s\n" %(header))
fout_c.write("%s\n" %(header))
try:
pos = int(elems[1])
cov = int(elems[3])
a = int(elems[5])
t = int(elems[6])
g = int(elems[7])
c = int(elems[8])
except:
continue
fcov.write("%s\t%s\n" %(pos,cov))
try:
a_freq = a*100./cov
t_freq = t*100./cov
g_freq = g*100./cov
c_freq = c*100./cov
except ZeroDivisionError:
a_freq=t_freq=g_freq=c_freq=0
fout_a.write("%s\t%s\n" %(pos,a_freq))
fout_t.write("%s\t%s\n" %(pos,t_freq))
fout_g.write("%s\t%s\n" %(pos,g_freq))
fout_c.write("%s\t%s\n" %(pos,c_freq))
fcov.seek(0)
fout_a.seek(0)
fout_g.seek(0)
fout_t.seek(0)
fout_c.seek(0)
os.system("cat %s %s %s %s %s | cat > %s" %(fcov.name,fout_a.name,fout_t.name,fout_g.name,fout_c.name,out_f3name))
if __name__=="__main__":
__main__()
|
dbcls/dbcls-galaxy
|
tools/solid_tools/maq_cs_wrapper.py
|
Python
|
mit
| 12,119
|
[
"Galaxy"
] |
4ade322fa2d7f6a79e76ad2a43318aaf0139abcdc5d808192b4c552a32c7bbab
|
#!/usr/bin/env python
# coding: utf-8
# ---
# syncID: f059914f7cf74327908228e63e204d60
# title: "Introduction to NEON API in Python"
# description: "Use the NEON API in Python, via requests package and json package."
# dateCreated: 2020-04-24
# authors: Maxwell J. Burner
# contributors: Donal O'Leary
# estimatedTime: 1 hour
# packagesLibraries: requests, json
# topics:
# languagesTool: python
# dataProduct: DP3.10003.001
# code1: https://raw.githubusercontent.com/NEONScience/NEON-Data-Skills/main/tutorials/Python/NEON-API-python/neon_api_01_introduction_requests_py/neon_api_01_introduction_requests_py.py
# tutorialSeries: python-neon-api-series
# urlTitle: neon-api-01-introduction-requests
# ---
# <div id="ds-objectives" markdown="1">
#
# ### Objectives
# After completing this tutorial, you will be able to:
#
# * Understand the components of a NEON API call
# * Understand the basic process of making and processing an API request in Python
# * Query the 'sites/' or 'products/' API endpoints to determine data availability
# * Query the 'data/' API endpoint to get information on specific data files
#
#
# ### Install Python Packages
#
# * **requests**
# * **json**
#
#
#
# </div>
# In this tutorial we will learn to make calls to the NEON API using Python. We will make calls to the 'sites/' and 'products/' endpoints of the API to determine availability of data for specific sites and months, and make a call to the 'data/' endpoint to learn the names and URLs of specific data files.
#
# An API is an [*Application Programming Interface*](https://rapidapi.com/blog/api-glossary/api-call/); this is a system that allows programs to send instructions and requests to servers, typically receiving data in exchange. Whereas sending a URL over the web normally would cause a web page to be displayed, sending an API call URL results in the desired data being directly downloaded to your computer. NEON provides an API that allows different programming languages to send requests for NEON data files and products.
#
# In this tutorial we will cover how to use API calls to learn about what types of NEON data products are available for different sites and time periods.
# ## Basic API Call Components
#
# The actual API call takes the form of a web URL, the contents of which determine what data is returned. This URL can be broken down into three parts, which appear in order:
#
# - The **base url** is the location of the server storing the data. This will be the same for all NEON API calls.
#
# - The **endpoint** indicates what type of data or metadata we are looking to download. This tutorial covers three endpoints: *sites/*, *products/*, and *data/*; other endpoints will be covered in later tutorials.
#
# - The **target** is a value or series of values that indicate the specific data product, site, location, or data files we are looking up.
#
#
#
# In Python we can easily deal with the complexities of the API call by creating the different parts of the request as strings, then combining them with string concatenation. Concatenating (combining end to end) strings in Python is as easy as using a '+' sign. This approach makes it easy to modify different parts of our request as needed.
#
#
# In[1]:
import requests
import json
# In[2]:
#Every request begins with the server's URL
SERVER = 'http://data.neonscience.org/api/v0/'
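# As a quick illustration of the three-part structure described above (the
# 'sites/' endpoint and 'TEAK' target here are example values that also
# appear later in this tutorial), the full call URL is just the parts
# concatenated end to end:
example_call = SERVER + 'sites/' + 'TEAK'  # base url + endpoint + target
print(example_call)  # http://data.neonscience.org/api/v0/sites/TEAK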
# ## Site Querying
#
# NEON manages 81 different sites across the United States and Puerto Rico. These sites are separated into two main groups, terrestrial and aquatic, and the aquatic sites are further subdivided into lakes, rivers, and wadeable streams. Each of these different site types has a different set of instrumentation and observation strategies; therefore, not every data product is available at every site. Often we begin by asking what kinds of data products are available for a given site. This is done by using the *sites/* endpoint in the API; this endpoint is used for getting information about specific NEON field sites. In this example we will query which data products are available at the <a href="https://www.neonscience.org/field-sites/field-sites-map/TEAK" target="_blank">Lower Teakettle (TEAK)</a> site.
# In[3]:
#Site Code for Lower Teakettle
SITECODE = 'TEAK'
# We first use the requests module to send the API request using the 'get' function; this returns a 'request' object.
# To more easily access the data returned by the request, we convert the request object into a Python JSON object.
# In[4]:
#Make request, using the sites/ endpoint
site_request = requests.get(SERVER+'sites/'+SITECODE)
#Convert to Python JSON object
site_json = site_request.json()
# The JSON object in Python is a complex collection, with nested layers of dictionaries ('dicts') and lists.
#
# Briefly, a list is a collection of data in which each element is identified by index number, while a dictionary is a collection in which each element is identified by a label (called a 'key') that is usually a text string. You can visit the [w3schools website](https://www.w3schools.com/python/python_lists.asp) for more information on lists, and the [realpython website](https://realpython.com/python-dicts/) for more information on dictionaries.
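# As a quick aside (using made-up values, not data from the request), the two
# collection types described above look like this:
example_list = ['first', 'second']        # elements accessed by index number
example_dict = {'siteCode': 'TEAK'}       # elements accessed by a key
print(example_list[0], example_dict['siteCode'])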
#
# Dictionaries are defined using curly brackets ({...}) and lists are defined using square brackets (\[...\]). When we look at the request in JSON format, we can see that this is quite a lot of text arranged in nested dicts and lists:
# In[5]:
site_json
# At the uppermost level the JSON object is a dictionary containing a single element with the label 'data'. This 'data' element in turn contains a dictionary with elements containing various pieces of information about the site. When we want to know what elements a dict contains, we can use the *.keys()* method to list the keys to each element in that dict.
# In[6]:
#Use the 'keys' method to view the components of the uppermost json dictionary
site_json.keys()
# This output shows that the entire API response is contained within a single dict called 'data'. In order to access any of the information contained within this highest-level 'data' dict, we will need to reference that dict directly. Let's view the different keys that are available within 'data':
# In[7]:
#Access the 'data' component, and use the 'keys' method to view the components of the json data dictionary
site_json['data'].keys()
# The returned JSON keys include information on site location, site type, site name and code, and the availability of different data products for the site. This last piece of information is located in the element with the 'dataProducts' key.
#
# The 'dataProducts' element is a list of dictionaries, one for each type of NEON data product available at the site; each of these dictionaries has the same keys, but different values. Let's look at the JSON for the first entry ("\[0\]") in the list of data products:
# In[8]:
#View the first data product dictionary
site_json['data']['dataProducts'][0]
# Lists are a type of sequential data, so we can use Python's *for* loop to directly go through every element one by one, in this case to print out the data product code and data product name.
# In[9]:
#View product code and name for every available data product
for product in site_json['data']['dataProducts']:
print(product['dataProductCode'],product['dataProductTitle'])
# Typically, we use site queries to determine for which months a particular data product is available at a particular site. Let's look for the availability of Breeding Landbird Counts (DP1.10003.001).
# In[10]:
#Look at Breeding Landbird Count data products
PRODUCTCODE = 'DP1.10003.001'
# For each data product, there will be a list of the months for which data of that type was collected and is available at the site, and a corresponding list of the URLs that we would put into the API to get data for each of those months.
# In[11]:
#Get available months of Breeding Landbird Count data products for TEAK site
#Loop through the 'dataProducts' list items (each one a dict) at the site
for product in site_json['data']['dataProducts']:
if(product['dataProductCode'] == PRODUCTCODE): #If a list item's 'dataProductCode' dict element equals the product code string,
print('Available Months: ',product['availableMonths']) #print the available months and URLs
print('URLs for each Month: ', product['availableDataUrls'])
# ## Data Product Querying
#
# Alternatively, we may want a specific type of data product, but aren't certain of the sites and months for which that data is available. In this case we can use the product code and the *products/* API endpoint to look up availability.
# In[12]:
#Make request
product_request = requests.get(SERVER+'products/'+PRODUCTCODE)
product_json = product_request.json()
# The product JSON will again store everything first in a 'data' element. Within this container, the product data is a dictionary with information on the data product we are looking up.
# In[13]:
#Print keys for product data dictionary
print(product_json['data'].keys())
# This request returned a lot of different types of information. Much of this information is meant to provide explanations and context for the data product. Let's look at the abstract, which provides a relatively brief description of the data product.
# In[14]:
#Print code, name, and abstract of data product
print(product_json['data']['productCode'])
print(product_json['data']['productName'])
print()
print(product_json['data']['productAbstract'])
#
# For looking up the availability of the data product, we want the 'siteCodes' element. This is a list with an entry for each site at which the data product is available. Each site entry is a dict whose elements include the site code, a list of months for which data is available, and a list of the API request URLs to request data for that site for a given month.
# In[15]:
#View keys of one site dictionary
print(product_json['data']['siteCodes'][0].keys())
# We can look up the availability of data at a particular site, and get a URL to request data for a specific month. We know from earlier that Lower Teakettle (TEAK) has the data product we want for June 2018; we can get the URL needed to request that data with nested loops through site and month lists.
# In[16]:
#View available months and corresponding API urls, then save desired URL
for site in product_json['data']['siteCodes']:
if(site['siteCode'] == SITECODE):
for month in zip(site['availableMonths'],site['availableDataUrls']): #Loop through the list of months and URLs
print(month[0],month[1])
if(month[0] == '2018-06'): #If data is available for the desired month, save the URL
data_url = month[1]
# In[17]:
print(data_url)
# ## Data File Querying
#
# We now know that landbird count data products are available for 2018-06 at the Lower Teakettle site. Using the server url, site code, product code, and a year-month argument, we can make a request to the *data/* endpoint of the NEON API. This will allow us to see what specific landbird count data files can be obtained for 2018-06 at the Lower Teakettle site, and to learn the locations of these files as URLs.
# In[18]:
#Make Request
data_request = requests.get(SERVER+'data/'+PRODUCTCODE+'/'+SITECODE+'/'+'2018-06')
data_json = data_request.json()
# Alternatively we could use one of the "Available Data URLs" from a *sites/* or *products/* request, like the data_url we saved earlier.
# In[19]:
#Make request with saved url
data_request = requests.get(data_url)
data_json = data_request.json()
# In[20]:
#Print dict key for 'data' element of data JSON
print(data_json['data'].keys())
# As with the sites JSON content, the uppermost level of a data request JSON object is a dictionary whose only member has the 'data' key; this member in turn is a dictionary with the product code, the site code, the month, and a list of the available data files.
#
# The 'files' list is a list of python dictionaries, one for each file available based on our query; the dictionary for each file includes an internal reference code, the file name, the size of the file in bytes, and the url at which the file is located.
# In[21]:
#View keys and values in first file dict
for key in data_json['data']['files'][0].keys(): #Loop through keys of the data file dict
print(key,':\t', data_json['data']['files'][0][key])
# In[22]:
for file in data_json['data']['files']:
print(file['name'])
# A number of different files are available, but the actual count data are in files which have 'brd_perpoint' or 'brd_countdata' in the file name.
#
# We can use *if* statements to get info on only these files. The Python **in** operator, in addition to being part of the construction of for loops, can check if a particular value is present in a sequence, so it can check if a particular series of characters is present in a string.
# In[23]:
for file in data_json['data']['files']:
if(('_perpoint' in file['name'])|('_countdata' in file['name'])): #if file name includes '_perpoint' or '_countdata'
print(file['name'],file['url'])
# We can download the desired files by simply going to the obtained URLs in any browser. However, we might want the Python script to download the files for us. Alternatively, depending on the type of file, various functions exist that could read data from the file directly into Python. We'll discuss this, along with how to identify which file we want, in the next tutorial.
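# A minimal sketch (not part of the original tutorial) of one way to download
# a file found above directly in Python; saving under the file's own name in
# the current working directory is an assumption here.
for file in data_json['data']['files']:
    if '_countdata' in file['name']:
        file_response = requests.get(file['url'])  # fetch the file contents
        with open(file['name'], 'wb') as outfile:
            outfile.write(file_response.content)   # write bytes to disk
        break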
|
NEONScience/NEON-Data-Skills
|
tutorials/Python/NEON-API-python/neon_api_01_introduction_requests_py/neon_api_01_introduction_requests_py.py
|
Python
|
agpl-3.0
| 13,632
|
[
"VisIt"
] |
c5813407c2f76b368c9ff00f5075294a8b1580b34a00127c2005bae95f29fa8f
|
import time
import six
from kalliope.core.NeuronModule import NeuronModule, MissingParameterException
class Sleep(NeuronModule):
def __init__(self, **kwargs):
super(Sleep, self).__init__(**kwargs)
self.seconds = kwargs.get('seconds', None)
# check parameters
if self._is_parameters_ok():
if isinstance(self.seconds, str) or \
isinstance(self.seconds, six.text_type):
self.seconds = float(self.seconds)
time.sleep(self.seconds)
def _is_parameters_ok(self):
"""
Check if received parameters are ok to perform operations in the neuron
:return: true if parameters are ok, raise an exception otherwise
.. raises:: MissingParameterException
"""
if self.seconds is None:
raise MissingParameterException("You must set a number of seconds as parameter")
return True
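# Usage sketch (an assumption, not taken from this file): in a Kalliope brain
# file this neuron is typically declared with its single "seconds" parameter,
# e.g.
#   - sleep:
#       seconds: 2
# which simply blocks synapse processing for the given duration.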
|
kalliope-project/kalliope
|
kalliope/neurons/sleep/sleep.py
|
Python
|
gpl-3.0
| 934
|
[
"NEURON"
] |
611ccc366d0adbdc581aa85375ac5c66e31bf9fc0707d6ab530ec6385e0edc81
|
# -*- coding: utf-8 -*-
# Copyright (c) 2006-2014 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2009 Mads Kiilerich <mads@kiilerich.com>
# Copyright (c) 2010 Daniel Harding <dharding@gmail.com>
# Copyright (c) 2011-2014, 2017 Google, Inc.
# Copyright (c) 2012 FELD Boris <lothiraldan@gmail.com>
# Copyright (c) 2013-2017 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014 Michal Nowikowski <godfryd@gmail.com>
# Copyright (c) 2014 Brett Cannon <brett@python.org>
# Copyright (c) 2014 Ricardo Gemignani <ricardo.gemignani@gmail.com>
# Copyright (c) 2014 Arun Persaud <arun@nubati.net>
# Copyright (c) 2015 Dmitry Pribysh <dmand@yandex.ru>
# Copyright (c) 2015 Radu Ciorba <radu@devrandom.ro>
# Copyright (c) 2015 Simu Toni <simutoni@gmail.com>
# Copyright (c) 2015 Ionel Cristian Maries <contact@ionelmc.ro>
# Copyright (c) 2016-2017 Derek Gustafson <degustaf@gmail.com>
# Copyright (c) 2016-2017 Łukasz Rogalski <rogalski.91@gmail.com>
# Copyright (c) 2016 Grant Welch <gwelch925+github@gmail.com>
# Copyright (c) 2016 Ashley Whetter <ashley@awhetter.co.uk>
# Copyright (c) 2016 Jakub Wilk <jwilk@jwilk.net>
# Copyright (c) 2017 hippo91 <guillaume.peillex@gmail.com>
# Copyright (c) 2017 Dan Garrette <dhgarrette@gmail.com>
# Copyright (c) 2017 Ville Skyttä <ville.skytta@iki.fi>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
"""variables checkers for Python code
"""
import copy
import itertools
import collections
import os
import sys
import re
try:
from functools import lru_cache
except ImportError:
from backports.functools_lru_cache import lru_cache
import six
import astroid
from astroid import decorators
from astroid import modutils
from pylint.interfaces import IAstroidChecker, INFERENCE, INFERENCE_FAILURE, HIGH
from pylint.utils import get_global_option
from pylint.checkers import BaseChecker
from pylint.checkers import utils
SPECIAL_OBJ = re.compile("^_{2}[a-z]+_{2}$")
FUTURE = '__future__'
# regexp for ignored argument name
IGNORED_ARGUMENT_NAMES = re.compile('_.*|^ignored_|^unused_')
PY3K = sys.version_info >= (3, 0)
def _is_from_future_import(stmt, name):
"""Check if the name is a future import from another module."""
try:
module = stmt.do_import_module(stmt.modname)
except astroid.AstroidBuildingException:
return None
for local_node in module.locals.get(name, []):
if (isinstance(local_node, astroid.ImportFrom)
and local_node.modname == FUTURE):
return True
return None
def in_for_else_branch(parent, stmt):
"""Returns True if stmt in inside the else branch for a parent For stmt."""
return (isinstance(parent, astroid.For) and
any(else_stmt.parent_of(stmt) or else_stmt == stmt
for else_stmt in parent.orelse))
@lru_cache(maxsize=1000)
def overridden_method(klass, name):
"""get overridden method if any"""
try:
parent = next(klass.local_attr_ancestors(name))
except (StopIteration, KeyError):
return None
try:
meth_node = parent[name]
except KeyError:
# We have found an ancestor defining <name> but it's not in the local
# dictionary. This may happen with astroid built from living objects.
return None
if isinstance(meth_node, astroid.FunctionDef):
return meth_node
return None
def _get_unpacking_extra_info(node, infered):
"""return extra information to add to the message for unpacking-non-sequence
and unbalanced-tuple-unpacking errors
"""
more = ''
infered_module = infered.root().name
if node.root().name == infered_module:
if node.lineno == infered.lineno:
more = ' %s' % infered.as_string()
elif infered.lineno:
more = ' defined at line %s' % infered.lineno
elif infered.lineno:
more = ' defined at line %s of %s' % (infered.lineno, infered_module)
return more
def _detect_global_scope(node, frame, defframe):
""" Detect that the given frames shares a global
scope.
Two frames shares a global scope when neither
of them are hidden under a function scope, as well
as any of parent scope of them, until the root scope.
In this case, depending from something defined later on
will not work, because it is still undefined.
Example:
class A:
# B has the same global scope as `C`, leading to a NameError.
class B(C): ...
class C: ...
"""
def_scope = scope = None
if frame and frame.parent:
scope = frame.parent.scope()
if defframe and defframe.parent:
def_scope = defframe.parent.scope()
if isinstance(frame, astroid.FunctionDef):
# If the parent of the current node is a
# function, then it can be under its scope
# (defined in, which doesn't concern us) or
# the `->` part of annotations. The same goes
# for annotations of function arguments, they'll have
# their parent the Arguments node.
if not isinstance(node.parent,
(astroid.FunctionDef, astroid.Arguments)):
return False
elif any(not isinstance(f, (astroid.ClassDef, astroid.Module))
for f in (frame, defframe)):
# Not interested in other frames, since they are already
# not in a global scope.
return False
break_scopes = []
for s in (scope, def_scope):
# Look for parent scopes. If there is anything different
        # than a module or a class scope, then the frames don't
# share a global scope.
parent_scope = s
while parent_scope:
if not isinstance(parent_scope, (astroid.ClassDef, astroid.Module)):
break_scopes.append(parent_scope)
break
if parent_scope.parent:
parent_scope = parent_scope.parent.scope()
else:
break
if break_scopes and len(set(break_scopes)) != 1:
# Store different scopes than expected.
# If the stored scopes are, in fact, the very same, then it means
        # that the two frames (frame and defframe) share the same scope,
# and we could apply our lineno analysis over them.
# For instance, this works when they are inside a function, the node
# that uses a definition and the definition itself.
return False
    # At this point, we are certain that frame and defframe share a scope
# and the definition of the first depends on the second.
return frame.lineno < defframe.lineno
def _fix_dot_imports(not_consumed):
""" Try to fix imports with multiple dots, by returning a dictionary
with the import names expanded. The function unflattens root imports,
like 'xml' (when we have both 'xml.etree' and 'xml.sax'), to 'xml.etree'
and 'xml.sax' respectively.
"""
# TODO: this should be improved in issue astroid #46
names = {}
for name, stmts in six.iteritems(not_consumed):
if any(isinstance(stmt, astroid.AssignName)
and isinstance(stmt.assign_type(), astroid.AugAssign)
for stmt in stmts):
continue
for stmt in stmts:
if not isinstance(stmt, (astroid.ImportFrom, astroid.Import)):
continue
for imports in stmt.names:
second_name = None
if imports[0] == "*":
# In case of wildcard imports,
# pick the name from inside the imported module.
second_name = name
else:
if imports[0].find(".") > -1 or name in imports:
# Most likely something like 'xml.etree',
# which will appear in the .locals as 'xml'.
# Only pick the name if it wasn't consumed.
second_name = imports[0]
if second_name and second_name not in names:
names[second_name] = stmt
return sorted(names.items(), key=lambda a: a[1].fromlineno)
def _find_frame_imports(name, frame):
"""
Detect imports in the frame, with the required
*name*. Such imports can be considered assignments.
Returns True if an import for the given name was found.
"""
imports = frame.nodes_of_class((astroid.Import, astroid.ImportFrom))
for import_node in imports:
for import_name, import_alias in import_node.names:
# If the import uses an alias, check only that.
# Otherwise, check only the import name.
if import_alias:
if import_alias == name:
return True
elif import_name and import_name == name:
return True
return None
def _import_name_is_global(stmt, global_names):
for import_name, import_alias in stmt.names:
# If the import uses an alias, check only that.
# Otherwise, check only the import name.
if import_alias:
if import_alias in global_names:
return True
elif import_name in global_names:
return True
return False
def _flattened_scope_names(iterator):
values = (set(stmt.names) for stmt in iterator)
return set(itertools.chain.from_iterable(values))
def _assigned_locally(name_node):
"""
Checks if name_node has corresponding assign statement in same scope
"""
assign_stmts = name_node.scope().nodes_of_class(astroid.AssignName)
return any(a.name == name_node.name for a in assign_stmts)
MSGS = {
'E0601': ('Using variable %r before assignment',
'used-before-assignment',
              'Used when a local variable is accessed before its \
assignment.'),
'E0602': ('Undefined variable %r',
'undefined-variable',
'Used when an undefined variable is accessed.'),
'E0603': ('Undefined variable name %r in __all__',
'undefined-all-variable',
'Used when an undefined variable name is referenced in __all__.'),
'E0604': ('Invalid object %r in __all__, must contain only strings',
'invalid-all-object',
'Used when an invalid (non-string) object occurs in __all__.'),
'E0611': ('No name %r in module %r',
'no-name-in-module',
'Used when a name cannot be found in a module.'),
'W0601': ('Global variable %r undefined at the module level',
'global-variable-undefined',
'Used when a variable is defined through the "global" statement \
but the variable is not defined in the module scope.'),
'W0602': ('Using global for %r but no assignment is done',
'global-variable-not-assigned',
'Used when a variable is defined through the "global" statement \
but no assignment to this variable is done.'),
'W0603': ('Using the global statement', # W0121
'global-statement',
'Used when you use the "global" statement to update a global \
              'variable. Pylint just tries to discourage this \
              usage. That doesn\'t mean you cannot use it!'),
'W0604': ('Using the global statement at the module level', # W0103
'global-at-module-level',
'Used when you use the "global" statement at the module level \
since it has no effect'),
'W0611': ('Unused %s',
'unused-import',
'Used when an imported module or variable is not used.'),
'W0612': ('Unused variable %r',
'unused-variable',
'Used when a variable is defined but not used.'),
'W0613': ('Unused argument %r',
'unused-argument',
'Used when a function or method argument is not used.'),
'W0614': ('Unused import %s from wildcard import',
'unused-wildcard-import',
'Used when an imported module or variable is not used from a \
`\'from X import *\'` style import.'),
'W0621': ('Redefining name %r from outer scope (line %s)',
'redefined-outer-name',
'Used when a variable\'s name hides a name defined in the outer \
scope.'),
'W0622': ('Redefining built-in %r',
'redefined-builtin',
'Used when a variable or function override a built-in.'),
'W0623': ('Redefining name %r from %s in exception handler',
'redefine-in-handler',
'Used when an exception handler assigns the exception \
to an existing name'),
'W0631': ('Using possibly undefined loop variable %r',
'undefined-loop-variable',
              'Used when a loop variable (i.e. defined by a for loop or \
a list comprehension or a generator expression) is used outside \
the loop.'),
'E0632': ('Possible unbalanced tuple unpacking with '
'sequence%s: '
'left side has %d label(s), right side has %d value(s)',
'unbalanced-tuple-unpacking',
'Used when there is an unbalanced tuple unpacking in assignment',
{'old_names': [('W0632', 'unbalanced-tuple-unpacking')]}),
'E0633': ('Attempting to unpack a non-sequence%s',
'unpacking-non-sequence',
'Used when something which is not '
'a sequence is used in an unpack assignment',
{'old_names': [('W0633', 'unpacking-non-sequence')]}),
'W0640': ('Cell variable %s defined in loop',
'cell-var-from-loop',
'A variable used in a closure is defined in a loop. '
'This will result in all closures using the same value for '
'the closed-over variable.'),
}
ScopeConsumer = collections.namedtuple("ScopeConsumer", "to_consume consumed scope_type")
class NamesConsumer(object):
"""
A simple class to handle consumed, to consume and scope type info of node locals
"""
def __init__(self, node, scope_type):
self._atomic = ScopeConsumer(copy.copy(node.locals), {}, scope_type)
def __repr__(self):
msg = "\nto_consume : {:s}\n".format(
", ".join(["{}->{}".format(key, val)
for key, val in self._atomic.to_consume.items()]))
msg += "consumed : {:s}\n".format(
", ".join(["{}->{}".format(key, val)
for key, val in self._atomic.consumed.items()]))
msg += "scope_type : {:s}\n".format(self._atomic.scope_type)
return msg
def __iter__(self):
return iter(self._atomic)
@property
def to_consume(self):
return self._atomic.to_consume
@property
def consumed(self):
return self._atomic.consumed
@property
def scope_type(self):
return self._atomic.scope_type
def mark_as_consumed(self, name, new_node):
"""
Mark the name as consumed and delete it from
        the to_consume dictionary
"""
self.consumed[name] = new_node
del self.to_consume[name]
def get_next_to_consume(self, node):
# mark the name as consumed if it's defined in this scope
name = node.name
parent_node = node.parent
found_node = self.to_consume.get(name)
if (found_node and isinstance(parent_node, astroid.Assign)
and parent_node == found_node[0].parent):
lhs = found_node[0].parent.targets[0]
if lhs.name == name: # this name is defined in this very statement
found_node = None
return found_node
class VariablesChecker(BaseChecker):
"""checks for
* unused variables / imports
* undefined variables
* redefinition of variable from builtins or from an outer scope
* use of variable before assignment
* __all__ consistency
"""
__implements__ = IAstroidChecker
name = 'variables'
msgs = MSGS
priority = -1
options = (("init-import",
{'default': 0, 'type' : 'yn', 'metavar' : '<y_or_n>',
'help' : 'Tells whether we should check for unused import in '
'__init__ files.'}),
("dummy-variables-rgx",
{'default': '_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_',
'type' :'regexp', 'metavar' : '<regexp>',
'help' : 'A regular expression matching the name of dummy '
'variables (i.e. expectedly not used).'}),
("additional-builtins",
{'default': (), 'type' : 'csv',
'metavar' : '<comma separated list>',
'help' : 'List of additional names supposed to be defined in '
'builtins. Remember that you should avoid to define new builtins '
'when possible.'
}),
("callbacks",
{'default' : ('cb_', '_cb'), 'type' : 'csv',
'metavar' : '<callbacks>',
'help' : 'List of strings which can identify a callback '
'function by name. A callback name must start or '
'end with one of those strings.'}
),
("redefining-builtins-modules",
{'default': ('six.moves', 'past.builtins', 'future.builtins'), 'type': 'csv',
'metavar': '<comma separated list>',
'help': 'List of qualified module names which can have objects '
'that can redefine builtins.'}
),
('ignored-argument-names',
{'default' : IGNORED_ARGUMENT_NAMES,
'type' :'regexp', 'metavar' : '<regexp>',
'help' : 'Argument names that match this expression will be '
'ignored. Default to name with leading underscore'}
),
('allow-global-unused-variables',
{'default': True,
'type': 'yn', 'metavar': '<y_or_n>',
'help': 'Tells whether unused global variables should be treated as a violation.'}
),
)
def __init__(self, linter=None):
BaseChecker.__init__(self, linter)
self._to_consume = None # list of tuples: (to_consume:dict, consumed:dict, scope_type:str)
self._checking_mod_attr = None
self._loop_variables = []
# Relying on other checker's options, which might not have been initialized yet.
@decorators.cachedproperty
def _analyse_fallback_blocks(self):
return get_global_option(self, 'analyse-fallback-blocks', default=False)
@decorators.cachedproperty
def _ignored_modules(self):
return get_global_option(self, 'ignored-modules', default=[])
@decorators.cachedproperty
def _allow_global_unused_variables(self):
return get_global_option(self, 'allow-global-unused-variables', default=True)
@utils.check_messages('redefined-outer-name')
def visit_for(self, node):
assigned_to = [var.name for var in node.target.nodes_of_class(astroid.AssignName)]
# Only check variables that are used
dummy_rgx = self.config.dummy_variables_rgx
assigned_to = [var for var in assigned_to if not dummy_rgx.match(var)]
for variable in assigned_to:
for outer_for, outer_variables in self._loop_variables:
if (variable in outer_variables
and not in_for_else_branch(outer_for, node)):
self.add_message(
'redefined-outer-name',
args=(variable, outer_for.fromlineno),
node=node
)
break
self._loop_variables.append((node, assigned_to))
@utils.check_messages('redefined-outer-name')
def leave_for(self, _):
self._loop_variables.pop()
def visit_module(self, node):
"""visit module : update consumption analysis variable
        checks that globals don't override builtins
"""
self._to_consume = [NamesConsumer(node, 'module')]
for name, stmts in six.iteritems(node.locals):
if utils.is_builtin(name) and not utils.is_inside_except(stmts[0]):
if self._should_ignore_redefined_builtin(stmts[0]):
continue
self.add_message('redefined-builtin', args=name, node=stmts[0])
@utils.check_messages('unused-import', 'unused-wildcard-import',
'redefined-builtin', 'undefined-all-variable',
'invalid-all-object', 'unused-variable')
def leave_module(self, node):
"""leave module: check globals
"""
assert len(self._to_consume) == 1
not_consumed = self._to_consume.pop().to_consume
# attempt to check for __all__ if defined
if '__all__' in node.locals:
self._check_all(node, not_consumed)
# check for unused globals
self._check_globals(not_consumed)
# don't check unused imports in __init__ files
if not self.config.init_import and node.package:
return
self._check_imports(not_consumed)
def _check_all(self, node, not_consumed):
assigned = next(node.igetattr('__all__'))
if assigned is astroid.YES:
return
for elt in getattr(assigned, 'elts', ()):
try:
elt_name = next(elt.infer())
except astroid.InferenceError:
continue
if elt_name is astroid.Uninferable:
continue
if not elt_name.parent:
continue
if (not isinstance(elt_name, astroid.Const)
or not isinstance(elt_name.value, six.string_types)):
self.add_message('invalid-all-object',
args=elt.as_string(), node=elt)
continue
elt_name = elt_name.value
# If elt is in not_consumed, remove it from not_consumed
if elt_name in not_consumed:
del not_consumed[elt_name]
continue
if elt_name not in node.locals:
if not node.package:
self.add_message('undefined-all-variable',
args=(elt_name, ),
node=elt)
else:
basename = os.path.splitext(node.file)[0]
if os.path.basename(basename) == '__init__':
name = node.name + "." + elt_name
try:
modutils.file_from_modpath(name.split("."))
except ImportError:
self.add_message('undefined-all-variable',
args=(elt_name, ),
node=elt)
except SyntaxError:
                            # don't yield a syntax-error warning,
                            # because it will be yielded later
                            # when the file is checked
pass
def _check_globals(self, not_consumed):
if self._allow_global_unused_variables:
return
for name, nodes in six.iteritems(not_consumed):
for node in nodes:
self.add_message('unused-variable', args=(name,), node=node)
def _check_imports(self, not_consumed):
local_names = _fix_dot_imports(not_consumed)
checked = set()
for name, stmt in local_names:
for imports in stmt.names:
real_name = imported_name = imports[0]
if imported_name == "*":
real_name = name
as_name = imports[1]
if real_name in checked:
continue
if name not in (real_name, as_name):
continue
checked.add(real_name)
if (isinstance(stmt, astroid.Import) or
(isinstance(stmt, astroid.ImportFrom) and
not stmt.modname)):
if (isinstance(stmt, astroid.ImportFrom) and
SPECIAL_OBJ.search(imported_name)):
# Filter special objects (__doc__, __all__) etc.,
# because they can be imported for exporting.
continue
if as_name == "_":
continue
if as_name is None:
msg = "import %s" % imported_name
else:
msg = "%s imported as %s" % (imported_name, as_name)
self.add_message('unused-import', args=msg, node=stmt)
elif (isinstance(stmt, astroid.ImportFrom)
and stmt.modname != FUTURE):
if SPECIAL_OBJ.search(imported_name):
# Filter special objects (__doc__, __all__) etc.,
# because they can be imported for exporting.
continue
if _is_from_future_import(stmt, name):
# Check if the name is in fact loaded from a
# __future__ import in another module.
continue
if imported_name == '*':
self.add_message('unused-wildcard-import',
args=name, node=stmt)
else:
if as_name is None:
msg = "%s imported from %s" % (imported_name, stmt.modname)
else:
fields = (imported_name, stmt.modname, as_name)
msg = "%s imported from %s as %s" % fields
self.add_message('unused-import', args=msg, node=stmt)
del self._to_consume
def visit_classdef(self, node):
"""visit class: update consumption analysis variable
"""
self._to_consume.append(NamesConsumer(node, 'class'))
def leave_classdef(self, _):
"""leave class: update consumption analysis variable
"""
# do not check for not used locals here (no sense)
self._to_consume.pop()
def visit_lambda(self, node):
"""visit lambda: update consumption analysis variable
"""
self._to_consume.append(NamesConsumer(node, 'lambda'))
def leave_lambda(self, _):
"""leave lambda: update consumption analysis variable
"""
# do not check for not used locals here
self._to_consume.pop()
def visit_generatorexp(self, node):
"""visit genexpr: update consumption analysis variable
"""
self._to_consume.append(NamesConsumer(node, 'comprehension'))
def leave_generatorexp(self, _):
"""leave genexpr: update consumption analysis variable
"""
# do not check for not used locals here
self._to_consume.pop()
def visit_dictcomp(self, node):
"""visit dictcomp: update consumption analysis variable
"""
self._to_consume.append(NamesConsumer(node, 'comprehension'))
def leave_dictcomp(self, _):
"""leave dictcomp: update consumption analysis variable
"""
# do not check for not used locals here
self._to_consume.pop()
def visit_setcomp(self, node):
"""visit setcomp: update consumption analysis variable
"""
self._to_consume.append(NamesConsumer(node, 'comprehension'))
def leave_setcomp(self, _):
"""leave setcomp: update consumption analysis variable
"""
# do not check for not used locals here
self._to_consume.pop()
def visit_functiondef(self, node):
"""visit function: update consumption analysis variable and check locals
"""
self._to_consume.append(NamesConsumer(node, 'function'))
if not (self.linter.is_message_enabled('redefined-outer-name') or
self.linter.is_message_enabled('redefined-builtin')):
return
globs = node.root().globals
for name, stmt in node.items():
if utils.is_inside_except(stmt):
continue
if name in globs and not isinstance(stmt, astroid.Global):
definition = globs[name][0]
if (isinstance(definition, astroid.ImportFrom)
and definition.modname == FUTURE):
# It is a __future__ directive, not a symbol.
continue
line = definition.fromlineno
dummy_rgx = self.config.dummy_variables_rgx
if not dummy_rgx.match(name):
self.add_message('redefined-outer-name',
args=(name, line), node=stmt)
elif utils.is_builtin(name) and not self._should_ignore_redefined_builtin(stmt):
# do not print Redefining builtin for additional builtins
self.add_message('redefined-builtin', args=name, node=stmt)
def _is_name_ignored(self, stmt, name):
authorized_rgx = self.config.dummy_variables_rgx
if (isinstance(stmt, astroid.AssignName)
and isinstance(stmt.parent, astroid.Arguments)):
regex = self.config.ignored_argument_names
else:
regex = authorized_rgx
return regex and regex.match(name)
def _check_is_unused(self, name, node, stmt, global_names, nonlocal_names):
# Ignore some special names specified by user configuration.
if self._is_name_ignored(stmt, name):
return
# Ignore names that were added dynamically to the Function scope
if (isinstance(node, astroid.FunctionDef)
and name == '__class__'
and len(node.locals['__class__']) == 1
and isinstance(node.locals['__class__'][0], astroid.ClassDef)):
return
# Ignore names imported by the global statement.
        # FIXME: should only ignore them if it's assigned later
if isinstance(stmt, astroid.Global):
return
if isinstance(stmt, (astroid.Import, astroid.ImportFrom)):
# Detect imports, assigned to global statements.
if global_names and _import_name_is_global(stmt, global_names):
return
argnames = list(itertools.chain(
node.argnames(),
[arg.name for arg in node.args.kwonlyargs]
))
is_method = node.is_method()
klass = node.parent.frame()
if is_method and isinstance(klass, astroid.ClassDef):
confidence = INFERENCE if utils.has_known_bases(klass) else INFERENCE_FAILURE
else:
confidence = HIGH
# Care about functions with unknown argument (builtins)
if name in argnames:
if is_method:
# Don't warn for the first argument of a (non static) method
if node.type != 'staticmethod' and name == argnames[0]:
return
# Don't warn for argument of an overridden method
overridden = overridden_method(klass, node.name)
if overridden is not None and name in overridden.argnames():
return
if node.name in utils.PYMETHODS and node.name not in ('__init__', '__new__'):
return
# Don't check callback arguments
if any(node.name.startswith(cb) or node.name.endswith(cb)
for cb in self.config.callbacks):
return
# Don't check arguments of singledispatch.register function.
if utils.is_registered_in_singledispatch_function(node):
return
self.add_message('unused-argument', args=name, node=stmt,
confidence=confidence)
else:
if stmt.parent and isinstance(stmt.parent, astroid.Assign):
if name in nonlocal_names:
return
if isinstance(stmt, astroid.Import):
# Need the complete name, which we don't have in .locals.
qname, asname = stmt.names[0]
name = asname or qname
self.add_message('unused-variable', args=name, node=stmt)
def leave_functiondef(self, node):
"""leave function: check function's locals are consumed"""
not_consumed = self._to_consume.pop().to_consume
if not (self.linter.is_message_enabled('unused-variable') or
self.linter.is_message_enabled('unused-argument')):
return
# Don't check arguments of function which are only raising an exception.
if utils.is_error(node):
return
# Don't check arguments of abstract methods or within an interface.
is_method = node.is_method()
if is_method and node.is_abstract():
return
global_names = _flattened_scope_names(node.nodes_of_class(astroid.Global))
nonlocal_names = _flattened_scope_names(node.nodes_of_class(astroid.Nonlocal))
for name, stmts in six.iteritems(not_consumed):
self._check_is_unused(name, node, stmts[0], global_names, nonlocal_names)
visit_asyncfunctiondef = visit_functiondef
leave_asyncfunctiondef = leave_functiondef
@utils.check_messages('global-variable-undefined', 'global-variable-not-assigned',
'global-statement', 'global-at-module-level',
'redefined-builtin')
def visit_global(self, node):
"""check names imported exists in the global scope"""
frame = node.frame()
if isinstance(frame, astroid.Module):
self.add_message('global-at-module-level', node=node)
return
module = frame.root()
default_message = True
for name in node.names:
try:
assign_nodes = module.getattr(name)
except astroid.NotFoundError:
# unassigned global, skip
assign_nodes = []
if not assign_nodes:
self.add_message('global-variable-not-assigned',
args=name, node=node)
default_message = False
continue
for anode in assign_nodes:
if (isinstance(anode, astroid.AssignName)
and anode.name in module.special_attributes):
self.add_message('redefined-builtin', args=name, node=node)
break
if anode.frame() is module:
# module level assignment
break
else:
# global undefined at the module scope
self.add_message('global-variable-undefined', args=name, node=node)
default_message = False
if default_message:
self.add_message('global-statement', node=node)
def _check_late_binding_closure(self, node, assignment_node):
def _is_direct_lambda_call():
return (isinstance(node_scope.parent, astroid.Call)
and node_scope.parent.func is node_scope)
node_scope = node.scope()
if not isinstance(node_scope, (astroid.Lambda, astroid.FunctionDef)):
return
if isinstance(node.parent, astroid.Arguments):
return
if isinstance(assignment_node, astroid.Comprehension):
if assignment_node.parent.parent_of(node.scope()):
self.add_message('cell-var-from-loop', node=node, args=node.name)
else:
assign_scope = assignment_node.scope()
maybe_for = assignment_node
while not isinstance(maybe_for, astroid.For):
if maybe_for is assign_scope:
break
maybe_for = maybe_for.parent
else:
if (maybe_for.parent_of(node_scope)
and not _is_direct_lambda_call()
and not isinstance(node_scope.statement(), astroid.Return)):
self.add_message('cell-var-from-loop', node=node, args=node.name)
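    # A hedged illustration (not part of the pylint source): the late-binding
    # pitfall detected above is
    #
    #     funcs = [lambda: i for i in range(3)]    # cell-var-from-loop
    #     [f() for f in funcs]                     # -> [2, 2, 2], not [0, 1, 2]
    #
    # because each lambda captures the cell holding i, not i's value at the
    # time the lambda was created.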
def _loopvar_name(self, node, name):
# filter variables according to node's scope
# XXX used to filter parents but don't remember why, and removing this
# fixes a W0631 false positive reported by Paul Hachmann on 2008/12 on
# python-projects (added to func_use_for_or_listcomp_var test)
#astmts = [stmt for stmt in node.lookup(name)[1]
# if hasattr(stmt, 'ass_type')] and
# not stmt.statement().parent_of(node)]
if not self.linter.is_message_enabled('undefined-loop-variable'):
return
astmts = [stmt for stmt in node.lookup(name)[1]
if hasattr(stmt, 'ass_type')]
        # filter variables according to their respective scope; test
        # is_statement and parent to avoid #74747. This is not a total fix, which would
# introduce a mechanism similar to special attribute lookup in
# modules. Also, in order to get correct inference in this case, the
# scope lookup rules would need to be changed to return the initial
# assignment (which does not exist in code per se) as well as any later
# modifications.
if not astmts or (astmts[0].is_statement or astmts[0].parent) \
and astmts[0].statement().parent_of(node):
_astmts = []
else:
_astmts = astmts[:1]
for i, stmt in enumerate(astmts[1:]):
if (astmts[i].statement().parent_of(stmt)
and not in_for_else_branch(astmts[i].statement(), stmt)):
continue
_astmts.append(stmt)
astmts = _astmts
if len(astmts) == 1:
assign = astmts[0].assign_type()
if (isinstance(assign, (astroid.For, astroid.Comprehension,
astroid.GeneratorExp))
and assign.statement() is not node.statement()):
self.add_message('undefined-loop-variable', args=name, node=node)
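    # A hedged illustration (not part of the pylint source): the message above
    # flags a loop variable read after its loop, where it may never have been
    # bound at all:
    #
    #     for item in iterable:
    #         pass
    #     print(item)    # undefined-loop-variable if iterable can be empty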
def _should_ignore_redefined_builtin(self, stmt):
if not isinstance(stmt, astroid.ImportFrom):
return False
return stmt.modname in self.config.redefining_builtins_modules
@utils.check_messages('redefine-in-handler')
def visit_excepthandler(self, node):
for name in utils.get_all_elements(node.name):
clobbering, args = utils.clobber_in_except(name)
if clobbering:
self.add_message('redefine-in-handler', args=args, node=name)
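    # A hedged illustration (not part of the pylint source): the message above
    # fires when an exception handler rebinds an existing name, e.g.
    #
    #     try:
    #         pass
    #     except Exception as OSError:    # redefine-in-handler: clobbers OSError
    #         pass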
def visit_assignname(self, node):
if isinstance(node.assign_type(), astroid.AugAssign):
self.visit_name(node)
def visit_delname(self, node):
self.visit_name(node)
@staticmethod
def _defined_in_function_definition(node, frame):
in_annotation_or_default = False
if (isinstance(frame, astroid.FunctionDef) and
node.statement() is frame):
in_annotation_or_default = (
(
PY3K and (node in frame.args.annotations
or node in frame.args.kwonlyargs_annotations
or node is frame.args.varargannotation
or node is frame.args.kwargannotation)
)
or
frame.args.parent_of(node)
)
return in_annotation_or_default
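    # A hedged note on the helper above: argument annotations and default
    # values are evaluated in the scope *enclosing* the function, which is why
    # names appearing there are treated as defined-in-function-definition:
    #
    #     def f(x: SomeType = some_default):    # both names resolve outside f
    #         ...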
@staticmethod
def _is_variable_violation(node, name, defnode, stmt, defstmt,
frame, defframe, base_scope_type,
recursive_klass):
maybee0601 = True
annotation_return = False
use_outer_definition = False
if frame is not defframe:
maybee0601 = _detect_global_scope(node, frame, defframe)
elif defframe.parent is None:
# we are at the module level, check the name is not
# defined in builtins
if name in defframe.scope_attrs or astroid.builtin_lookup(name)[1]:
maybee0601 = False
else:
# we are in a local scope, check the name is not
# defined in global or builtin scope
# skip this lookup if name is assigned later in function scope
forbid_lookup = isinstance(frame, astroid.FunctionDef) and _assigned_locally(node)
if not forbid_lookup and defframe.root().lookup(name)[1]:
maybee0601 = False
use_outer_definition = (
stmt == defstmt
and not isinstance(defnode, astroid.node_classes.Comprehension)
)
else:
# check if we have a nonlocal
if name in defframe.locals:
maybee0601 = not any(isinstance(child, astroid.Nonlocal)
and name in child.names
for child in defframe.get_children())
if (base_scope_type == 'lambda' and
isinstance(frame, astroid.ClassDef)
and name in frame.locals):
# This rule verifies that if the definition node of the
# checked name is an Arguments node and if the name
                # is used as a default value in the arguments defaults
# and the actual definition of the variable label
# is happening before the Arguments definition.
#
# bar = None
# foo = lambda bar=bar: bar
#
# In this case, maybee0601 should be False, otherwise
# it should be True.
maybee0601 = not (isinstance(defnode, astroid.Arguments) and
node in defnode.defaults and
frame.locals[name][0].fromlineno < defstmt.fromlineno)
elif (isinstance(defframe, astroid.ClassDef) and
isinstance(frame, astroid.FunctionDef)):
                # Special rule for function return annotations,
                # which use the same name as the class in which
                # the function lives.
if (PY3K and node is frame.returns and
defframe.parent_of(frame.returns)):
maybee0601 = annotation_return = True
if (maybee0601 and defframe.name in defframe.locals and
defframe.locals[name][0].lineno < frame.lineno):
# Detect class assignments with the same
# name as the class. In this case, no warning
# should be raised.
maybee0601 = False
if isinstance(node.parent, astroid.Arguments):
maybee0601 = stmt.fromlineno <= defstmt.fromlineno
elif recursive_klass:
maybee0601 = True
else:
maybee0601 = maybee0601 and stmt.fromlineno <= defstmt.fromlineno
if maybee0601 and stmt.fromlineno == defstmt.fromlineno:
if (isinstance(defframe, astroid.FunctionDef)
and frame is defframe
and defframe.parent_of(node)
and stmt is not defstmt):
# Single statement function, with the statement on the
# same line as the function definition
maybee0601 = False
return maybee0601, annotation_return, use_outer_definition
def _ignore_class_scope(self, node):
"""
Return True if the node is in a local class scope, as an assignment.
:param node: Node considered
:type node: astroid.Node
:return: True if the node is in a local class scope, as an assignment. False otherwise.
:rtype: bool
"""
# Detect if we are in a local class scope, as an assignment.
# For example, the following is fair game.
#
# class A:
# b = 1
# c = lambda b=b: b * b
#
# class B:
# tp = 1
# def func(self, arg: tp):
# ...
# class C:
# tp = 2
# def func(self, arg=tp):
# ...
name = node.name
frame = node.statement().scope()
in_annotation_or_default = self._defined_in_function_definition(node, frame)
if in_annotation_or_default:
frame_locals = frame.parent.scope().locals
else:
frame_locals = frame.locals
return not ((isinstance(frame, astroid.ClassDef) or in_annotation_or_default) and
name in frame_locals)
@utils.check_messages(*(MSGS.keys()))
def visit_name(self, node):
"""check that a name is defined if the current scope and doesn't
redefine a built-in
"""
stmt = node.statement()
if stmt.fromlineno is None:
            # name node from an astroid tree built from live code, skip
assert not stmt.root().file.endswith('.py')
return
name = node.name
frame = stmt.scope()
# if the name node is used as a function default argument's value or as
# a decorator, then start from the parent frame of the function instead
# of the function frame - and thus open an inner class scope
if (utils.is_func_default(node) or utils.is_func_decorator(node)
or utils.is_ancestor_name(frame, node)):
start_index = len(self._to_consume) - 2
else:
start_index = len(self._to_consume) - 1
# iterates through parent scopes, from the inner to the outer
base_scope_type = self._to_consume[start_index].scope_type
# pylint: disable=too-many-nested-blocks; refactoring this block is a pain.
for i in range(start_index, -1, -1):
current_consumer = self._to_consume[i]
            # if the current scope is a class scope but it's not the inner
            # scope, ignore it. This prevents accessing this scope instead of
            # the global one in function members when names collide. The only
            # exception is when the starting scope is a comprehension and its
            # direct outer scope is a class.
if current_consumer.scope_type == 'class' and i != start_index and not (
base_scope_type == 'comprehension' and i == start_index-1):
if self._ignore_class_scope(node):
continue
# the name has already been consumed, only check it's not a loop
# variable used outside the loop
# avoid the case where there are homonyms inside function scope and
# comprehension current scope (avoid bug #1731)
if name in current_consumer.consumed and not (
current_consumer.scope_type == 'comprehension'
and self._has_homonym_in_upper_function_scope(node, i)):
defnode = utils.assign_parent(current_consumer.consumed[name][0])
self._check_late_binding_closure(node, defnode)
self._loopvar_name(node, name)
break
found_node = current_consumer.get_next_to_consume(node)
if found_node is None:
continue
# checks for use before assignment
defnode = utils.assign_parent(current_consumer.to_consume[name][0])
if defnode is not None:
self._check_late_binding_closure(node, defnode)
defstmt = defnode.statement()
defframe = defstmt.frame()
# The class reuses itself in the class scope.
recursive_klass = (frame is defframe and
defframe.parent_of(node) and
isinstance(defframe, astroid.ClassDef) and
node.name == defframe.name)
maybee0601, annotation_return, use_outer_definition = self._is_variable_violation(
node, name, defnode, stmt, defstmt,
frame, defframe,
base_scope_type, recursive_klass)
if use_outer_definition:
continue
if (maybee0601
and not utils.is_defined_before(node)
and not astroid.are_exclusive(stmt, defstmt, ('NameError',))):
# Used and defined in the same place, e.g `x += 1` and `del x`
defined_by_stmt = (
defstmt is stmt
and isinstance(node, (astroid.DelName, astroid.AssignName))
)
if (recursive_klass
or defined_by_stmt
or annotation_return
or isinstance(defstmt, astroid.Delete)):
if not utils.node_ignores_exception(node, NameError):
self.add_message('undefined-variable', args=name,
node=node)
elif base_scope_type != 'lambda':
                    # E0601 may *not* occur in lambda scope.
self.add_message('used-before-assignment', args=name, node=node)
elif base_scope_type == 'lambda':
# E0601 can occur in class-level scope in lambdas, as in
# the following example:
# class A:
# x = lambda attr: f + attr
# f = 42
if isinstance(frame, astroid.ClassDef) and name in frame.locals:
if isinstance(node.parent, astroid.Arguments):
if stmt.fromlineno <= defstmt.fromlineno:
# Doing the following is fine:
# class A:
# x = 42
# y = lambda attr=x: attr
self.add_message('used-before-assignment',
args=name, node=node)
else:
self.add_message('undefined-variable',
args=name, node=node)
elif current_consumer.scope_type == 'lambda':
self.add_message('undefined-variable',
node=node, args=name)
current_consumer.mark_as_consumed(name, found_node)
# check it's not a loop variable used outside the loop
self._loopvar_name(node, name)
break
else:
            # we have not found the name; if it isn't a builtin, that's an
            # undefined name!
if not (name in astroid.Module.scope_attrs or utils.is_builtin(name)
or name in self.config.additional_builtins):
if not utils.node_ignores_exception(node, NameError):
self.add_message('undefined-variable', args=name, node=node)
def _has_homonym_in_upper_function_scope(self, node, index):
"""
Return True if there is a node with the same name in the to_consume dict of an upper scope
and if that scope is a function
:param node: node to check for
:type node: astroid.Node
:param index: index of the current consumer inside self._to_consume
:type index: int
        :return: True if there is a node with the same name in the to_consume dict of an upper scope
and if that scope is a function
:rtype: bool
"""
for _consumer in self._to_consume[index-1::-1]:
if _consumer.scope_type == 'function' and node.name in _consumer.to_consume:
return True
return False
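    # A hedged illustration (not part of the pylint source) of the homonym
    # case this lookup guards against (compare bug #1731 mentioned above): a
    # comprehension variable sharing its name with one from an enclosing
    # function scope:
    #
    #     def f(x):
    #         return [x for x in range(x)]    # inner x must not hide outer x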
@utils.check_messages('no-name-in-module')
def visit_import(self, node):
"""check modules attribute accesses"""
if not self._analyse_fallback_blocks and utils.is_from_fallback_block(node):
# No need to verify this, since ImportError is already
# handled by the client code.
return
for name, _ in node.names:
parts = name.split('.')
try:
module = next(node.infer_name_module(parts[0]))
except astroid.ResolveError:
continue
self._check_module_attrs(node, module, parts[1:])
@utils.check_messages('no-name-in-module')
def visit_importfrom(self, node):
"""check modules attribute accesses"""
if not self._analyse_fallback_blocks and utils.is_from_fallback_block(node):
# No need to verify this, since ImportError is already
# handled by the client code.
return
name_parts = node.modname.split('.')
try:
module = node.do_import_module(name_parts[0])
except astroid.AstroidBuildingException:
return
module = self._check_module_attrs(node, module, name_parts[1:])
if not module:
return
for name, _ in node.names:
if name == '*':
continue
self._check_module_attrs(node, module, name.split('.'))
@utils.check_messages('unbalanced-tuple-unpacking', 'unpacking-non-sequence')
def visit_assign(self, node):
"""Check unbalanced tuple unpacking for assignments
and unpacking non-sequences.
"""
if not isinstance(node.targets[0], (astroid.Tuple, astroid.List)):
return
targets = node.targets[0].itered()
try:
infered = utils.safe_infer(node.value)
if infered is not None:
self._check_unpacking(infered, node, targets)
except astroid.InferenceError:
return
def _check_unpacking(self, infered, node, targets):
""" Check for unbalanced tuple unpacking
and unpacking non sequences.
"""
if utils.is_inside_abstract_class(node):
return
if utils.is_comprehension(node):
return
if infered is astroid.YES:
return
if (isinstance(infered.parent, astroid.Arguments) and
isinstance(node.value, astroid.Name) and
node.value.name == infered.parent.vararg):
# Variable-length argument, we can't determine the length.
return
if isinstance(infered, (astroid.Tuple, astroid.List)):
# attempt to check unpacking is properly balanced
values = infered.itered()
if len(targets) != len(values):
# Check if we have starred nodes.
if any(isinstance(target, astroid.Starred)
for target in targets):
return
self.add_message('unbalanced-tuple-unpacking', node=node,
args=(_get_unpacking_extra_info(node, infered),
len(targets),
len(values)))
# attempt to check unpacking may be possible (ie RHS is iterable)
else:
if not utils.is_iterable(infered):
self.add_message('unpacking-non-sequence', node=node,
args=(_get_unpacking_extra_info(node, infered),))
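    # A hedged illustration (not part of the pylint source): the two messages
    # above correspond to
    #
    #     a, b = (1, 2, 3)    # unbalanced-tuple-unpacking: 3 values, 2 targets
    #     a, b = 42           # unpacking-non-sequence: RHS is not iterable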
def _check_module_attrs(self, node, module, module_names):
"""check that module_names (list of string) are accessible through the
given module
        if the last accessed name corresponds to a module, return it
"""
assert isinstance(module, astroid.Module), module
while module_names:
name = module_names.pop(0)
if name == '__dict__':
module = None
break
try:
module = next(module.getattr(name)[0].infer())
if module is astroid.Uninferable:
return None
except astroid.NotFoundError:
if module.name in self._ignored_modules:
return None
self.add_message('no-name-in-module',
args=(name, module.name), node=node)
return None
except astroid.InferenceError:
return None
if module_names:
# FIXME: other message if name is not the latest part of
# module_names ?
modname = module.name if module else '__dict__'
self.add_message('no-name-in-module', node=node,
args=('.'.join(module_names), modname))
return None
if isinstance(module, astroid.Module):
return module
return None
class VariablesChecker3k(VariablesChecker):
'''Modified variables checker for 3k'''
    # list comprehensions now also have their own scope
def visit_listcomp(self, node):
"""visit dictcomp: update consumption analysis variable
"""
self._to_consume.append(NamesConsumer(node, 'comprehension'))
def leave_listcomp(self, _):
"""leave dictcomp: update consumption analysis variable
"""
# do not check for not used locals here
self._to_consume.pop()
def leave_functiondef(self, node):
self._check_metaclasses(node)
super(VariablesChecker3k, self).leave_functiondef(node)
def leave_module(self, node):
self._check_metaclasses(node)
super(VariablesChecker3k, self).leave_module(node)
def _check_metaclasses(self, node):
""" Update consumption analysis for metaclasses. """
consumed = [] # [(scope_locals, consumed_key)]
for child_node in node.get_children():
if isinstance(child_node, astroid.ClassDef):
consumed.extend(self._check_classdef_metaclasses(child_node, node))
# Pop the consumed items, in order to avoid having
# unused-import and unused-variable false positives
for scope_locals, name in consumed:
scope_locals.pop(name, None)
def _check_classdef_metaclasses(self, klass, parent_node):
if not klass._metaclass:
            # Skip if this class doesn't explicitly use a metaclass, but inherits it from ancestors
return []
consumed = [] # [(scope_locals, consumed_key)]
metaclass = klass.metaclass()
name = None
if isinstance(klass._metaclass, astroid.Name):
name = klass._metaclass.name
elif metaclass:
name = metaclass.root().name
found = None
if name:
# check enclosing scopes starting from most local
for scope_locals, _, _ in self._to_consume[::-1]:
found = scope_locals.get(name)
if found:
consumed.append((scope_locals, name))
break
if found is None and not metaclass:
name = None
if isinstance(klass._metaclass, astroid.Name):
name = klass._metaclass.name
elif isinstance(klass._metaclass, astroid.Attribute):
name = klass._metaclass.as_string()
if name is not None:
if not (name in astroid.Module.scope_attrs or
utils.is_builtin(name) or
name in self.config.additional_builtins or
name in parent_node.locals):
self.add_message('undefined-variable',
node=klass,
args=(name,))
return consumed
if sys.version_info >= (3, 0):
VariablesChecker = VariablesChecker3k
def register(linter):
"""required method to auto register this checker"""
linter.register_checker(VariablesChecker(linter))
|
lucidmotifs/auto-aoc
|
.venv/lib/python3.5/site-packages/pylint/checkers/variables.py
|
Python
|
mit
| 60,718
|
[
"VisIt"
] |
daa4151f62a016a67f0f5ca658b2de470a72adb51d663b7e64060463def31cc6
|
from asap3 import *
from ase.lattice.cubic import FaceCenteredCubic
from asap3.testtools import *
from numpy import *
import ase.data
def TestLists(nblist, fnb, name, count=None):
"Run the tests on a half and a full neighbor list."
print ""
if count:
print "Testing %s: Length of lists" % (name,)
sum = 0
for lst in nblist:
sum += len(lst)
ReportTest(" Half list", sum, count*len(atoms), 0)
lfnb = map(len, fnb)
assert len(lfnb) == len(atoms)
ReportTest(" Shortest full list", min(lfnb), 2*count, 0)
ReportTest(" Longest full list", max(lfnb), 2*count, 0)
print ("Testing %s: Symmetry of full list; full list atoms on half-lists."
% (name,))
for i, nb in enumerate(fnb):
for jj in nb:
j = int(jj)
ReportTest.BoolTest("Atom %d on list %d" % (j, i), i in fnb[j],
silent=True)
ReportTest.BoolTest("Exactly one of atoms %d and %d on half-lists"
% (j, i),
(i in nblist[j]) != (j in nblist[i]),
silent=True)
if ReportTest.GetNumberOfErrors() > 10:
print "*** Too many errors - giving up! ***"
break
print "Testing %s: Half-list atoms on full list." % (name,)
for i, nb in enumerate(nblist):
for jj in nb:
j = int(jj)
ReportTest.BoolTest("Atom %d on list %d (forward)" % (j, i),
j in fnb[i], silent=True)
ReportTest.BoolTest("Atom %d on list %d (reverse)" % (i, j),
i in fnb[j], silent=True)
if ReportTest.GetNumberOfErrors() > 10:
print "*** Too many errors - giving up! ***"
break
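# A sketch of the invariants exercised above (illustrative, not part of the
# test suite): for a full neighbor list fnb and a half list nblist,
#
#     j in fnb[i]  <=>  i in fnb[j]                       (full list symmetry)
#     j in fnb[i]  =>   exactly one of (i in nblist[j], j in nblist[i])
#
# i.e. every neighbor pair appears twice in the full list, once in the half.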
print_version(1)
element = "Cu"
latconst = ase.data.reference_states[ase.data.atomic_numbers[element]]['a']
atoms = FaceCenteredCubic(directions=[[1,0,0],[0,1,1],[0,0,1]], size=(9,7,5),
symbol=element, debug=0)
atoms.set_calculator(EMT(minimum_image=True))
epot = atoms.get_potential_energy()
nblist = atoms.get_calculator().get_neighborlist()
count = {}
for lst in nblist:
n = len(lst)
try:
count[n] += 1
except KeyError:
count[n] = 1
# print "Histogram:"
numbers = count.keys()
numbers.sort()
sum = 0
for i in numbers:
#print i, count[i]
sum += i*count[i]
ReportTest("Number of neighbors (EMT's NB list)", sum, 21*len(atoms), 0)
nblist = NeighborList(latconst * 0.5 * (1/sqrt(2) + 1), atoms, 0.0)
#nblist = NeighborCellLocator(latconst * 0.5 * (1/sqrt(2) + 1), atoms, 0.0)
fnb = FullNeighborList(latconst * 0.5 * (1/sqrt(2) + 1), Atoms(atoms))
TestLists(nblist, fnb, "nearest-neighbor lists (periodic)", 6)
ReportTest("Energy unperturbed 1", atoms.get_potential_energy(), epot, 1e-11)
atoms.set_positions(atoms.get_positions())
ReportTest("Energy unperturbed 2", atoms.get_potential_energy(), epot, 1e-11)
nblist = NeighborList(4.98409, atoms, 0.0)
fnb = FullNeighborList(4.98409, Atoms(atoms))
TestLists(nblist, fnb, "long neighbor lists (periodic)", 21)
ReportTest("Energy unperturbed 3", atoms.get_potential_energy(), epot, 1e-11)
atoms.set_positions(atoms.get_positions())
ReportTest("Energy unperturbed 4", atoms.get_potential_energy(), epot, 1e-11)
atoms = Atoms(atoms, pbc=(0,0,0))
nblist = NeighborList(latconst * 0.5 * (1/sqrt(2) + 1), atoms, 0.0)
fnb = FullNeighborList(latconst * 0.5 * (1/sqrt(2) + 1), Atoms(atoms))
TestLists(nblist, fnb, "nearest-neighbor lists (non-periodic)")
atoms = Atoms(atoms, pbc=(0,1,0))
nblist = NeighborList(4.98409, atoms, 0.0)
fnb = FullNeighborList(4.98409, Atoms(atoms))
TestLists(nblist, fnb, "long neighbor lists (semi-periodic)")
ReportTest.Summary()
|
auag92/n2dm
|
Asap-3.8.4/Test/NeighborList.py
|
Python
|
mit
| 3,822
|
[
"ASE"
] |
e3758bfe38295ad6f7bb2f2a8ae67d56ff03be2e19c89ee4a88049467f05eaa6
|
from serverus.models import *
from django.http import HttpResponse, HttpResponseRedirect
from django.template.response import TemplateResponse
from django.template import RequestContext
from django.views.decorators.csrf import csrf_protect
from django.contrib.humanize.templatetags import humanize
from django.db.models import Q
import random, string, smtplib, math, json
from datetime import datetime, timedelta
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import ldap
import ldap.filter
def readMITCert(env):
email = env.get('SSL_CLIENT_S_DN_Email', None)
if email:
username, domain = email.split("@")
assert domain.upper()=="MIT.EDU"
name=env.get('SSL_CLIENT_S_DN_CN', None)
names=name.replace(".","").split(" ")
first=names[0]
last=" ".join(names[2:]) if(len(names[1])==1) else " ".join(names[1:])
fullname=first + " " + last
return (email.lower(), username, first, fullname)
return False
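# A hedged example of the mod_ssl environment this helper expects (all values
# hypothetical):
#
#     env = {'SSL_CLIENT_S_DN_Email': 'jdoe@MIT.EDU',
#            'SSL_CLIENT_S_DN_CN': 'Jane Q. Doe'}
#     readMITCert(env)  # -> ('jdoe@mit.edu', 'jdoe', 'Jane', 'Jane Doe')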
def forbid(request):
quotebucket=[("All students are to return to their dormitories immediately.","This way to the Forbidden Forest."),
("Stop acting like you're the Chosen One.", "But I <i>am</i> the Chosen One."),
("Identification...?","I hardly think that's necessary. I wish to enter my vault."),
("People might think you're... up to something...","I solemnly swear that I am up to no good."),
("The door's locked!","...oh, <b>move over!</b> <i>Alohomora!</i>"),
("Password?",random.choice(["Caput Draconis","Fortuna Major"])),
("You're not allowed to use magic outside school.","<i>Expecto Patronum!</i>"),
("We've looked a hundred times.","Not in the Restricted Section."),
#("Turn out your pockets! ...What's this?","It's just a spare bit of parchment."),
#("Fear of a name only increases fear of the thing itself.","I Am Lord Voldemort."),
#("I don't go looking for trouble. Trouble usually finds me.","<i>Accio Trouble!</i>"),
#("To the well-organized mind, death is but the next great adventure.","<i>Avada Kedavra!</i>"),
]
quotes=random.choice(quotebucket)
return TemplateResponse(request, '403.html', {'quote1': quotes[0], 'quote2': quotes[1]}, status=403)
@csrf_protect
def home(request):
cert = readMITCert(request.environ)
if not cert:
return forbid(request)
(email, username, first, fullname) = cert
try:
user = User.objects.get(email=email)
if not user.enabled:
return TemplateResponse(request, 'expelled.html', {'firstname': first})
except User.DoesNotExist:
return TemplateResponse(request, 'agreement.html', {'name': fullname})
if user.building:
loc = (str(user.building.name) if user.building.name else "Building " + str(user.building.number)) + " - Floor " + str(user.floor)
else:
loc = "Location Unknown"
return TemplateResponse(request, 'home.html', {'location': loc, 'u': user, 'faves': getFaves(user)})
def privacy(request):
# cert = readMITCert(request.environ)
# if not cert:
# return TemplateResponse(request, '403.html')
return TemplateResponse(request, 'privacy.html')
def intro(request):
cert = readMITCert(request.environ)
if not cert:
return forbid(request)
(email, username, first, fullname) = cert
try:
user = User.objects.get(email=email)
if not user.enabled:
return TemplateResponse(request, 'expelled.html', {'firstname': first})
except User.DoesNotExist:
return HttpResponseRedirect("/")
return TemplateResponse(request, 'intro.html', {'firstname': first})
def mitmap(request):
return TemplateResponse(request, 'map.html')
def me(request):
cert = readMITCert(request.environ)
if not cert:
return forbid(request)
(email, username, first, fullname) = cert
try:
user = User.objects.get(email=email)
if not user.enabled:
return TemplateResponse(request, 'expelled.html', {'firstname': first})
except User.DoesNotExist:
return HttpResponseRedirect("/")
return TemplateResponse(request, 'me.html', {'krbuser': username, 'userdata': user})
def locuser(request, userid=None):
cert = readMITCert(request.environ)
if not cert:
return forbid(request)
(email, username, first, fullname) = cert
try:
user = User.objects.get(email=email)
if not user.enabled:
return TemplateResponse(request, 'expelled.html', {'firstname': first})
user2 = User.objects.get(pk=userid)
except User.DoesNotExist:
return HttpResponseRedirect("/")
if user2.sharing==0 and user not in user2.blocks.all():
return TemplateResponse(request, 'me.html', {'userid': userid, 'userdata': user2})
elif user2.sharing==1 and user in user2.shares.all():
return TemplateResponse(request, 'me.html', {'userid': userid, 'userdata': user2})
else:
return HttpResponseRedirect("/me")
def building(request, number=None):
cert = readMITCert(request.environ)
if not cert:
return forbid(request)
(email, username, first, fullname) = cert
try:
user = User.objects.get(email=email)
if not user.enabled:
return TemplateResponse(request, 'expelled.html', {'firstname': first})
building = Building.objects.get(number=number)
except User.DoesNotExist:
return HttpResponseRedirect("/")
#except Building.DoesNotExist:
#return HttpResponseRedirect("/buildings/")
return TemplateResponse(request, 'building.html', {'building': building})
@csrf_protect
def new(request):
if request.method == 'POST':
cert = readMITCert(request.environ)
if not cert:
return forbid(request)
(email, username, first, fullname) = cert
try:
user = User.objects.get(email = email)
except User.DoesNotExist:
u=User(name = fullname,email = email)
u.save()
return HttpResponseRedirect("/add-device")
return HttpResponseRedirect("/")
@csrf_protect
def addDevice(request):
cert = readMITCert(request.environ)
if not cert:
return forbid(request)
(email, username, first, fullname) = cert
try:
user = User.objects.get(email=email)
if not user.enabled:
return TemplateResponse(request, 'expelled.html', {'firstname': first})
except User.DoesNotExist:
return HttpResponseRedirect("/")
if request.method == 'GET':
if not user.deviceid:
return TemplateResponse(request, 'addDevice.html', {'userdata': user, 'firstname': first})
else:
return HttpResponseRedirect("/settings")
if request.method == 'POST' and "devcode" in request.POST.keys():
devcode=request.POST.get("devcode")
if len(devcode) == 6:
if not user.deviceid: #only add, don't update deviceid
try:
otp = AuthToken.objects.get(otp=devcode)
except AuthToken.DoesNotExist:
return TemplateResponse(request, 'addDevice.html', {'userdata': user, 'firstname': first, 'error': ", <span id='error' style='display:none;color:#B30000;'>correctly this time:</span>"})
user.deviceid=otp.deviceid
user.save()
otp.delete()
return HttpResponseRedirect("/intro")
return TemplateResponse(request, 'addDevice.html', {'userdata': user, 'firstname': first})
def android(request):
try:
user = User.objects.get(deviceid=request.POST.get("uuid",None))
except User.DoesNotExist:
return HttpResponse("Invalid request.")
#return TemplateResponse(request, 'new.html', {'name': fullname})
return TemplateResponse(request, 'android.html', {'firstname': user.name.split(" ")[0]})
def certOTP(request, otp=None):
cert = readMITCert(request.environ)
if cert:
(email, username, first, fullname) = cert
try:
user = User.objects.get(email=email)
if not user.enabled:
                return TemplateResponse(request, 'expelled.html', {'firstname': first})
except User.DoesNotExist:
return HttpResponseRedirect("/")
if otp and len(otp) == 6:
if not user.deviceid: #only add, don't update deviceid
try:
remoteOTP = AuthToken.objects.get(otp=otp)
except AuthToken.DoesNotExist:
return TemplateResponse(request, 'addDevice.html', {'userdata': user, 'firstname': first, 'error': ", <span id='error' style='display:none;color:#C90E0E;'>correctly this time:</span>"})
user.deviceid=remoteOTP.deviceid
user.save()
remoteOTP.delete()
return HttpResponse("<script>window.location='mitlocate://edu.mit.locate/';setTimeout(function(){window.close();},100);</script>")
locations=[
("W79",0,"Multi-Purpose Room",{'W79-MPR':45,'W79-280':70,'W79-154':75}),
("W79",1,"Late Night Cafe",{'W79-170':50,'W79-232':72,'W79-154':80,'W79-225':81,'W79-HMR':59}),
("W79",1,"TV Lounge near Late Night",{'W79-170':60,'W79-232':69,'W79-154':73,'W79-225':85}),
("W79",1,"Country Kitchen",{'W79-170':58,'W79-232':70,'W79-154':86,'W79-225':87}),
("W79",1,"Private Dining Room",{'W79-170':57,'W79-232':69,'W79-225':76}),
("W79",3,"3A Lounge east",{'W79-330':68,'W79-348':45,'W79-225':88,'W79-426':66,'W79-222':85}),
("W79",3,"3A Lounge west",{'W79-330':82,'W79-348':40,'W79-543':78,'W79-426':66,'W79-222':85}),
("W79",4,"4AB Lounge - TV area",{'W79-450':60,'W79-549':83,'W79-533':77,'W79-575':83,'W79-426':76,'W79-433':63,'W79-330':67}),
("W79",4,"4AB Lounge - Table",{'W79-450':49,'W79-549':81,'W79-533':88,'W79-433':67,'W79-426':73}),
("W79",4,"Room 464",{'W79-450':77,'W79-533':83,'W79-433':67,'W79-426':90,'W79-672':50}),
("W79",4,"4C west area",{'W79-580':80,'W79-274':85,'W79-440E':50,'W79-533':78,'W79-575':54,'W79-473':33}),
("W79",4,"4C east area",{'W79-575':74,'W79-280':66,'W79-580':53,'W79-440E':30,'W79-159':81}),
("W79",4,"4C Lounge",{'W79-580':70,'W79-280':80,'W79-440E':53,'W79-379':81,'W79-575':87,'W79-159':83}),
("W79",3,"3C west area",{'W79-379':51,'W79-280':77,'W79-337':45,'W79-473':54}),
("W79",3,"3C east area",{'W79-379':36,'W79-280':51,'W79-440E':61,'W79-159':69,'W79-473':78}),
("W79",3,"3C Lounge",{'W79-379':65,'W79-440E':78,'W79-280':80,'W79-337':56}),
("W79",3,"Room 340B",{'W79-379':66,'W79-440E':76,'W79-337':60,'W79-280':67,'W79-580':76}),
("W79",5,"5B area",{'W79-575':65,'W79-543':63,'W79-533':48,'W79-549':58,'W79-580':65,'W79-433':67}),
("W79",5,"5C area",{'W79-575':47,'W79-543':66,'W79-533':66,'W79-549':74,'W79-580':27}),
("E2",4,"Room 406",{'E2-404':40,'E2-411':70}),
("E2",4,"4WAR lounge",{'E2-404':76,'E2-421':70,'E2-411':44}),
("E2",4,"4WAR kitchen",{'E2-433':84,'E2-421':60,'E2-428':77}),
("E2",4,"4th floor lounge",{'E2-428':38,'E2-421':74,'E2-441':71,'E2-431':60})
]
# MOBILE APPLICATION API
def auth(request):
if request.method == 'GET':
return HttpResponse(status=403)
elif request.method == 'POST':
jsonData = json.loads(request.body)
response = {}
if "UUID" not in jsonData:
response["error"] = "Invalid request."
else:
try:
user = User.objects.get(deviceid = jsonData["UUID"])
if user.enabled:
response["status"] = "ok"
response["name"] = user.name.split(" ")[0]
response["fullname"] = user.name
response["email"] = user.email
else:
response["status"] = "banned"
except User.DoesNotExist:
#random.seed(request.POST["UUID"])
remoteOTP,c = AuthToken.objects.get_or_create(deviceid = jsonData["UUID"])
if c:
remoteOTP.otp = ''.join(random.choice("ABCDEFGHKLMNPQRTUVWXYZ23467892346789") for _ in range(6))
remoteOTP.save()
response["otp"] = remoteOTP.otp
response["status"] = "unverified"
return HttpResponse(json.dumps(response))
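# A sketch of the pairing flow implemented above (informal): an unknown device
# POSTs its UUID and receives a 6-character one-time code drawn from an
# alphabet that omits easily-confused characters (no I, J, O, S, 0, 1 or 5);
# the user then types that code into the certificate-authenticated web UI at
# /add-device, which binds the UUID to their account and deletes the token.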
def add(request):
if request.method == 'GET':
return HttpResponse(status = 403)
elif request.method == 'POST':
jsonData = json.loads(request.body)
if "UUID" not in jsonData or "building" not in jsonData or "desc" not in jsonData:
return HttpResponse("Invalid request.")
try:
            user = User.objects.get(deviceid=jsonData["UUID"])
if not user.enabled:
return HttpResponse("_DISABLED")
except User.DoesNotExist:
return HttpResponse("_DEACT")
        APdata = [(int(a[:-1],16),int(s)) for a,s in jsonData.iteritems() if len(a)==12]
APdata.sort(key = lambda t: t[1])
APsignals = {}
for a,s in APdata:
if a in APsignals:
APsignals[a].append(int(s))
else:
APsignals[a] = [s]
APdata = []
for a,s in APsignals.iteritems():
s.sort()
del s[2:]
if s[0] < 85:
APdata.append((a,sum(s, 0.0) / len(s)))
APdata.sort(key = lambda t: t[1])
del APdata[4:]
#new=0
#TODO: make this create, not get_or_create
try:
            bldg=Building.objects.get(name=jsonData["building"])
except Building.DoesNotExist:
return HttpResponse("Unknown building.")
        l,c=Location.objects.get_or_create(building=bldg,desc=jsonData["desc"])
l.signals.all().delete()
l.reporter=user
for (mac,signal) in APdata:
a,c=AP.objects.get_or_create(mac=mac)
if signal < 85:
a.building=bldg
a.save()
#if c:
# new+=1
l.signals.create(ap=a,strength=signal)
l.save()
return HttpResponse("OK")
#return HttpResponse("Thanks, "+str(user.name.split(" ")[0])+"!\n"+request.POST["name"]+" signal data "+("updat" if c else "sav")+"ed.\n"+str(new)+" new APs saved.")
def heartbeat(request):
if request.method == 'GET':
return HttpResponse(status=403)
elif request.method == 'POST':
jsonData = json.loads(request.body)
response = {"status":0}
if "UUID" not in jsonData:
response["status"]=-1
return HttpResponse(json.dumps(response))
try:
user = User.objects.get(deviceid=jsonData["UUID"])
if not user.enabled:
response["status"]=1
return HttpResponse(json.dumps(response))
else:
response["sharing"] = user.sharing
response["shares"] = {}
for share in user.shares.all():
response["shares"][share.id] = {
"name" : share.name
}
response["faves"] = getFaves(user)
except User.DoesNotExist:
response["status"]=2
return HttpResponse(json.dumps(response))
APdata = filterAPs(jsonData["aps"])
if not APdata:
response["location"] = "Not on campus"
# APdata.sort(key = lambda t: t[1])
nearbyBuildings={}
for row in APdata:
try:
ap = AP.objects.get(mac=row[0])
if ap.building:
if ap.building in nearbyBuildings:
nearbyBuildings[ap.building] += 1/row[1]**2
else:
nearbyBuildings[ap.building] = 1/row[1]**2
except AP.DoesNotExist:
pass
if not nearbyBuildings:
response["location"] = "<b>Unknown</b><br>Location unknown"
return HttpResponse(json.dumps(response))
possibles = []
for bldg in nearbyBuildings:
for location in bldg.locations.filter(active=True):
sigs = location.signals.all()
dist = 0
for ap,signal in APdata:
sig = sigs.filter(ap=AP.objects.filter(mac=ap))
if sig:
expected = sig[0].strength
else:
expected = 95 if signal > 75 else 120
dist+=(expected-signal)**2
if dist < 2000:
possibles.append((location.building,location.floor,location,dist))
if possibles: # Determine most likely location out of possibles
possibles.sort(key=lambda d: d[3])
b,f,l,d = possibles[0]
response["location"] = "<b>"+str(b)+"</b><br>"+l.desc+" ("+str(d)+")"
#try round(100-7*math.log(d,10)) for a "confidence percentage"
user.location = l
user.age = datetime.today()
user.save()
# elif datetime.datetime - user.age < 1000:
# from django.contrib.humanize.templatetags import humanize
# text = "<b>"+str(user.location.building)+"</b><br>"+str(user.location.desc)+" ("+humanize.naturaltime(user.age)+")"
# return HttpResponse(text)
else: # Fall back to just building + floor
# user.location = l
s=0;w=0
for row in APdata:
try:
#TODO: use cached AP objects instead of accessing DB here
ap = AP.objects.get(mac=row[0])
weight = 1/row[1]**3
s+=int(ap.floor)*weight
w+=weight
except AP.DoesNotExist:
pass
f = int(round(s/w))
f = humanize.ordinal(f) + " floor" if f>0 else "Basement"
#f = str(round(s/w,2))
likelyBuilding = max(nearbyBuildings.iterkeys(), key=(lambda key: nearbyBuildings[key]))
#likelyBuilding = Building.objects.get(number=likelyBuildingNumber)
response["location"] = "<b>"+str(user)+"</b><br>"+(str(likelyBuilding.name) if likelyBuilding.name else "Building " + str(likelyBuilding.number)) + "<br>"+f
user.building = likelyBuilding
user.floor = int(round(s/w,0))
user.age = datetime.today()
user.save()
return HttpResponse(json.dumps(response))
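# A sketch of the fingerprint match used above (illustrative): each stored
# location is scored by
#
#     dist = sum((expected_strength - observed_strength) ** 2 for each AP seen)
#
# where a location lacking a reading for an AP contributes a default expected
# strength of 95 (observed > 75) or 120 (otherwise). Candidates with
# dist < 2000 are sorted and the smallest wins; failing that, the code falls
# back to a building guess weighted by 1/signal**2 plus a floor average
# weighted by 1/signal**3.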
def getFaves(user):
faves = []
for fave in user.faves.all().order_by('name'):
floor = humanize.ordinal(fave.floor) + " floor" if fave.floor>0 else "Basement"
fobj = {"id":fave.id, "name":fave.name, "email":fave.email}
if fave.age and fave.enabled and datetime.today()-fave.age < timedelta(hours=6) and (fave.sharing == 0 or (fave.sharing == 1 and user in fave.shares.all())):
if fave.building.id == 160: # Remove this hardcoded check once locations for buses are implemented
loc = str(fave.building)
else:
loc = str(fave.location) if fave.location else str(fave.building) + " - " + floor
fobj["location"] = loc
fobj["bldgid"] = fave.building.id
fobj["bldgnum"] = fave.building.number
fobj["bldg"] = str(fave.building)
fobj["age"] = humanize.naturaltime(fave.age)
#fobj["age"] = humanize.naturaltime(fave.age) if datetime.today()-fave.age > timedelta(seconds=15) else "Now"
else:
fobj["location"] = "Location not available"
faves.append(fobj)
return faves
def invite(request):
if request.method == 'GET':
return HttpResponse(status=403)
elif request.method == 'POST':
jsonData = json.loads(request.body)
response = {}
if all([key in jsonData for key in ["UUID", "username"]]):
try:
user = User.objects.get(deviceid=jsonData["UUID"])
if user.enabled:
invitee = jsonData['username']
con = ldap.open('ldap-too.mit.edu')
con.simple_bind_s("", "")
dn = "dc=mit,dc=edu"
fields = ['cn', 'sn', 'givenName', 'mail', ]
userfilter = ldap.filter.filter_format('uid=%s', (invitee, ))
result = con.search_s(dn, ldap.SCOPE_SUBTREE, userfilter, fields)
if len(result) == 1:
results = User.objects.filter(Q(email=result[0][1]['mail'][0]) | Q(email=jsonData["username"]+"@mit.edu"))
if results:
if results[0] == user:
response["message"] = "You can't add yourself."
else:
if results[0] in user.faves.all():
response["message"] = results[0].name + " is already your friend."
else:
user.faves.add(results[0])
user.save()
response["message"] = "Added " + results[0].name
else:
firstname = result[0][1]['givenName'][0]
lastname = result[0][1]['sn'][0]
msg=MIMEMultipart('alternative')
text="You've received an invitation to join MIT Locate!\n\nMuch like the Marauder's Map from the Harry Potter series, MIT Locate allows you to locate your friends on the MIT campus! Just visit https://locate.mit.edu/ to get started."
html="""\
<div height='100%' width='100%' style='background-color:#eeeeee;text-align:center;'>
<div border='0' style='min-width:550px;width:550px;margin:0 auto;text-align:center;padding:25px;'>
<div style='background-color:#340004;text-align:center;color:#E8D18E;font-family:Arial,sans-serif;font-size:24pt;font-weight:bold;padding:20px;-webkit-border-top-left-radius:15px;-webkit-border-top-right-radius:15px;'>You're A Wizard, """ + firstname + """
</div>
<div style='text-align:left;color:#340004;font-family:Arial,sans-serif;font-size:17px;background:#E8D18E;padding:20px 45px;line-height:30px;'>
<p>Hi there!</p>
<p>""" + user.name + """ has invited you to join MIT Locate. What's MIT Locate, you ask? It's a free service created exclusively for members of the MIT community that allows you to locate your friends on campus. Sounds like the <a href="http://harrypotter.wikia.com/wiki/Marauder's_Map">Marauder's Map</a>, huh? We like to call it the Marauder's <i>App</i>. Check it out by clicking the button below!</p>
</div>
<div style='background:#B30000;padding:15px;-webkit-border-bottom-left-radius:15px;-webkit-border-bottom-right-radius:15px;'>
<a href='https://locate.mit.edu/' style='text-decoration:none;color:#E8D18E;text-align:center;font-family:Arial,sans-serif;font-size:28pt;font-weight:bold;'>SIGN UP</a>
</div><br/>
<a style='text-decoration:none; margin-top: 20px; color:#999999;font-family:Arial,sans-serif; font-size:14px;' href="https://locate.mit.edu/privacy">Privacy policy</a>
</div>
</div>\
"""
                            msg.attach(MIMEText(text, 'plain'))
msg.attach(MIMEText(html, 'html'))
msg['Subject'] = 'You\'re a wizard, ' + firstname
fromField = '"' + user.name + ' via MIT Locate" <'+ user.email + '>'
toField = '"' + firstname + ' ' + lastname + '" <' + invitee + '@mit.edu>'
msg['From'] = fromField
msg['To'] = toField
#msg.add_header('reply-to', user.email)
s = smtplib.SMTP('localhost')
#s.sendmail(fromField, toField, msg.as_string())
s.quit()
user.save()
response["message"] = "Invited " + firstname + "."
else:
response["message"] = "MIT people only!"
except User.DoesNotExist:
pass
return HttpResponse(json.dumps(response))
def feedback(request):
if request.method == 'GET':
return HttpResponse(status=403)
elif request.method == 'POST':
jsonData = json.loads(request.body)
response = {"success":False}
if all([key in jsonData for key in ["UUID", "message"]]):
try:
user = User.objects.get(deviceid=jsonData["UUID"])
if user.enabled:
msg=MIMEText(jsonData['message'], 'plain')
msg['Subject'] = 'MIT Locate mobile app feedback'
fromField = '"' + user.name + ' (via MIT Locate)" <'+ user.email + '>'
msg['From'] = fromField
msg['To'] = '"MIT Locate Feedback" <locate@mit.edu>'
#msg.add_header('reply-to', user.email)
s = smtplib.SMTP('localhost')
s.sendmail(fromField, '"MIT Locate Feedback" <locate@mit.edu>', msg.as_string())
s.quit()
response["success"] = True
except User.DoesNotExist:
pass
return HttpResponse(json.dumps(response))
def sharing(request):
if request.method == 'GET':
return HttpResponse(status=403)
elif request.method == 'POST':
jsonData = json.loads(request.body)
response = {"success":False}
if all([key in jsonData for key in ["UUID", "mode"]]) and 0 <= int(jsonData["mode"]) <= 2:
try:
user = User.objects.get(deviceid=jsonData["UUID"])
if user.enabled:
user.sharing = int(jsonData["mode"])
user.save()
response["success"] = True
except User.DoesNotExist:
pass
return HttpResponse(json.dumps(response))
def updateFriend(request):
if request.method == 'GET':
return HttpResponse(status=403)
elif request.method == 'POST':
jsonData = json.loads(request.body)
response = {"success":False}
if "UUID" in jsonData:
try:
user = User.objects.get(deviceid=jsonData["UUID"])
if user.enabled:
if "remove" in jsonData:
user.faves.remove(User.objects.get(pk=jsonData["remove"]))
# maybe remove this later
user.shares.remove(User.objects.get(pk=jsonData["remove"]))
if "share" in jsonData:
user.shares.add(User.objects.get(pk=jsonData["share"]))
if "unshare" in jsonData:
user.shares.remove(User.objects.get(pk=jsonData["unshare"]))
user.save()
response["success"] = True
except User.DoesNotExist:
pass
return HttpResponse(json.dumps(response))
def filterAPs(aps):
APsignals = {}
for ap in aps:
bssid = ap["bssid"]
ssid = ap["ssid"]
signal = int(ap["signal"])
def add(bssid):
if bssid in APsignals:
APsignals[bssid].append(signal)
else:
APsignals[bssid] = [signal]
if ssid in ("MITguest", "MITpass"):
add("00" + bssid[2:])
elif ssid in ("MIT", "MIT N", "MIT SECURE", "MIT SECURE N", "MIT GUEST", "Media Lab 5Ghz", "EECS@Stata", "Media Lab", "EECS-MTL-RLE", "Stata Center"):
add(bssid[:-1] + "0")
APdata = []
for bssid,signal in APsignals.iteritems():
signal.sort()
del signal[2:]
APdata.append((bssid,sum(signal, 0.0) / len(signal)))
return APdata
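# A hedged example of the normalization above (MAC value hypothetical):
# presumably because campus APs broadcast several SSIDs per radio, readings
# are merged onto one canonical BSSID -- "MITguest"/"MITpass" zero the first
# byte, the other listed SSIDs zero the last nibble -- and, per canonical
# BSSID, only the two lowest readings are kept and averaged:
#
#     {"bssid": "0011223344a7", "ssid": "MIT", "signal": "70"}
#     # -> contributes a reading of 70 under the key "0011223344a0"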
def fetchAPs(request):
if request.method == 'GET':
return HttpResponse(status=403)
elif request.method == 'POST':
jsonData = json.loads(request.body)
response = {}
if "UUID" in jsonData:
try:
user = User.objects.get(deviceid=jsonData["UUID"])
if not user.enabled:
return HttpResponse(json.dumps(response))
APdata = filterAPs(jsonData["aps"])
response["aps"] = []
for bssid,signal in APdata:
try:
ap=AP.objects.get(mac=bssid)
try:
com = ap.comment
except AttributeError:
com = ""
try:
aptype = str(ap.type)
except AttributeError:
aptype = 0
try:
bnum = ap.building.number
except (AttributeError, Building.DoesNotExist):
bnum = ""
try:
flr = str(ap.floor)
except AttributeError:
flr = ""
response["aps"].append({
"bssid":bssid,
"type":aptype,
"comment":com,
"building":bnum,
"floor":flr,
"signal":str(int(round(signal,0)))
})
except AP.DoesNotExist:
response["aps"].append({
"bssid":bssid,
"type":0,
"comment":"",
"building":"",
"floor":"",
"signal":str(int(round(signal,0)))
})
response["aps"].sort(key=lambda ap: ap["signal"])
except User.DoesNotExist:
pass
return HttpResponse(json.dumps(response))
def updateAP(request):
if request.method == 'GET':
return HttpResponse(status=403)
elif request.method == 'POST':
jsonData = json.loads(request.body)
response = {"success":False}
if all([key in jsonData for key in ["UUID", "bssid", "comment", "building", "floor"]]):
try:
user = User.objects.get(deviceid=jsonData["UUID"])
if user.enabled:
ap,c=AP.objects.get_or_create(mac=jsonData["bssid"])
ap.comment=jsonData["comment"]
try:
ap.building=Building.objects.get(number=jsonData["building"])
ap.floor=int(jsonData["floor"]) if jsonData["floor"] else 1
ap.save()
response["success"]=True
except Building.DoesNotExist:
pass
except User.DoesNotExist:
pass
return HttpResponse(json.dumps(response))
def deactivate(request):
if request.method == 'GET':
return HttpResponse(status=403)
elif request.method == 'POST':
jsonData = json.loads(request.body)
response = {"success":False}
if "UUID" in jsonData:
try:
user = User.objects.get(deviceid=jsonData["UUID"])
if user.enabled:
user.deviceid=""
user.save()
response["success"]=True
except User.DoesNotExist:
pass
return HttpResponse(json.dumps(response))
|
abevac/mitlocate-web
|
serverus/views.py
|
Python
|
mit
| 31,956
|
[
"VisIt"
] |
9962c331d566287530891fff67e4b858882067be664e423b8339700a07e18965
|
# -*- coding: utf-8 -*-
"""Akvo RSR is covered by the GNU Affero General Public License.
See more details in the license.txt file located at the root folder of the Akvo RSR module.
For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
"""
from decimal import Decimal, InvalidOperation
import itertools
from django.conf import settings
from django.contrib.admin.models import LogEntry, ADDITION, CHANGE
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError, ObjectDoesNotExist, MultipleObjectsReturned
from django.core.mail import send_mail
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from django.apps import apps
from django.db.models import Sum
from django.db.models.signals import post_save, post_delete
from django.dispatch import receiver
from django.urls import reverse
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from django.db import transaction
from django.db.models import Q
from django.db.models import JSONField
from django.utils.functional import cached_property
from sorl.thumbnail.fields import ImageField
from akvo.codelists.models import (AidType, ActivityScope, ActivityStatus, CollaborationType,
FinanceType, FlowType, TiedStatus)
from akvo.codelists.store.default_codelists import (
AID_TYPE_VOCABULARY, ACTIVITY_SCOPE, ACTIVITY_STATUS, COLLABORATION_TYPE, CURRENCY,
FINANCE_TYPE, FLOW_TYPE, TIED_STATUS, BUDGET_IDENTIFIER_VOCABULARY
)
from akvo.utils import (codelist_choices, codelist_value, codelist_name, rsr_image_path,
rsr_show_keywords, single_period_dates)
from ..fields import ProjectLimitedTextField, ValidXMLCharField, ValidXMLTextField
from ..mixins import TimestampsMixin
from .iati_check import IatiCheck
from .result import IndicatorPeriod
from .model_querysets.project import ProjectQuerySet
from .partnership import Partnership
from .project_update import ProjectUpdate
from .project_editor_validation import ProjectEditorValidationSet
from .publishing_status import PublishingStatus
from .related_project import RelatedProject
from .budget_item import BudgetItem
DESCRIPTIONS_ORDER = [
'project_plan_summary', 'goals_overview', 'background', 'current_status', 'target_group',
'project_plan', 'sustainability']
def get_default_descriptions_order():
return DESCRIPTIONS_ORDER
def image_path(instance, file_name):
return rsr_image_path(instance, file_name, 'db/project/%(instance_pk)s/%(file_name)s')
class MultipleReportingOrgs(Exception):
pass
class Project(TimestampsMixin):
CURRENCY_CHOICES = codelist_choices(CURRENCY)
HIERARCHY_OPTIONS = (
(1, _('Core Activity')),
(2, _('Sub Activity')),
(3, _('Lower Sub Activity'))
)
LANGUAGE_OPTIONS = (
('de', _('German')),
('en', _('English')),
('es', _('Spanish')),
('fr', _('French')),
('nl', _('Dutch')),
('ru', _('Russian'))
)
TARGETS_AT_OPTION = (
('period', _('Period')),
('indicator', _('Indicator')),
('both', _('Both'))
)
STATUS_NONE = 'N'
STATUS_NEEDS_FUNDING = 'H'
STATUS_ACTIVE = 'A'
STATUS_COMPLETE = 'C'
STATUS_CANCELLED = 'L'
STATUS_ARCHIVED = 'R'
STATUSES = (
(STATUS_NONE, ''),
(STATUS_NEEDS_FUNDING, _('Needs funding')),
(STATUS_ACTIVE, _('Active')),
(STATUS_COMPLETE, _('Complete')),
(STATUS_CANCELLED, _('Cancelled')),
(STATUS_ARCHIVED, _('Archived')),
)
STATUSES_COLORS = {
'': 'grey',
'1': 'orange',
'2': '#AFF167',
'3': 'grey',
'4': 'grey',
'5': 'red',
'6': 'grey',
}
CODE_TO_STATUS = {
'': 'N',
'1': 'H',
'2': 'A',
'3': 'C',
'4': 'C',
'5': 'L',
'6': 'C'
}
STATUS_TO_CODE = {
'N': '',
'H': '1',
'A': '2',
'C': '3',
'L': '5',
'R': '3'
}
# Status combinations used in conditionals
EDIT_DISABLED = []
DONATE_DISABLED = ['', '3', '4', '5', '6']
NOT_SUSPENDED = ['', '1', '2', '3', '4', '5']
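    # A hedged reading of the maps above: IATI activity-status codes map onto
    # the internal one-letter statuses (e.g. code '2', implementation, becomes
    # 'A', Active); the reverse map is lossy, sending both 'C' (Complete) and
    # 'R' (Archived) back to code '3'.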
title = ValidXMLCharField(_('project title'), max_length=200, db_index=True, blank=True)
subtitle = ValidXMLCharField(_('project subtitle'), max_length=200, blank=True)
status = ValidXMLCharField(
_('status'), max_length=1, choices=STATUSES, db_index=True, default=STATUS_NONE
)
iati_status = ValidXMLCharField(
_('status'), max_length=1, choices=(codelist_choices(ACTIVITY_STATUS)), db_index=True,
blank=True,
help_text=_('There are six different project statuses:<br/>'
'1) Pipeline/identification: the project is being scoped or planned<br/>'
'2) Implementation: the project is currently being implemented<br/>'
'3) Completion: the project is complete or the final disbursement has been '
'made<br/>'
'4) Post-completion: the project is complete or the final disbursement has '
'been made, '
'but the project remains open pending financial sign off or M&E<br/>'
'5) Cancelled: the project has been cancelled<br/>'
'6) Suspended: the project has been temporarily suspended '
'or the reporting partner no longer uses RSR.')
)
categories = models.ManyToManyField(
'Category', verbose_name=_('categories'), related_name='projects', blank=True
)
partners = models.ManyToManyField(
'Organisation', verbose_name=_('partners'), through='Partnership', related_name='projects',
blank=True,
)
project_plan_summary = ProjectLimitedTextField(
_('summary of project plan'), max_length=2000, blank=True,
help_text=_('Enter a brief summary, try to restrict the number of characters to 400 in '
'order to display the summary nicely on the project page. The summary should '
'explain:<br>'
'- Why the project is being carried out;<br>'
'- Where it is taking place;<br>'
'- Who will benefit and/or participate;<br>'
'- What it specifically hopes to accomplish;<br>'
'- How those specific goals will be reached')
)
current_image = ImageField(
_('photo'), blank=True, upload_to=image_path,
help_text=_('Add your project photo here. You can only add one photo. If you have more, '
'you can add them via RSR updates when your project is published. A photo '
'album will feature on the project page. The photo should not be larger '
'than 2 MB in size, and should preferably be in JPG format.'),
)
current_image_caption = ValidXMLCharField(
_('photo caption'), blank=True, max_length=60,
help_text=_('Briefly describe who or what you see in the photo.')
)
current_image_credit = ValidXMLCharField(
_('photo credit'), blank=True, max_length=60,
help_text=_('Enter who took the photo.')
)
goals_overview = ValidXMLTextField(
_('goals overview'), blank=True,
help_text=_('Provide a brief description of the overall project goals. For links and '
'styling of the text, <a href="https://github.com/adam-p/markdown-here/wiki/'
'Markdown-Cheatsheet" target="_blank">Markdown</a> is supported.')
)
current_status = ValidXMLTextField(
_('baseline situation'), blank=True,
help_text=_('Describe the situation at the start of the project. For links and styling of '
'the text, <a href="https://github.com/adam-p/markdown-here/wiki/Markdown-'
'Cheatsheet" target="_blank">Markdown</a> is supported.')
)
project_plan = ValidXMLTextField(
_('project plan'), blank=True,
help_text=_('Detailed information about the implementation of the project: the what, how, '
'who and when. For links and styling of the text, <a href="https://github.com/'
'adam-p/markdown-here/wiki/Markdown-Cheatsheet" target="_blank">Markdown</a> '
'is supported.')
)
sustainability = ValidXMLTextField(
_('sustainability'), blank=True,
help_text=_('Describe how you aim to guarantee sustainability of the project until 10 '
'years after project implementation. Think about the institutional setting, '
'capacity-building, a cost recovery plan, products used, feasible '
'arrangements for operation and maintenance, anticipation of environmental '
'impact and social integration. For links and styling of the text, '
'<a href="https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet" '
'target="_blank">Markdown</a> is supported.')
)
background = ValidXMLTextField(
_('background'), blank=True,
help_text=_('This should describe the geographical, political, environmental, social '
'and/or cultural context of the project, and any related activities that '
'have already taken place or are underway. For links and styling of the text, '
'<a href="https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet" '
'target="_blank">Markdown</a> is supported.')
)
target_group = ProjectLimitedTextField(
_('target group'), blank=True,
help_text=_('This should include information about the people, organisations or resources '
'that are being impacted by this project. For links and styling of the text, '
'<a href="https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet" '
'target="_blank">Markdown</a> is supported.')
)
descriptions_order = JSONField(default=get_default_descriptions_order)
# Result aggregation
aggregate_children = models.BooleanField(
_('Aggregate results data from child projects'), default=True,
help_text=_('By selecting this option, the results data of child projects will be aggregated to this project. '
'In the child project(s), this can be turned off per project as well.')
)
aggregate_to_parent = models.BooleanField(
_('Aggregate results data to parent project'), default=True,
help_text=_('By selecting this option, the results data of this project will be aggregated '
'to the parent project.')
)
# Results framework (always on)
is_impact_project = models.BooleanField(
_('is rsr impact project'), default=True,
help_text=_('Determines whether the results framework is active for this project.')
)
# Private projects
is_public = models.BooleanField(
_('is public project'), default=True,
help_text=_('Determines whether this project is a public project.')
)
# project meta info
language = ValidXMLCharField(
max_length=2, choices=LANGUAGE_OPTIONS, blank=True,
help_text=_('Enter the language used when entering the details for this project.')
)
notes = ValidXMLTextField(
_('project comments'), blank=True,
help_text=_('The project comments are only for internal use and will not be displayed '
'anywhere on the project page.')
)
keywords = models.ManyToManyField(
'Keyword', verbose_name=_('keyword'), related_name='projects', blank=True,
help_text=_('Choose a keyword to link to this project.')
)
targets_at = ValidXMLCharField(
max_length=9, choices=TARGETS_AT_OPTION, default='period',
help_text=_('Which project attributes that has a target value')
)
# budget
currency = ValidXMLCharField(
_('currency'), choices=CURRENCY_CHOICES, max_length=3, default='EUR',
help_text=_('The default currency for this project. Used in all financial '
'aspects of the project.')
)
date_start_planned = models.DateField(
_('start date (planned)'), null=True, blank=True,
help_text=_('Enter the original start date of the project (DD/MM/YYYY).')
)
date_start_actual = models.DateField(
_('start date (actual)'), null=True, blank=True,
help_text=_('Enter the actual start date of the project (DD/MM/YYYY).')
)
date_end_planned = models.DateField(
_('end date (planned)'), null=True, blank=True,
help_text=_('Enter the original end date of the project (DD/MM/YYYY).')
)
date_end_actual = models.DateField(
_('end date (actual)'), null=True, blank=True,
help_text=_('Enter the actual end date of the project (DD/MM/YYYY).')
)
primary_location = models.ForeignKey('ProjectLocation', null=True, on_delete=models.SET_NULL)
# primary_organisation is a denormalized field used for performance of the project list page
primary_organisation = models.ForeignKey('Organisation', null=True, on_delete=models.SET_NULL)
# donate url
donate_url = models.URLField(
_('donate url'), null=True, blank=True, max_length=200,
help_text=_('Add a donation url for this project. If no URL is added, it is not possible '
'to donate to this project through RSR.')
)
# donations
donations = models.DecimalField(
max_digits=14, decimal_places=2, blank=True, null=True, db_index=True, default=0,
help_text=_('The total sum of donations the project has already received.')
)
# extra IATI fields
iati_activity_id = ValidXMLCharField(
_('IATI identifier'), max_length=100, blank=True, db_index=True, null=True, unique=True,
help_text=_('This is a globally unique identifier for this activity. It is a requirement '
'to be compliant with the IATI standard. This code consists of: '
'[country code]-[Chamber of Commerce number]-[organisation’s internal project '
'code]. For Dutch organisations this is e.g. NL-KVK-31156201-TZ1234. For more '
'information see') + ' <a href="http://iatistandard.org/202/activity-standard/'
'iati-activities/iati-activity/iati-identifier/'
'#definition" target="_blank">http://iatistandard.org/'
'202/activity-standard/iati-activities/iati-activity/'
'iati-identifier/#definition</a>'
)
hierarchy = models.PositiveIntegerField(
_('hierarchy'), null=True, blank=True, choices=HIERARCHY_OPTIONS,
help_text=_('If you are reporting multiple levels of projects in RSR, you can specify '
'whether this is a core, sub, or lower sub activity here.')
)
project_scope = ValidXMLCharField(
_('project scope'), blank=True, max_length=2, choices=codelist_choices(ACTIVITY_SCOPE),
help_text=_('Select the geographical scope of the project.')
)
capital_spend_percentage = models.DecimalField(
_('capital spend percentage'), blank=True, null=True, max_digits=4, decimal_places=1,
validators=[MaxValueValidator(100), MinValueValidator(0)],
help_text=_('The percentage of the total commitment allocated to or planned for capital '
'expenditure. Content must be a positive decimal number between 0 and 100, '
'with no percentage sign. Use a period to denote decimals.')
)
collaboration_type = ValidXMLCharField(
_('collaboration type'), blank=True, max_length=1,
choices=codelist_choices(COLLABORATION_TYPE),
help_text=_('This is the IATI identifier for the type of collaboration involved. For '
'reference, please visit: <a href="http://iatistandard.org/202/codelists/'
'CollaborationType/" target="_blank">http://iatistandard.org/202/codelists/'
'CollaborationType/</a>.')
)
default_aid_type_vocabulary = ValidXMLCharField(
_('default aid type vocabulary'), blank=True, max_length=1, default='1',
choices=codelist_choices(AID_TYPE_VOCABULARY),
help_text=_('This is the IATI identifier for the type of vocabulary being used for '
'describing the type of the aid being supplied or activity '
'being undertaken. For reference, please visit: <a '
'href="http://iatistandard.org/203/codelists/AidTypeVocabulary/" target='
'"_blank"> http://iatistandard.org/203/codelists/AidTypeVocabulary/</a>.')
)
default_aid_type = ValidXMLCharField(
_('default aid type'),
blank=True, max_length=3,
help_text=_('This is the IATI identifier for the type of aid being supplied or activity '
'being undertaken. This element specifies a default for all the project’s '
'financial transactions. This can be overridden at the individual transaction '
'level. For reference, please visit: <a href="http://iatistandard.org/202/'
'codelists/AidType/" target="_blank">http://iatistandard.org/202/codelists/'
'AidType/</a>.')
)
default_finance_type = ValidXMLCharField(
_('default finance type'), blank=True, max_length=3,
choices=codelist_choices(FINANCE_TYPE),
help_text=_('This is the IATI identifier for the type of finance. This element specifies '
'a default for all the transactions in the project’s activity report; it can '
'be overridden at the individual transaction level. For reference visit: '
'<a href="http://iatistandard.org/202/codelists/FinanceType/" target="_blank">'
'http://iatistandard.org/202/codelists/FinanceType/</a>.')
)
default_flow_type = ValidXMLCharField(
_('default flow type'), blank=True, max_length=2, choices=codelist_choices(FLOW_TYPE),
help_text=_('This is the IATI identifier for how the activity (project) is funded. For '
'reference, please visit: <a href="http://iatistandard.org/202/codelists/'
'FlowType/" target="_blank">http://iatistandard.org/202/codelists/'
'FlowType/</a>.')
)
default_tied_status = ValidXMLCharField(
_('default tied status'), blank=True, max_length=10, choices=codelist_choices(TIED_STATUS),
help_text=_('This element specifies a default for all the activity’s financial '
'transactions; it can be overridden at the individual transaction level. For '
'reference, please visit: <a href="http://iatistandard.org/202/codelists/'
'TiedStatus/" target="_blank">http://iatistandard.org/202/codelists/'
'TiedStatus/</a>.')
)
country_budget_vocabulary = ValidXMLCharField(
_('country budget vocabulary'), blank=True, max_length=1,
choices=codelist_choices(BUDGET_IDENTIFIER_VOCABULARY),
help_text=_('Enter an IATI code for the common functional classification or country '
'system (this allows for common codes, country-specific codes, or any other '
'classification agreed between countries and donors) see: '
'<a href="http://iatistandard.org/202/codelists/BudgetIdentifierVocabulary/" '
'target="_blank">http://iatistandard.org/202/codelists/'
'BudgetIdentifierVocabulary/</a>.')
)
humanitarian = models.BooleanField(
_('humanitarian project'), null=True,
help_text=_('Determines whether this project relates entirely or partially to humanitarian aid.'),
)
# Project editor settings
validations = models.ManyToManyField(
'ProjectEditorValidationSet', verbose_name=_('validations'), related_name='projects'
)
use_project_roles = models.BooleanField(
verbose_name=_(u"use project roles"),
default=False,
help_text=_(u'Toggle between using project roles and employment based permissions'))
run_iati_checks = models.BooleanField(
verbose_name=_(u"run iati checks"),
default=False,
help_text=_(u'Flag to indicate that the project has pending IATI checks to be run')
)
# denormalized data
budget = models.DecimalField(
_('project budget'), max_digits=14, decimal_places=2, blank=True, null=True,
db_index=True, default=0
)
funds = models.DecimalField(
max_digits=14, decimal_places=2, blank=True, null=True, db_index=True, default=0
)
funds_needed = models.DecimalField(
max_digits=14, decimal_places=2, blank=True, null=True, db_index=True, default=0
)
last_update = models.ForeignKey(
ProjectUpdate, related_name='the_project', null=True, on_delete=models.SET_NULL
)
objects = ProjectQuerySet.as_manager()
class Meta:
app_label = 'rsr'
verbose_name = _('project')
verbose_name_plural = _('projects')
ordering = ['-id', ]
permissions = (
('post_updates', 'Can post updates'),
)
def delete(self, using=None, keep_parents=False):
# Delete results on the project, before trying to delete the project,
# since the RelatedProject object on the project refuses to get deleted
# if there are existing results, causing the delete to raise 500s
self.results.all().delete()
return super(Project, self).delete(using=using, keep_parents=keep_parents)
def save(self, *args, **kwargs):
# Strip title of any trailing or leading spaces
if self.title:
self.title = self.title.strip()
# Strip subtitle of any trailing or leading spaces
if self.subtitle:
self.subtitle = self.subtitle.strip()
# Strip IATI ID of any trailing or leading spaces
if self.iati_activity_id:
self.iati_activity_id = self.iati_activity_id.strip()
# In order for the IATI activity IDs to be unique, we set them to None when they're empty
if not self.iati_activity_id:
self.iati_activity_id = None
orig, orig_aggregate_children, orig_aggregate_to_parent = None, None, None
if self.pk:
orig = Project.objects.get(pk=self.pk)
# Update funds and funds_needed if donations change. Any other
# changes (budget, pledged amounts, ...) are handled by signals.
if self.donations != orig.donations:
self.funds = self.get_funds()
self.funds_needed = self.get_funds_needed()
# Update legacy status field
if self.iati_status != orig.iati_status:
self.status = self.CODE_TO_STATUS[self.iati_status]
super(Project, self).save(update_fields=['status'])
# Update IATI status field
if self.status != orig.status:
self.iati_status = self.STATUS_TO_CODE[self.status]
super(Project, self).save(update_fields=['iati_status'])
# Root project with modified targets_at must propagate change to children
if self.targets_at != orig.targets_at and hasattr(self, "projecthierarchy"):
descendants = self.descendants()
descendants.exclude(pk=self.pk).update(targets_at=self.targets_at)
orig_aggregate_children = orig.aggregate_children
orig_aggregate_to_parent = orig.aggregate_to_parent
super(Project, self).save(*args, **kwargs)
if orig:
# Update aggregation from children
if self.aggregate_children != orig_aggregate_children:
for period in IndicatorPeriod.objects.filter(indicator__result__project_id=self.pk):
if self.aggregate_children:
period.recalculate_period()
else:
period.recalculate_period(only_self=True)
# Update aggregation to parent
if self.aggregate_to_parent != orig_aggregate_to_parent:
for period in IndicatorPeriod.objects.filter(indicator__result__project_id=self.pk):
if period.parent_period:
period.parent_period.recalculate_period()
def clean(self):
# Don't allow a start date before an end date
if self.date_start_planned and self.date_end_planned and \
(self.date_start_planned > self.date_end_planned):
raise ValidationError(
{'date_start_planned': '%s' % _('Start date (planned) cannot be at a later '
'time than end date (planned).'),
'date_end_planned': '%s' % _('Start date (planned) cannot be at a later '
'time than end date (planned).')}
)
if self.date_start_actual and self.date_end_actual and \
(self.date_start_actual > self.date_end_actual):
raise ValidationError(
{'date_start_actual': '%s' % _('Start date (actual) cannot be at a later '
'time than end date (actual).'),
'date_end_actual': '%s' % _('Start date (actual) cannot be at a later '
'time than end date (actual).')}
)
def get_absolute_url(self):
return reverse('project-main', kwargs={'project_id': self.pk})
@property
def cacheable_url(self):
# Language codes are 2 chars long; strip the leading '/xx' from the URL
return self.get_absolute_url()[3:]
@cached_property
def is_unep_project(self):
return 'UNEP Marine Litter Stocktake' in self.keyword_labels()
def accepts_donations(self):
"""Returns True if a project accepts donations, otherwise False.
A project accepts donations when the donate url is set, the project is published,
the project needs funding and is not cancelled or archived."""
if self.donate_url and self.is_published() and self.funds_needed > 0 and \
self.iati_status not in Project.DONATE_DISABLED:
return True
return False
# New API, de-normalized fields support
def get_budget(self):
budgets = self.budget_items.filter(amount__gt=0)
total_budgets = budgets.filter(label__label='Total')
if total_budgets.exists():
revised_total_budgets = total_budgets.filter(type='2')
if revised_total_budgets.exists():
return revised_total_budgets.order_by('-pk')[0].amount
else:
return total_budgets.order_by('-pk')[0].amount
elif budgets.exists():
summed_up_budget = 0
for budget in budgets:
if budgets.filter(label=budget.label, type='2').exists():
if budget == budgets.filter(label=budget.label, type='2').order_by('-pk')[0]:
summed_up_budget += budget.amount
else:
summed_up_budget += budget.amount
return summed_up_budget
else:
return 0
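# Illustrative sketch of the selection rules above (hypothetical data):
# budget items Total/original 1000, Total/revised 1200, Other/original 300
#   >>> project.get_budget()   # a revised 'Total' wins over everything else
#   1200
# With no 'Total' labels, the latest revision per label is summed instead.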
def get_budget_project_currency(self):
qs = BudgetItem.objects.filter(project__id=self.pk).filter(currency__exact='').aggregate(Sum('amount'))
budget_project_currency = list(qs.values())[0] or 0
return budget_project_currency if budget_project_currency >= 1 else 0.0
def update_budget(self):
"Update de-normalized field"
self.budget = self.get_budget()
self.save()
def get_pledged(self):
""" How much is pledges by funding organisations"""
return Partnership.objects.filter(project__exact=self).filter(
iati_organisation_role__exact=Partnership.IATI_FUNDING_PARTNER
).aggregate(Sum('funding_amount'))['funding_amount__sum'] or 0
def get_funds(self):
""" All money given to a project"""
return self.donations + self.get_pledged()
def update_funds(self):
"Update de-normalized field"
self.funds = self.get_funds()
self.save()
def get_funds_needed(self):
"""
How much more is needed to fulfill the project's budget needs. In case of a negative value
or a value less than 1, the value is set to 0.
"""
funds_needed = self.get_budget() - self.get_funds()
return funds_needed if funds_needed >= 1 else 0.0
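# Worked example (hypothetical figures): budget 1000, donations 150,
# pledged 250 -> funds 400, funds_needed 600. A shortfall below 1 is
# reported as 0.0, e.g. budget 400.5, funds 400 -> 0.0.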
def get_funds_needed_project_currency(self):
"Funds need in project currency, only used if budget items have multiple currencies"
funds_needed = Decimal(self.get_budget_project_currency()) - self.get_funds()
return funds_needed if funds_needed >= 1 else 0.0
def update_funds_needed(self):
"Update de-normalized field"
self.funds_needed = self.get_funds_needed()
self.save()
# End new API
@property
def last_modified_by(self):
"""Return the user who last edited this project and when the edit was made."""
entries = LogEntry.objects.filter(
object_id=str(self.id),
content_type=ContentType.objects.get_for_model(self),
action_flag=CHANGE,
).order_by('action_time')
if not entries.exists():
return None
last_entry = entries.last()
user_id = last_entry.user_id
last_modified_at = last_entry.action_time
User = get_user_model()
return dict(user=User.objects.only('first_name', 'last_name', 'email').get(id=user_id),
last_modified_at=last_modified_at)
@property
def reporting_partner(self):
""" In some cases we need the partnership object instead of the organisation to be able to
access is_secondary_reporter
"""
try:
return self.partnerships.get(
iati_organisation_role=Partnership.IATI_REPORTING_ORGANISATION)
except ObjectDoesNotExist:
return None
except MultipleObjectsReturned:
# A project with multiple reporting organisations should not happen, but in practice
# it sometimes does unfortunately. In these cases we check if there's one "primary
# reporter" and return that. If not, we return the first reporting organisation.
primary_reporters = self.partnerships.filter(
iati_organisation_role=Partnership.IATI_REPORTING_ORGANISATION).exclude(
is_secondary_reporter=True)
if primary_reporters.count() == 1:
return primary_reporters[0]
else:
return self.partnerships.filter(
iati_organisation_role=Partnership.IATI_REPORTING_ORGANISATION)[0]
@property
def reporting_org(self):
""" Returns the organisation of the partnership that is the reporting-org, if there is one
"""
return self.reporting_partner.organisation if self.reporting_partner else None
def organisation_codelist(self):
"""Return organisation specific custom codelist, if any."""
if self.reporting_org:
return self.reporting_org.codelist
return None
@property
def publishing_orgs(self):
"""
Returns the organisations that have the right to publish the project. In other words, that
have Organisation.can_create_projects set to True.
"""
return self.partners.filter(can_create_projects=True)
def set_reporting_org(self, organisation):
""" Set the reporting-org for the project."""
if self.reporting_partner is not None:
partnership = self.reporting_partner
partnership.organisation = organisation
partnership.save(update_fields=['organisation'])
else:
Partnership.objects.create(
project=self,
organisation=organisation,
iati_organisation_role=Partnership.IATI_REPORTING_ORGANISATION
)
def set_accountable_partner(self, organisation):
"""Set the organisation as an accountable partner."""
try:
Partnership.objects.get_or_create(
project=self,
organisation=organisation,
iati_organisation_role=Partnership.IATI_ACCOUNTABLE_PARTNER
)
except Partnership.MultipleObjectsReturned:
# Ignore if there are one or more such partnerships
pass
def countries(self):
"""Return a list of countries for the project."""
country_codes = {c.country.lower() for c in self.recipient_countries.all()}
return (
[country for country in self.recipient_countries.all()]
+ [
location.country for location in self.locations.all()
if location.country and location.country.iso_code not in country_codes
]
)
def __str__(self):
return '%s' % self.title
def updates_desc(self):
"""ProjectUpdate list for self, newest first."""
return self.project_updates.select_related('user')
def show_status(self):
"Show the current project status"
if self.iati_status != '0':
return mark_safe(
"<span style='color: %s;'>%s</span>" % (self.STATUSES_COLORS[self.iati_status],
codelist_name(ActivityStatus, self, 'iati_status'))
)
else:
return ''
def show_plain_status(self):
"Show the current project status value without styling"
if self.iati_status != '0':
return codelist_name(ActivityStatus, self, 'iati_status')
else:
return ''
def show_keywords(self):
return rsr_show_keywords(self)
show_keywords.short_description = 'Keywords'
show_keywords.allow_tags = True
show_keywords.admin_order_field = 'keywords'
def is_published(self):
if self.publishingstatus:
return self.publishingstatus.status == PublishingStatus.STATUS_PUBLISHED
return False
is_published.boolean = True
def publish(self):
"""Set the publishing status to published."""
self.publishingstatus.status = PublishingStatus.STATUS_PUBLISHED
self.publishingstatus.save()
def unpublish(self):
"""Set the publishing status to unpublished."""
self.publishingstatus.status = PublishingStatus.STATUS_UNPUBLISHED
self.publishingstatus.save()
def is_empty(self):
exclude_fields = ['benchmarks', 'categories', 'created_at', 'crsadd', 'currency',
'custom_fields', 'fss', 'iati_checks', 'iati_project_exports',
'iatiexport', 'iatiimportjob', 'id', 'is_impact_project', 'is_public',
'last_modified_at', 'partners', 'partnerships', 'primary_organisation',
'primary_organisation_id', 'publishingstatus', 'status', 'validations']
for field in Project._meta.get_all_field_names():
if field not in exclude_fields:
field_value = getattr(self, field)
m2m_field = getattr(field_value, 'all', None)
if (m2m_field and m2m_field()) or (not m2m_field and getattr(self, field)):
return False
return True
def budget_total(self):
return Project.objects.budget_total().get(pk=self.pk).budget_total
def has_multiple_budget_currencies(self):
# Using a python loop for iteration, because it's faster when
# budget_items have been pre-fetched
budget_items = self.budget_items.all()
num_currencies = len(
set([self.currency] + [c.currency for c in budget_items if c.currency])
)
return num_currencies > 1
def budget_currency_totals(self):
budget_items = BudgetItem.objects.filter(project__id=self.pk)
unique_currencies = {c.currency if c.currency else self.currency for c in budget_items}
totals = {}
for c in unique_currencies:
if c == self.currency:
totals[c] = list(budget_items.filter(Q(currency='') | Q(currency=c)).aggregate(Sum('amount')).values())[0]
else:
totals[c] = list(budget_items.filter(currency=c).aggregate(Sum('amount')).values())[0]
return totals
def budget_currency_totals_string(self):
totals = self.budget_currency_totals()
return ', '.join('{:,.0f} {}'.format(totals[t], t) for t in totals)
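# Example output (hypothetical totals): {'EUR': 1000, 'USD': 500} is
# rendered as '1,000 EUR, 500 USD'.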
def focus_areas(self):
from .focus_area import FocusArea
return FocusArea.objects.filter(categories__in=self.categories.all()).distinct()
focus_areas.allow_tags = True
# shortcuts to linked orgs for a single project
def _partners(self, role=None):
"""
Return the partner organisations to the project.
If role is specified only organisations having that role are returned
"""
orgs = self.partners.all()
if role:
return orgs.filter(partnerships__iati_organisation_role=role).distinct()
else:
return orgs.distinct()
def find_primary_organisation(self):
"""
This method tries to return the "managing" partner organisation.
"""
# Pick the reporting org first
if self.reporting_org:
return self.reporting_org
# Otherwise, pick the partner that can publish the project
if self.publishing_orgs:
return self.publishing_orgs[0]
# Otherwise, grab the first accountable partner we find
elif self.support_partners():
return self.support_partners()[0]
# Panic mode: grab the first partner we find
elif self.all_partners():
return self.all_partners()[0]
# Uh-oh...
else:
return None
def field_partners(self):
return self._partners(Partnership.IATI_IMPLEMENTING_PARTNER)
def funding_partners(self):
return self._partners(Partnership.IATI_FUNDING_PARTNER)
def sponsor_partners(self):
return self._partners(Partnership.AKVO_SPONSOR_PARTNER)
def support_partners(self):
return self._partners(Partnership.IATI_ACCOUNTABLE_PARTNER)
def extending_partners(self):
return self._partners(Partnership.IATI_EXTENDING_PARTNER)
def all_partners(self):
return self._partners()
def partner_organisation_pks(self):
"""Return all organisation ids along with hierarchy owner
If project is in a hierarchy, includes the hierarchy owner in the
partners list.
"""
pks = set(self._partners().values_list('id', flat=True))
hierarchy_org = self.get_hierarchy_organisation()
if hierarchy_org is not None:
pks.add(hierarchy_org.id)
return pks
def partners_info(self):
"""
Return a dict of the distinct partners with the organisation as key and as content:
1. The partnerships of the organisation
2. The (added up) funding amount, if available. Otherwise None.
E.g. {<Organisation 1>: [[<Partnership 1>,], 10000],}
"""
partners_info = {}
for partnership in Partnership.objects.filter(project=self):
funding_amount = partnership.funding_amount if partnership.funding_amount else None
if partnership.organisation not in partners_info:
partners_info[partnership.organisation] = [[partnership], funding_amount]
else:
partners_info[partnership.organisation][0].append(partnership)
existing_funding_amount = partners_info[partnership.organisation][1]
if funding_amount and existing_funding_amount:
partners_info[partnership.organisation][1] += funding_amount
elif funding_amount:
partners_info[partnership.organisation][1] = funding_amount
return partners_info
def funding_partnerships(self):
"Return the Partnership objects associated with the project that have funding information"
return self.partnerships.filter(iati_organisation_role=Partnership.IATI_FUNDING_PARTNER).order_by('organisation__name').prefetch_related('organisation').all()
def iati_project_scope(self):
return codelist_value(ActivityScope, self, 'project_scope')
def iati_project_scope_unicode(self):
return str(self.iati_project_scope())
def iati_collaboration_type(self):
return codelist_value(CollaborationType, self, 'collaboration_type')
def iati_collaboration_type_unicode(self):
return str(self.iati_collaboration_type())
def iati_default_flow_type(self):
return codelist_value(FlowType, self, 'default_flow_type')
def iati_default_flow_type_unicode(self):
return str(self.iati_default_flow_type())
def iati_default_finance_type(self):
return codelist_value(FinanceType, self, 'default_finance_type')
def iati_default_finance_type_unicode(self):
return str(self.iati_default_finance_type())
def iati_default_aid_type(self):
return codelist_value(AidType, self, 'default_aid_type')
def iati_default_aid_type_unicode(self):
return str(self.iati_default_aid_type())
def iati_default_tied_status(self):
return codelist_value(TiedStatus, self, 'default_tied_status')
def iati_default_tied_status_unicode(self):
return str(self.iati_default_tied_status())
def sector_categories_codes(self):
from .sector import Sector
sector_categories = Sector.objects.filter(project=self, vocabulary='2') | \
Sector.objects.filter(project=self, vocabulary='DAC-3')
return [sector.iati_sector_codes for sector in sector_categories]
def sector_categories(self):
from .sector import Sector
sector_categories = Sector.objects.filter(project=self, vocabulary='2') | \
Sector.objects.filter(project=self, vocabulary='DAC-3')
return [sector.iati_sector for sector in sector_categories]
def has_relations(self):
return self.parents() or self.children() or self.siblings()
def parents(self):
return self.parents_all().published().public()
def parents_all(self):
return (
Project.objects.filter(
related_projects__related_project=self,
related_projects__relation=RelatedProject.PROJECT_RELATION_CHILD
) | Project.objects.filter(
related_to_projects__project=self,
related_to_projects__relation=RelatedProject.PROJECT_RELATION_PARENT
)
).distinct()
def children(self):
return self.children_all().published().public()
def children_all(self):
return (
Project.objects.filter(
related_projects__related_project=self,
related_projects__relation=RelatedProject.PROJECT_RELATION_PARENT
) | Project.objects.filter(
related_to_projects__project=self,
related_to_projects__relation=RelatedProject.PROJECT_RELATION_CHILD
)
).distinct()
def siblings(self):
return self.siblings_all().published().public()
def siblings_all(self):
return (
Project.objects.filter(
related_projects__related_project=self,
related_projects__relation=RelatedProject.PROJECT_RELATION_SIBLING
) | Project.objects.filter(
related_to_projects__project=self,
related_to_projects__relation=RelatedProject.PROJECT_RELATION_SIBLING
)
).distinct()
def walk_hierarchy(self):
"""Generator to walk over the hierarchy of the project."""
children = self.children_all()
yield from itertools.zip_longest(children, [self], fillvalue=self)
for project in children:
yield from project.walk_hierarchy()
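# Illustrative walk (hypothetical hierarchy): for root A with children
# B and C, where B has child D, walk_hierarchy() yields the
# (project, parent) pairs (B, A), (C, A), (D, B).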
def descendants(self, depth=None):
"""
All child projects and all their children recursively
:param depth: How "deep" we recurse. If None, drill all the way down
:return:
"""
family = {self.pk}
search_depth = 0
while depth is None or search_depth < depth:
children = Project.objects.filter(
Q(related_projects__related_project__in=family,
related_projects__relation=RelatedProject.PROJECT_RELATION_PARENT)
| Q(related_to_projects__project__in=family,
related_to_projects__relation=RelatedProject.PROJECT_RELATION_CHILD)
).values_list('pk', flat=True)
if family.union(children) == family:
break
family = family.union(children)
search_depth += 1
return Project.objects.filter(pk__in=family)
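# Usage sketch (assuming a project with a two-level tree below it):
#   >>> project.descendants()         # self plus all children, recursively
#   >>> project.descendants(depth=1)  # self plus direct children only
# Both return a Project queryset that includes the project itself.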
def ancestor(self):
"Find a project's ancestor, i.e. the parent or the parent's parent etc..."
parents = self.parents_all()
if parents and parents.count() == 1 and parents[0] != self:
return parents[0].ancestor()
else:
return self
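# Note: ancestor() follows single-parent links only; a project with zero
# or multiple parents is its own ancestor. E.g. a hypothetical chain
# grandchild -> child -> root resolves to root.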
def uses_single_indicator_period(self):
"Return the settings name of the hierarchy if there is one"
ancestor = self.ancestor()
if ancestor:
root_projects = settings.SINGLE_PERIOD_INDICATORS['root_projects']
pk = ancestor.pk
if pk in root_projects:
return root_projects[pk]
def in_eutf_hierarchy(self):
"""Check if the project is a part of the EUTF hierarchy."""
# FIXME: Ideally, we shouldn't need such a function and all
# functionality should be generic enough to enable/disable for other
# organisations.
return self.ancestor().id == settings.EUTF_ROOT_PROJECT
def in_nuffic_hierarchy(self):
"""Check if the project is a part of the Nuffic hierarchy."""
return self.ancestor().id == settings.NUFFIC_ROOT_PROJECT
def add_to_program(self, program):
self.set_reporting_org(program.reporting_org)
# Set validation sets
for validation_set in program.validations.all():
self.add_validation_set(validation_set)
# set parent
self.set_parent(program.pk)
# Import Results
self.import_results()
# Refresh to get updated attributes
self.refresh_from_db()
def is_master_program(self):
"""Return True if the project is a master program."""
from akvo.rsr.models import ProjectHierarchy
try:
hierarchy = ProjectHierarchy.objects.get(root_project=self)
return hierarchy.is_master
except ProjectHierarchy.DoesNotExist:
return False
def is_hierarchy_root(self):
"""Return True if the project is root project in a hierarchy."""
from akvo.rsr.models import ProjectHierarchy
try:
ProjectHierarchy.objects.get(root_project=self)
return True
except ProjectHierarchy.DoesNotExist:
return False
def get_hierarchy_organisation(self):
"""Return the hierarchy organisation if project belongs to one."""
from akvo.rsr.models import ProjectHierarchy
try:
hierarchy = ProjectHierarchy.objects.get(root_project=self.ancestor())
return hierarchy.organisation
except ProjectHierarchy.DoesNotExist:
return None
def get_program(self):
"""Return the program which this project includes."""
from akvo.rsr.models import ProjectHierarchy
ancestor = self.ancestor()
if ProjectHierarchy.objects.filter(root_project=ancestor).count() > 0:
return ancestor
else:
return None
def project_dates(self):
""" Return the project start and end dates, preferably the actuals. If they are not set, use
the planned values.
"""
start_date = (self.date_start_actual if self.date_start_actual
else self.date_start_planned)
end_date = (self.date_end_actual if self.date_end_actual
else self.date_end_planned)
return start_date, end_date
def project_hierarchy_context(self, context):
"Add info used in single period hierarchy projects if present"
hierarchy_name = self.uses_single_indicator_period()
context['start_date'], context['end_date'] = self.project_dates()
if hierarchy_name:
context['hierarchy_name'] = hierarchy_name
(
context['needs_reporting_timeout_days'],
context['period_start'],
context['period_end']
) = single_period_dates(hierarchy_name)
return context
def check_mandatory_fields(self):
from ...iati.checks.iati_checks import IatiChecks
iati_checks = IatiChecks(self)
return iati_checks.perform_checks()
def schedule_iati_checks(self):
self.run_iati_checks = True
self.save(update_fields=['run_iati_checks'])
def update_iati_checks(self):
"""
First, removes the current IATI checks, then adds new IATI checks.
"""
# Perform new checks
iati_checks = self.check_mandatory_fields()
# FIXME: Do we really need to create the "success" check objects? Where
# do we use them?
status_codes = {
'success': 1,
'warning': 2,
'error': 3
}
checks = [
IatiCheck(project=self, status=status_codes[status], description=description)
for (status, description) in iati_checks[1] if status in status_codes
]
with transaction.atomic():
# Remove old IATI checks
self.iati_checks.all().delete()
# Save new checks to DB
IatiCheck.objects.bulk_create(checks)
# Mark project as checked
self.run_iati_checks = False
self.save(update_fields=['run_iati_checks'])
def iati_checks_status(self, status):
return [check for check in self.iati_checks.all() if check.status == status]
def iati_successes(self):
return [check.description for check in self.iati_checks_status(1)]
def iati_successes_unicode(self):
return str(self.iati_successes())
def iati_warnings(self):
return [check.description for check in self.iati_checks_status(2)]
def iati_warnings_unicode(self):
return str(self.iati_warnings())
def iati_errors(self):
return [check.description for check in self.iati_checks_status(3)]
def iati_errors_unicode(self):
return str(self.iati_errors())
def iati_prefixes(self):
"""Return the IATI ID prefixes for the project.
Based on the reporting organisations, returns the IATI prefixes.
"""
from akvo.rsr.models import Organisation
reporting_orgs = self.partnerships.filter(
iati_organisation_role=Partnership.IATI_REPORTING_ORGANISATION
).values_list('organisation_id', flat=True)
org_ids = set(reporting_orgs)
if self.in_eutf_hierarchy():
org_ids.add(settings.EUTF_ORG_ID)
prefixes = Organisation.objects.filter(id__in=org_ids)\
.values_list('iati_prefixes', flat=True)
prefixes = [prefix.strip().strip(';') for prefix in prefixes if prefix is not None]
prefixes = ';'.join([prefix for prefix in prefixes if prefix])
return prefixes.split(';') if prefixes else []
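# Illustrative parsing (hypothetical data): reporting orgs with
# iati_prefixes 'NL-KVK-123;' and 'SE-ORG-9;SE-ORG-10' yield
# ['NL-KVK-123', 'SE-ORG-9', 'SE-ORG-10'].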
def iati_identifier_context(self):
iati_activity_id_prefix = iati_activity_id_suffix = ''
iati_id = self.iati_activity_id or ''
iati_prefixes = self.iati_prefixes()
for prefix in iati_prefixes:
if iati_id.startswith(prefix):
iati_activity_id_prefix = prefix
break
iati_activity_id_suffix = iati_id[len(iati_activity_id_prefix):]
data = {
'iati_prefixes': iati_prefixes,
'iati_activity_id_prefix': iati_activity_id_prefix,
'iati_activity_id_suffix': iati_activity_id_suffix,
}
return data
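# Example split (hypothetical ID): with prefixes ['NL-KVK-123-'] and
# iati_activity_id 'NL-KVK-123-TZ1234', the context becomes
# {'iati_activity_id_prefix': 'NL-KVK-123-',
#  'iati_activity_id_suffix': 'TZ1234', ...}; with no matching prefix,
# the whole ID ends up in the suffix.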
def keyword_logos(self):
"""Return the keywords of the project which have a logo."""
return self.keywords.exclude(logo='')
def keyword_labels(self):
return [keyword.label for keyword in self.keywords.all()]
def has_imported_results(self):
Result = apps.get_model('rsr', 'Result')
return Result.objects.filter(project=self).exclude(parent_result=None).count() > 0
def set_parent(self, parent_project_id):
if self.parents_all().exists():
return
RelatedProject.objects.create(
project=self, related_project_id=parent_project_id,
relation=RelatedProject.PROJECT_RELATION_PARENT)
def add_validation_set(self, validation_set):
if validation_set not in self.validations.all():
self.validations.add(validation_set)
###################################
# RSR Impact projects #############
###################################
def import_results(self):
"""Import results from the parent project."""
import_failed = 0
import_success = 1
if self.has_imported_results():
return import_failed, 'Project has already imported results'
if self.parents_all().count() == 1:
parent_project = self.parents_all()[0]
elif self.parents_all().count() == 0:
return import_failed, 'Project does not have a parent project'
else:
return import_failed, 'Project has multiple parent projects'
self.do_import_results(parent_project)
return import_success, 'Results imported'
def do_import_results(self, parent_project):
for dimension_name in parent_project.dimension_names.all():
# Only import dimension names that have not been imported before
if not self.dimension_names.filter(parent_dimension_name=dimension_name).exists():
self.copy_dimension_name(dimension_name)
for result in parent_project.results.all():
# Only import results that have not been imported before
if not self.results.filter(parent_result=result).exists():
self.copy_result(result)
# Copy the default periods only after copying the results, to avoid
# creating periods from the parent that the copied results already include.
for parent_default_period in parent_project.default_periods.all():
if not self.default_periods.filter(parent=parent_default_period).exists():
self.copy_default_period(parent_default_period)
def import_result(self, parent_result_id):
"""Import a specific result from the parent project."""
# Check that we have a parent project and that project of parent
# result is that parent
parents = self.parents_all()
if parents.count() == 0:
raise Project.DoesNotExist("Project has no parent")
elif parents.count() > 1:
raise Project.MultipleObjectsReturned("Project has multiple parents")
else:
parent_project = parents[0]
Result = apps.get_model('rsr', 'Result')
# Check that we have a parent result
parent_result = Result.objects.get(pk=parent_result_id, project=parent_project)
# Check that we don't have a result that has parent_result as parent already.
try:
self.results.get(parent_result=parent_result)
raise ValidationError("Result already exists")
except Result.DoesNotExist:
pass
return self.copy_result(parent_result, set_parent=True)
def import_indicator(self, parent_indicator_id):
"""
:param parent_indicator_id: ID of indicator we want to create a child of in this self's
results framework
:return: new indicator object or None if it couldn't be imported/added
"""
# Check that we have a parent project and that project of parent indicator is that parent
parents = self.parents_all()
if parents.count() == 0:
raise Project.DoesNotExist("Project has no parent")
elif parents.count() > 1:
raise Project.MultipleObjectsReturned("Project has multiple parents")
else:
parent_project = parents[0]
Result = apps.get_model('rsr', 'Result')
Indicator = apps.get_model('rsr', 'Indicator')
# Check that we have a parent indicator
parent_indicator = Indicator.objects.get(pk=parent_indicator_id)
# Check that parent indicator's project is our parent project
parent_result = parent_indicator.result
if parent_result.project != parent_project:
raise ValidationError("Parent indicator's project is not the correct parent project")
# Get or create self.result that has parent_indicator.result as parent_result
result, _created = Result.objects.get_or_create(
project=self,
parent_result=parent_result,
defaults=dict(
title=parent_result.title,
type=parent_result.type,
aggregation_status=parent_result.aggregation_status,
description=parent_result.description,
)
)
# Check that we don't have an indicator that has parent_indicator as parent already.
# This can only happen if result already exists
try:
Indicator.objects.get(result=result, parent_indicator=parent_indicator)
indicator_exists = True
except Indicator.DoesNotExist:
indicator_exists = False
if indicator_exists:
raise ValidationError("Indicator already exists")
return self.copy_indicator(result, parent_indicator, set_parent=True)
def copy_results(self, source_project):
"""Copy results from a source project."""
if self.results.count() > 0:
raise RuntimeError(_('Can copy results only if the results framework is empty.'))
for dimension_name in source_project.dimension_names.all():
self.copy_dimension_name(dimension_name, set_parent=False)
for result in source_project.results.all():
self.copy_result(result, set_parent=False)
for default_period in source_project.default_periods.all():
self.copy_default_period(default_period, set_parent=False)
def copy_dimension_name_to_children(self, dimension_name):
"""Copy dimension_name to all children that imported from this project."""
for child in self.children_all():
if not child.has_imported_results():
continue
child.copy_dimension_name(dimension_name, set_parent=True)
def copy_default_period_to_children(self, default_period):
"""Copy default period to all children that imported results from this project."""
for child in self.children_all():
child.copy_default_period(default_period, set_parent=True)
def copy_default_period(self, parent, set_parent=True):
DefaultPeriod = apps.get_model('rsr', 'DefaultPeriod')
defaults = dict(parent=parent)
data = dict(
project=self, period_start=parent.period_start, period_end=parent.period_end,
defaults=defaults)
if not set_parent:
defaults.pop('parent')
DefaultPeriod.objects.get_or_create(**data)
def copy_dimension_name(self, source_dimension_name, set_parent=True):
defaults = dict(parent_dimension_name=source_dimension_name)
data = dict(project=self, name=source_dimension_name.name, defaults=defaults)
if not set_parent:
defaults.pop('parent_dimension_name')
IndicatorDimensionName = apps.get_model('rsr', 'IndicatorDimensionName')
dimension_name, created = IndicatorDimensionName.objects.get_or_create(**data)
if not created and set_parent:
dimension_name.parent_dimension_name = source_dimension_name
dimension_name.save(update_fields=['parent_dimension_name'])
for dimension_value in source_dimension_name.dimension_values.all():
self.copy_dimension_value(dimension_name, dimension_value, set_parent=set_parent)
return dimension_name
def copy_dimension_value(self, dimension_name, source_dimension_value, set_parent=True):
IndicatorDimensionValue = apps.get_model('rsr', 'IndicatorDimensionValue')
defaults = dict(parent_dimension_value=source_dimension_value)
data = dict(
name=dimension_name,
value=source_dimension_value.value,
defaults=defaults)
if not set_parent:
defaults.pop('parent_dimension_value')
dimension_value, created = IndicatorDimensionValue.objects.get_or_create(**data)
if not created and set_parent:
dimension_value.parent_dimension_value = source_dimension_value
dimension_value.save(update_fields=['parent_dimension_value'])
def copy_result_to_children(self, result):
"""Copy result to all children that imported results from this project."""
for child in self.children_all():
if not child.has_imported_results():
continue
child.copy_result(result, set_parent=True)
def copy_result(self, source_result, set_parent=True):
"""Copy the source_result to this project, setting it as parent if specified."""
data = dict(
project=self,
parent_result=source_result,
title=source_result.title,
type=source_result.type,
aggregation_status=source_result.aggregation_status,
description=source_result.description,
order=source_result.order,
)
if not set_parent:
data.pop('parent_result')
result = apps.get_model('rsr', 'Result').objects.create(**data)
for indicator in source_result.indicators.all():
self.copy_indicator(result, indicator, set_parent=set_parent)
return result
def copy_indicator(self, result, source_indicator, set_parent=True):
"""Copy a source_indicator to the result, setting it as parent if specified.
NOTE: There can only be one child for an indicator, per result. This
method automatically updates an existing child indicator, if present.
It also triggers the creation of periods, dimensions and references on
the indicator, if the indicator is being created and not updated.
"""
Indicator = apps.get_model('rsr', 'Indicator')
data = dict(
title=source_indicator.title,
description=source_indicator.description,
measure=source_indicator.measure,
ascending=source_indicator.ascending,
type=source_indicator.type,
export_to_iati=source_indicator.export_to_iati,
scores=source_indicator.scores,
order=source_indicator.order,
)
if set_parent:
indicator, created = Indicator.objects.update_or_create(
result=result,
parent_indicator=source_indicator,
defaults=data,
)
else:
indicator = Indicator.objects.create(result=result, **data)
created = True
fields = ['baseline_year', 'baseline_value', 'baseline_comment']
self._update_fields_if_not_child_updated(source_indicator, indicator, fields)
if not created:
return indicator
for period in source_indicator.periods.all():
self.copy_period(indicator, period, set_parent=set_parent)
for reference in source_indicator.references.all():
self.add_reference(indicator, reference)
IndicatorDimensionName = apps.get_model('rsr', 'IndicatorDimensionName')
for source_dimension_name in source_indicator.dimension_names.all():
dimension_name = IndicatorDimensionName.objects.filter(
project=self, name=source_dimension_name.name
).first()
indicator.dimension_names.add(dimension_name)
return indicator
def update_indicator(self, result, parent_indicator):
"""Update an indicator based on parent indicator attributes."""
Indicator = apps.get_model('rsr', 'Indicator')
try:
child_indicator = Indicator.objects.get(
result=result,
parent_indicator=parent_indicator,
)
except Indicator.DoesNotExist:
return
update_fields = ['title', 'measure', 'ascending', 'type', 'export_to_iati', 'description',
'order', 'scores']
for field in update_fields:
setattr(child_indicator, field, getattr(parent_indicator, field))
child_indicator.save(update_fields=update_fields)
fields = ['baseline_year', 'baseline_value', 'baseline_comment']
self._update_fields_if_not_child_updated(parent_indicator, child_indicator, fields)
def copy_period(self, indicator, source_period, set_parent=True):
"""Copy the source period to the indicator, and set it as a parent if specified.
NOTE: There can only be one child for a period, per indicator. This
method automatically updates the existing one, if there is one.
"""
IndicatorPeriod = apps.get_model('rsr', 'IndicatorPeriod')
data = dict(
period_start=source_period.period_start,
period_end=source_period.period_end,
)
qs = IndicatorPeriod.objects.select_related('indicator', 'indicator__result')
if set_parent:
qs.update_or_create(indicator=indicator, parent_period=source_period, defaults=data)
else:
qs.create(indicator=indicator, **data)
def update_period(self, indicator, parent_period):
"""Update a period based on the parent period attributes."""
IndicatorPeriod = apps.get_model('rsr', 'IndicatorPeriod')
try:
child_period = IndicatorPeriod.objects.select_related(
'indicator',
'indicator__result',
).get(
indicator=indicator,
parent_period=parent_period,
)
except IndicatorPeriod.DoesNotExist:
return
child_period.period_start = parent_period.period_start
child_period.period_end = parent_period.period_end
child_period.save()
def update_dimension_value(self, dimension_name, parent_dimension_value):
"""Update dimension value base on the parent dimension value attribute."""
IndicatorDimensionValue = apps.get_model('rsr', 'IndicatorDimensionValue')
try:
child_dimension_value = IndicatorDimensionValue.objects.select_related(
'name'
).get(
name=dimension_name,
parent_dimension_value=parent_dimension_value,
)
except IndicatorDimensionValue.DoesNotExist:
return
child_dimension_value.value = parent_dimension_value.value
child_dimension_value.save()
def add_reference(self, indicator, reference):
apps.get_model('rsr', 'IndicatorReference').objects.create(
indicator=indicator,
reference=reference.reference,
vocabulary=reference.vocabulary,
vocabulary_uri=reference.vocabulary_uri,
)
def _update_fields_if_not_child_updated(self, parent, child, fields):
"""Copy the specified fields from parent to child, when empty on the child."""
for field in fields:
parent_value = getattr(parent, field)
if not getattr(child, field) and parent_value:
setattr(child, field, parent_value)
child.save()
def indicator_labels(self):
return apps.get_model('rsr', 'OrganisationIndicatorLabel').objects.filter(
organisation__in=self.all_partners()
).distinct()
def has_indicator_labels(self):
return self.indicator_labels().count() > 0
def toggle_aggregate_children(self, aggregate):
"""
Recalculate this project's indicator periods when aggregation from
children is turned on or off.
:param aggregate: Boolean, indicating if aggregation is turned on (True) or off (False)
"""
for result in self.results.all():
for indicator in result.indicators.all():
if indicator.is_parent_indicator():
for period in indicator.periods.all():
if indicator.measure == '2':
self.update_parents(period, period.child_periods_average(), 1)
else:
sign = 1 if aggregate else -1
self.update_parents(period, period.child_periods_sum(), sign)
def toggle_aggregate_to_parent(self, aggregate):
""" Add/subtract child indicator period values from parent if aggregation is toggled """
for result in self.results.all():
for indicator in result.indicators.all():
if indicator.is_child_indicator():
for period in indicator.periods.all():
parent = period.parent_period
if parent and period.actual_value:
if indicator.measure == '2':
self.update_parents(parent, parent.child_periods_average(), 1)
else:
sign = 1 if aggregate else -1
self.update_parents(parent, period.actual_value, sign)
def update_parents(self, update_period, difference, sign):
""" Update parent indicator periods if they exist and allow aggregation """
try:
if update_period.indicator.measure == '2':
update_period.actual_value = str(Decimal(difference))
else:
update_period.actual_value = str(
Decimal(update_period.actual_value) + sign * Decimal(difference))
update_period.save()
parent_period = update_period.parent_period
if parent_period and parent_period.indicator.result.project.aggregate_children:
if update_period.indicator.measure == '2':
self.update_parents(parent_period, parent_period.child_periods_average(), 1)
else:
self.update_parents(parent_period, difference, sign)
except (InvalidOperation, TypeError):
pass
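# Propagation sketch (hypothetical values): toggling aggregation on for a
# child period with actual_value 10 adds +10 to its parent period; if that
# parent's project also aggregates children, the +10 continues to the
# grandparent, and so on up the chain. Average measures ('2') are
# recomputed from child_periods_average() at every level instead of summed.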
def update_use_project_roles(self):
if not self.reporting_org:
return
if self.reporting_org.use_project_roles == self.use_project_roles:
return
# We only wish to turn on the project roles flag on the project, if the
# reporting organisation has that flag turned on. If the project
# already has the flag turned on, we don't want to turn it off
# implicitly, based on the reporting organisation. There has to be a
# more explicit way of turning this off, for the user.
if self.reporting_org.use_project_roles and not self.use_project_roles:
self.use_project_roles = True
self.save(update_fields=['use_project_roles'])
@classmethod
def log_project_addition(cls, project_id, user):
project = cls.objects.get(id=project_id)
message = '%s.' % (_('Project editor, added project'))
LogEntry.objects.log_action(
user_id=user.pk,
content_type_id=ContentType.objects.get_for_model(project).pk,
object_id=project.pk,
object_repr=str(project),
action_flag=ADDITION,
change_message=message
)
# Schedule IATI checks after a project has been created.
project.schedule_iati_checks()
@staticmethod
def add_custom_fields(project_id, organisations):
from akvo.rsr.models import OrganisationCustomField, ProjectCustomField
custom_fields = OrganisationCustomField.objects.filter(
organisation__in=organisations
)
project_custom_fields = [
custom_field.new_project_custom_field(project_id) for custom_field in custom_fields
]
ProjectCustomField.objects.bulk_create(project_custom_fields)
@classmethod
def new_project_created(cls, project_id, user):
"""Hook to do some book-keeping for a newly created project.
*NOTE*: This hook cannot be moved into a post-save hook since we need
information about the user who created this project, to perform some of
the actions.
"""
# Set reporting organisation
organisations = [e.organisation for e in user.approved_employments().order_by('id')]
can_create_project_orgs = [
org for org in organisations
if org.can_create_projects and user.has_perm('rsr.add_project', org)
]
if can_create_project_orgs:
# FIXME: We arbitrarily choose the first organisation where the user
# can create projects, when ordered by employments
organisation_id = can_create_project_orgs[0].id
from akvo.rsr.models import Partnership
Partnership.objects.create(
project_id=project_id,
organisation_id=organisation_id,
iati_organisation_role=Partnership.IATI_REPORTING_ORGANISATION
)
Project.log_project_addition(project_id, user)
organisation_ids = [org.id for org in organisations]
Project.add_custom_fields(project_id, organisation_ids)
def users_with_access(self, group_name=None):
if self.use_project_roles:
qs = self.projectrole_set.all()
else:
# NOTE: We deliberately keep the access simple here - we only look
# for users employed by direct partners, and don't worry about
# content-owned organisations or users employed by project hierarchy
# owner organisation, etc.
qs = self.partners.employments()
if group_name is not None:
qs = qs.filter(group__name=group_name)
user_ids = qs.values_list('user__id', flat=True)
User = get_user_model()
return User.objects.filter(pk__in=user_ids)
def project_directory_cache_key(project_id):
return f'project_directory_{project_id}'
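# e.g. project_directory_cache_key(42) -> 'project_directory_42'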
@receiver(post_save, sender=Project)
def default_validation_set(sender, **kwargs):
"""When the project is created, add the RSR validation (pk=1) to the project."""
# Disable signal handler when loading fixtures
if kwargs.get('raw', False):
return
project = kwargs['instance']
created = kwargs['created']
if created:
try:
if not project.validations.all():
project.validations.add(ProjectEditorValidationSet.objects.get(pk=1))
except ProjectEditorValidationSet.DoesNotExist:
# RSR validation set does not exist; this should not happen.
send_mail('RSR validation set missing',
'This is a notification to inform the RSR admins that the RSR validation set '
'(pk=1) is missing.',
settings.DEFAULT_FROM_EMAIL,
getattr(settings, "SUPPORT_EMAIL", ['rsr@akvo.org']))
@receiver(post_save, sender=ProjectUpdate)
def update_denormalized_project(sender, **kwargs):
"Updates the denormalized project.last_update on related project."
# Disable signal handler when loading fixtures
if kwargs.get('raw', False):
return
project_update = kwargs['instance']
project = project_update.project
project.last_update = project_update
project.save()
@receiver(post_delete, sender=ProjectUpdate)
def rewind_last_update(sender, **kwargs):
""" Updates the denormalized project.last_update on related project
When deleting an update we have to set project.last_update again since it'll change if the
deleted update was the latest or if it was the only update for the project
"""
# Disable signal handler when loading fixtures
if kwargs.get('raw', False):
return
project_update = kwargs['instance']
project = project_update.project
try:
project.last_update = project.updates_desc()[0]
except IndexError:
project.last_update = None
project.save()
|
akvo/akvo-rsr
|
akvo/rsr/models/project.py
|
Python
|
agpl-3.0
| 77,934
|
[
"VisIt"
] |
ce6344f8b7ed38abe2f97d62d789e101369df936fd18721b15e2c123174310ba
|
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.errors import DataError
from robot.model import SuiteVisitor
from robot.utils import Matcher, plural_or_not
def KeywordRemover(how):
upper = how.upper()
if upper.startswith('NAME:'):
return ByNameKeywordRemover(pattern=how[5:])
try:
return {'ALL': AllKeywordsRemover,
'PASSED': PassedKeywordRemover,
'FOR': ForLoopItemsRemover,
'WUKS': WaitUntilKeywordSucceedsRemover}[upper]()
except KeyError:
raise DataError("Expected 'ALL', 'PASSED', 'NAME:<pattern>', 'FOR', "
"or 'WUKS' but got '%s'." % how)
class _KeywordRemover(SuiteVisitor):
_message = 'Keyword data removed using --RemoveKeywords option.'
def __init__(self):
self._removal_message = RemovalMessage(self._message)
def _clear_content(self, kw):
kw.keywords = []
kw.messages = []
self._removal_message.set(kw)
def _failed_or_contains_warning(self, item):
return not item.passed or self._contains_warning(item)
def _contains_warning(self, item):
contains_warning = ContainsWarning()
item.visit(contains_warning)
return contains_warning.result
class AllKeywordsRemover(_KeywordRemover):
def visit_keyword(self, keyword):
self._clear_content(keyword)
class PassedKeywordRemover(_KeywordRemover):
def start_suite(self, suite):
if not suite.statistics.all.failed:
for keyword in suite.keywords:
if not self._contains_warning(keyword):
self._clear_content(keyword)
def visit_test(self, test):
if not self._failed_or_contains_warning(test):
for keyword in test.keywords:
self._clear_content(keyword)
def visit_keyword(self, keyword):
pass
class ByNameKeywordRemover(_KeywordRemover):
def __init__(self, pattern):
_KeywordRemover.__init__(self)
self._matcher = Matcher(pattern, ignore='_')
def start_keyword(self, kw):
if self._matcher.match(kw.name) and not self._contains_warning(kw):
self._clear_content(kw)
class ForLoopItemsRemover(_KeywordRemover):
_message = '%d passing step%s removed using --RemoveKeywords option.'
def start_keyword(self, kw):
if kw.type == kw.FOR_LOOP_TYPE:
before = len(kw.keywords)
kw.keywords = self._remove_keywords(kw.keywords)
self._removal_message.set_if_removed(kw, before)
def _remove_keywords(self, keywords):
return [kw for kw in keywords
if self._failed_or_contains_warning(kw) or kw is keywords[-1]]
class WaitUntilKeywordSucceedsRemover(_KeywordRemover):
_message = '%d failing step%s removed using --RemoveKeywords option.'
def start_keyword(self, kw):
if kw.name == 'BuiltIn.Wait Until Keyword Succeeds' and kw.keywords:
before = len(kw.keywords)
kw.keywords = self._remove_keywords(list(kw.keywords))
self._removal_message.set_if_removed(kw, before)
def _remove_keywords(self, keywords):
include_from_end = 2 if keywords[-1].passed else 1
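        # Keep the last attempt always; when it passed, also keep the failing
        # attempt just before it, so the failure that triggered the final
        # retry stays visible.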
return self._kws_with_warnings(keywords[:-include_from_end]) \
+ keywords[-include_from_end:]
def _kws_with_warnings(self, keywords):
return [kw for kw in keywords if self._contains_warning(kw)]
class ContainsWarning(SuiteVisitor):
def __init__(self):
self.result = False
def start_suite(self, suite):
return not self.result
def start_test(self, test):
return not self.result
def start_keyword(self, keyword):
return not self.result
def visit_message(self, msg):
if msg.level == 'WARN':
self.result = True
class RemovalMessage(object):
def __init__(self, message):
self._message = message
def set_if_removed(self, kw, len_before):
removed = len_before - len(kw.keywords)
if removed:
self.set(kw, self._message % (removed, plural_or_not(removed)))
def set(self, kw, message=None):
kw.doc = ('%s\n\n_%s_' % (kw.doc, message or self._message)).strip()
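if __name__ == '__main__':
    # Hedged demo (not in the original module): show which remover class each
    # --RemoveKeywords value maps to, using only names defined in this file.
    for how in ('ALL', 'PASSED', 'NAME:Login*', 'FOR', 'WUKS'):
        print('%s -> %s' % (how, type(KeywordRemover(how)).__name__))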
|
ldtri0209/robotframework
|
src/robot/result/keywordremover.py
|
Python
|
apache-2.0
| 4,811
|
[
"VisIt"
] |
db0fd60a0a1b68fa03b9a40d81536bd8efc899d2cb0e7947c2264f21c96c2bef
|
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
from Plugins.Extensions.OpenWebif.local import tstrings
##################################################
## MODULE CONSTANTS
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1406885499.377351
__CHEETAH_genTimestamp__ = 'Fri Aug 1 18:31:39 2014'
__CHEETAH_src__ = '/home/wslee2/models/5-wo/force1plus/openpli3.0/build-force1plus/tmp/work/mips32el-oe-linux/enigma2-plugin-extensions-openwebif-1+git5+3c0c4fbdb28d7153bf2140459b553b3d5cdd4149-r0/git/plugin/controllers/views/ajax/boxinfo.tmpl'
__CHEETAH_srcLastModified__ = 'Fri Aug 1 18:30:05 2014'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class boxinfo(Template):
##################################################
## CHEETAH GENERATED METHODS
def __init__(self, *args, **KWs):
super(boxinfo, self).__init__(*args, **KWs)
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
def respond(self, trans=None):
## CHEETAH: main method generated for this template
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
write(u'''<!-- box_info -->
<div id="content_main">
\t<div id="info">
\t\t<h3>''')
_v = VFFSL(SL,"tstrings",True)['box_info'] # u"$tstrings['box_info']" on line 5, col 7
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['box_info']")) # from line 5, col 7.
write(u'''</h3>
\t\t<hr />
\t\t<img src="images/boxes/''')
_v = VFFSL(SL,"boximage",True) # u'${boximage}' on line 7, col 26
if _v is not None: write(_filter(_v, rawExpr=u'${boximage}')) # from line 7, col 26.
write(u'''" id="box_image" alt="box_info">
\t\t<hr />
\t\t<br/>
\t\t<table width="100%">
\t\t\t<tr>
\t\t\t\t<td width="100%">
\t\t\t\t\t<table cellspacing="0" class="infomain" >
\t\t\t\t\t\t<tr>
\t\t\t\t\t\t\t<th colspan="2" class="infoHeader">''')
_v = VFFSL(SL,"tstrings",True)['box'] # u"$tstrings['box']" on line 15, col 43
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['box']")) # from line 15, col 43.
write(u'''</th>
\t\t\t\t\t\t</tr>
\t\t\t\t\t\t<tr>
\t\t\t\t\t\t\t<td class="infoleft">''')
_v = VFFSL(SL,"tstrings",True)['brand'] # u"$tstrings['brand']" on line 18, col 29
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['brand']")) # from line 18, col 29.
write(u''':</td>
\t\t\t\t\t\t\t<td class="inforight">''')
_v = VFFSL(SL,"brand",True) # u'$brand' on line 19, col 30
if _v is not None: write(_filter(_v, rawExpr=u'$brand')) # from line 19, col 30.
write(u'''</td>
\t\t\t\t\t\t</tr>
\t\t\t\t\t\t<tr>
\t\t\t\t\t\t\t<td class="infoleft">''')
_v = VFFSL(SL,"tstrings",True)['model'] # u"$tstrings['model']" on line 22, col 29
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['model']")) # from line 22, col 29.
write(u''':</td>
\t\t\t\t\t\t\t<td class="inforight">''')
_v = VFFSL(SL,"model",True) # u'$model' on line 23, col 30
if _v is not None: write(_filter(_v, rawExpr=u'$model')) # from line 23, col 30.
write(u'''</td>
\t\t\t\t\t\t</tr>
\t\t\t\t\t\t<tr>
\t\t\t\t\t\t\t<td class="infoleft">''')
_v = VFFSL(SL,"tstrings",True)['chipset'] # u"$tstrings['chipset']" on line 26, col 29
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['chipset']")) # from line 26, col 29.
write(u''':</td>
\t\t\t\t\t\t\t<td class="inforight">''')
_v = VFFSL(SL,"chipset",True) # u'$chipset' on line 27, col 30
if _v is not None: write(_filter(_v, rawExpr=u'$chipset')) # from line 27, col 30.
write(u'''</td>
\t\t\t\t\t\t</tr>
\t\t\t\t\t\t<tr>
\t\t\t\t\t\t\t<td class="infoleft">''')
_v = VFFSL(SL,"tstrings",True)['fp_version'] # u"$tstrings['fp_version']" on line 30, col 29
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['fp_version']")) # from line 30, col 29.
write(u''':</td>
\t\t\t\t\t\t\t<td class="inforight">''')
_v = VFFSL(SL,"str",False)(VFFSL(SL,"fp_version",True)) # u'$str($fp_version)' on line 31, col 30
if _v is not None: write(_filter(_v, rawExpr=u'$str($fp_version)')) # from line 31, col 30.
write(u'''</td>
\t\t\t\t\t\t</tr>
\t\t\t\t\t\t<tr>
\t\t\t\t\t\t\t<td class="infoleft">''')
_v = VFFSL(SL,"tstrings",True)['total_memory'] # u"$tstrings['total_memory']" on line 34, col 29
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['total_memory']")) # from line 34, col 29.
write(u''':</td>
\t\t\t\t\t\t\t<td class="inforight">''')
_v = VFFSL(SL,"mem1",True) # u'$mem1' on line 35, col 30
if _v is not None: write(_filter(_v, rawExpr=u'$mem1')) # from line 35, col 30.
write(u'''</td>
\t\t\t\t\t\t</tr>
\t\t\t\t\t\t<tr>
\t\t\t\t\t\t\t<td class="infoleft">''')
_v = VFFSL(SL,"tstrings",True)['free_memory'] # u"$tstrings['free_memory']" on line 38, col 29
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['free_memory']")) # from line 38, col 29.
write(u''':</td>
\t\t\t\t\t\t\t<td class="inforight">''')
_v = VFFSL(SL,"mem2",True) # u'$mem2' on line 39, col 30
if _v is not None: write(_filter(_v, rawExpr=u'$mem2')) # from line 39, col 30.
write(u'''</td>
\t\t\t\t\t\t</tr>
\t\t\t\t\t\t<tr>
\t\t\t\t\t\t\t<td class="infoleft">''')
_v = VFFSL(SL,"tstrings",True)['box_uptime'] # u"$tstrings['box_uptime']" on line 42, col 29
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['box_uptime']")) # from line 42, col 29.
write(u''':</td>
\t\t\t\t\t\t\t<td class="inforight">''')
_v = VFFSL(SL,"uptime",True) # u'$uptime' on line 43, col 30
if _v is not None: write(_filter(_v, rawExpr=u'$uptime')) # from line 43, col 30.
write(u'''</td>
\t\t\t\t\t\t</tr>
\t\t\t\t\t</table>
\t\t\t\t</td>
\t\t\t</tr>
\t\t\t<tr>
\t\t\t\t<td width="100%">
\t\t\t\t\t<table cellspacing="0" class="infomain" >
\t\t\t\t\t\t<tr>
\t\t\t\t\t\t\t<th colspan="2" class="infoHeader">''')
_v = VFFSL(SL,"tstrings",True)['software'] # u"$tstrings['software']" on line 52, col 43
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['software']")) # from line 52, col 43.
write(u'''</th>
\t\t\t\t\t\t</tr>
\t\t\t\t\t\t<tr>
\t\t\t\t\t\t\t<td class="infoleft">''')
_v = VFFSL(SL,"tstrings",True)['kernel_version'] # u"$tstrings['kernel_version']" on line 55, col 29
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['kernel_version']")) # from line 55, col 29.
write(u''':</td>
\t\t\t\t\t\t\t<td class="inforight">''')
_v = VFFSL(SL,"kernelver",True) # u'$kernelver' on line 56, col 30
if _v is not None: write(_filter(_v, rawExpr=u'$kernelver')) # from line 56, col 30.
write(u'''</td>
\t\t\t\t\t\t</tr>
\t\t\t\t\t\t<tr>
\t\t\t\t\t\t\t<td class="infoleft">''')
_v = VFFSL(SL,"tstrings",True)['firmware_version'] # u"$tstrings['firmware_version']" on line 59, col 29
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['firmware_version']")) # from line 59, col 29.
write(u''':</td>
\t\t\t\t\t\t\t<td class="inforight">''')
_v = VFFSL(SL,"imagever",True) # u'$imagever' on line 60, col 30
if _v is not None: write(_filter(_v, rawExpr=u'$imagever')) # from line 60, col 30.
write(u'''</td>
\t\t\t\t\t\t</tr>
\t\t\t\t\t\t<tr>
\t\t\t\t\t\t\t<td class="infoleft">''')
_v = VFFSL(SL,"tstrings",True)['gui_version'] # u"$tstrings['gui_version']" on line 63, col 29
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['gui_version']")) # from line 63, col 29.
write(u''':</td>
\t\t\t\t\t\t\t<td class="inforight">''')
_v = VFFSL(SL,"enigmaver",True) # u'$enigmaver' on line 64, col 30
if _v is not None: write(_filter(_v, rawExpr=u'$enigmaver')) # from line 64, col 30.
write(u'''</td>
\t\t\t\t\t\t</tr>
\t\t\t\t\t</table>
\t\t\t\t</td>
\t\t\t</tr>
\t\t\t<tr>
\t\t\t\t<td width="100%">
\t\t\t\t\t<table cellspacing="0" class="infomain" >
\t\t\t\t\t\t<tr>
\t\t\t\t\t\t\t<th colspan="2" class="infoHeader">''')
_v = VFFSL(SL,"tstrings",True)['tuners'] # u"$tstrings['tuners']" on line 73, col 43
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['tuners']")) # from line 73, col 43.
write(u'''</th>
\t\t\t\t\t\t</tr>
''')
for tuner in VFFSL(SL,"tuners",True): # generated from line 75, col 7
write(u'''\t\t\t\t\t\t<tr>
\t\t\t\t\t\t\t<td class="infoleft">''')
_v = VFFSL(SL,"tuner.name",True) # u'$tuner.name' on line 77, col 29
if _v is not None: write(_filter(_v, rawExpr=u'$tuner.name')) # from line 77, col 29.
write(u''':</td>
\t\t\t\t\t\t\t<td class="inforight">''')
_v = VFFSL(SL,"tuner.type",True) # u'$tuner.type' on line 78, col 30
if _v is not None: write(_filter(_v, rawExpr=u'$tuner.type')) # from line 78, col 30.
write(u'''</td>
\t\t\t\t\t\t</tr>
''')
write(u'''\t\t\t\t\t</table>
\t\t\t\t</td>
\t\t\t</tr>
''')
for hd in VFFSL(SL,"hdd",True): # generated from line 84, col 4
write(u'''\t\t\t<tr>
\t\t\t\t<td width="100%">
\t\t\t\t\t<table cellspacing="0" class="infomain" >
\t\t\t\t\t\t<tr>
\t\t\t\t\t\t\t<th colspan="2" class="infoHeader">''')
_v = VFFSL(SL,"tstrings",True)['hdd_model'] # u"$tstrings['hdd_model']" on line 89, col 43
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['hdd_model']")) # from line 89, col 43.
write(u''': ''')
_v = VFFSL(SL,"hd.model",True) # u'$hd.model' on line 89, col 67
if _v is not None: write(_filter(_v, rawExpr=u'$hd.model')) # from line 89, col 67.
write(u'''</th>
\t\t\t\t\t\t</tr>
\t\t\t\t\t\t<tr>
\t\t\t\t\t\t\t<td class="infoleft">''')
_v = VFFSL(SL,"tstrings",True)['capacity'] # u"$tstrings['capacity']" on line 92, col 29
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['capacity']")) # from line 92, col 29.
write(u''':</td>
\t\t\t\t\t\t\t<td class="inforight">''')
_v = VFFSL(SL,"hd.capacity",True) # u'$hd.capacity' on line 93, col 30
if _v is not None: write(_filter(_v, rawExpr=u'$hd.capacity')) # from line 93, col 30.
write(u'''</td>
\t\t\t\t\t\t</tr>
\t\t\t\t\t\t<tr>
\t\t\t\t\t\t\t<td class="infoleft">''')
_v = VFFSL(SL,"tstrings",True)['free'] # u"$tstrings['free']" on line 96, col 29
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['free']")) # from line 96, col 29.
write(u''':</td>
\t\t\t\t\t\t\t<td class="inforight">''')
_v = VFFSL(SL,"hd.free",True) # u'$hd.free' on line 97, col 30
if _v is not None: write(_filter(_v, rawExpr=u'$hd.free')) # from line 97, col 30.
write(u'''</td>
\t\t\t\t\t\t</tr>
\t\t\t\t\t</table>
\t\t\t\t</td>
\t\t\t</tr>
''')
for iface in VFFSL(SL,"ifaces",True): # generated from line 103, col 4
write(u'''\t\t\t<tr>
\t\t\t\t<td width="100%">
\t\t\t\t\t<table cellspacing="0" class="infomain" >
\t\t\t\t\t\t<tr>
\t\t\t\t\t\t\t<th colspan="2" class="infoHeader">''')
_v = VFFSL(SL,"tstrings",True)['network_interface'] # u"$tstrings['network_interface']" on line 108, col 43
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['network_interface']")) # from line 108, col 43.
write(u''': ''')
_v = VFFSL(SL,"iface.name",True) # u'$iface.name' on line 108, col 75
if _v is not None: write(_filter(_v, rawExpr=u'$iface.name')) # from line 108, col 75.
write(u'''</th>
\t\t\t\t\t\t</tr>
\t\t\t\t\t\t<tr>
\t\t\t\t\t\t\t<td class="infoleft">''')
_v = VFFSL(SL,"tstrings",True)['dhcp'] # u"$tstrings['dhcp']" on line 111, col 29
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['dhcp']")) # from line 111, col 29.
write(u''':</td>
\t\t\t\t\t\t\t<td class="inforight">''')
_v = VFFSL(SL,"iface.dhcp",True) # u'$iface.dhcp' on line 112, col 30
if _v is not None: write(_filter(_v, rawExpr=u'$iface.dhcp')) # from line 112, col 30.
write(u'''</td>
\t\t\t\t\t\t</tr>
\t\t\t\t\t\t<tr>
\t\t\t\t\t\t\t<td class="infoleft">''')
_v = VFFSL(SL,"tstrings",True)['ip_address'] # u"$tstrings['ip_address']" on line 115, col 29
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['ip_address']")) # from line 115, col 29.
write(u''':</td>
\t\t\t\t\t\t\t<td class="inforight">''')
_v = VFFSL(SL,"iface.ip",True) # u'$iface.ip' on line 116, col 30
if _v is not None: write(_filter(_v, rawExpr=u'$iface.ip')) # from line 116, col 30.
write(u'''</td>
\t\t\t\t\t\t</tr>
\t\t\t\t\t\t<tr>
\t\t\t\t\t\t\t<td class="infoleft">''')
_v = VFFSL(SL,"tstrings",True)['subnet_mask'] # u"$tstrings['subnet_mask']" on line 119, col 29
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['subnet_mask']")) # from line 119, col 29.
write(u''':</td>
\t\t\t\t\t\t\t<td class="inforight">''')
_v = VFFSL(SL,"iface.mask",True) # u'$iface.mask' on line 120, col 30
if _v is not None: write(_filter(_v, rawExpr=u'$iface.mask')) # from line 120, col 30.
write(u'''</td>
\t\t\t\t\t\t</tr>
\t\t\t\t\t\t<tr>
\t\t\t\t\t\t\t<td class="infoleft">''')
_v = VFFSL(SL,"tstrings",True)['gateway'] # u"$tstrings['gateway']" on line 123, col 29
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['gateway']")) # from line 123, col 29.
write(u''':</td>
\t\t\t\t\t\t\t<td class="inforight">''')
_v = VFFSL(SL,"iface.gw",True) # u'$iface.gw' on line 124, col 30
if _v is not None: write(_filter(_v, rawExpr=u'$iface.gw')) # from line 124, col 30.
write(u'''</td>
\t\t\t\t\t\t</tr>
\t\t\t\t\t\t<tr>
\t\t\t\t\t\t\t<td class="infoleft">''')
_v = VFFSL(SL,"tstrings",True)['mac_address'] # u"$tstrings['mac_address']" on line 127, col 29
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['mac_address']")) # from line 127, col 29.
write(u''':</td>
\t\t\t\t\t\t\t<td class="inforight">''')
_v = VFFSL(SL,"iface.mac",True) # u'$iface.mac' on line 128, col 30
if _v is not None: write(_filter(_v, rawExpr=u'$iface.mac')) # from line 128, col 30.
write(u'''</td>
\t\t\t\t\t\t</tr>
\t\t\t\t\t\t<tr>
\t\t\t\t\t\t\t<td class="infoleft">''')
_v = VFFSL(SL,"tstrings",True)['ipv6_address'] # u"$tstrings['ipv6_address']" on line 131, col 29
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['ipv6_address']")) # from line 131, col 29.
write(u''':</td>
\t\t\t\t\t\t\t<td class="inforight">''')
_v = VFFSL(SL,"iface.ipv6",True) # u'$iface.ipv6' on line 132, col 30
if _v is not None: write(_filter(_v, rawExpr=u'$iface.ipv6')) # from line 132, col 30.
write(u'''</td>
\t\t\t\t\t\t</tr>
\t\t\t\t\t</table>
\t\t\t\t</td>
\t\t\t</tr>
''')
write(u'''\t\t</table>
\t</div>
</div>\t
<!-- /box_info -->
''')
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
_mainCheetahMethod_for_boxinfo= 'respond'
## END CLASS DEFINITION
if not hasattr(boxinfo, '_initCheetahAttributes'):
templateAPIClass = getattr(boxinfo, '_CHEETAH_templateClass', Template)
templateAPIClass._addCheetahPlumbingCodeToClass(boxinfo)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=boxinfo()).run()
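# Hedged usage sketch (not generated by Cheetah): rendering this template
# programmatically requires a searchList supplying every placeholder that
# respond() looks up above ($boximage, $brand, $model, $chipset, $fp_version,
# $mem1, $mem2, $uptime, $kernelver, $imagever, $enigmaver, plus the $tuners,
# $hdd and $ifaces iterables), e.g.
#
#   html = str(boxinfo(searchList=[{'boximage': 'box.png', 'brand': 'ACME', ...}]))
#
# The placeholder names are taken from the VFFSL lookups above; the values
# shown are invented.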
|
MOA-2011/enigma2-plugin-extensions-openwebif
|
plugin/controllers/views/ajax/boxinfo.py
|
Python
|
gpl-2.0
| 18,406
|
[
"VisIt"
] |
bce0ba3579e6eff578a1ed124b5f9a80192d1fd1a29f7fa18b0ae87d196fd548
|
"""
Test functions for models.GLM
"""
import os
import warnings
import numpy as np
from numpy.testing import (
assert_,
assert_allclose,
assert_almost_equal,
assert_array_less,
assert_equal,
assert_raises,
)
import pandas as pd
from pandas.testing import assert_series_equal
import pytest
from scipy import stats
import statsmodels.api as sm
from statsmodels.datasets import cpunish, longley
from statsmodels.discrete import discrete_model as discrete
from statsmodels.genmod.generalized_linear_model import GLM, SET_USE_BIC_LLF
from statsmodels.tools.numdiff import (
approx_fprime,
approx_fprime_cs,
approx_hess,
approx_hess_cs,
)
from statsmodels.tools.sm_exceptions import (
DomainWarning,
PerfectSeparationError,
ValueWarning,
)
from statsmodels.tools.tools import add_constant
# Test Precisions
DECIMAL_4 = 4
DECIMAL_3 = 3
DECIMAL_2 = 2
DECIMAL_1 = 1
DECIMAL_0 = 0
pdf_output = False
if pdf_output:
from matplotlib.backends.backend_pdf import PdfPages
pdf = PdfPages("test_glm.pdf")
else:
pdf = None
def close_or_save(pdf, fig):
if pdf_output:
pdf.savefig(fig)
def teardown_module():
if pdf_output:
pdf.close()
@pytest.fixture(scope="module")
def iris():
cur_dir = os.path.dirname(os.path.abspath(__file__))
return np.genfromtxt(os.path.join(cur_dir, 'results', 'iris.csv'),
delimiter=",", skip_header=1)
class CheckModelResultsMixin(object):
'''
res2 should be either the results from RModelWrap
or the results as defined in model_results_data
'''
decimal_params = DECIMAL_4
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params,
self.decimal_params)
decimal_bse = DECIMAL_4
def test_standard_errors(self):
assert_allclose(self.res1.bse, self.res2.bse,
atol=10**(-self.decimal_bse), rtol=1e-5)
decimal_resids = DECIMAL_4
def test_residuals(self):
# fix incorrect numbers in resid_working results
# residuals for Poisson are also tested in test_glm_weights.py
import copy
        # newer numpy would have a .copy() method on the array
resid2 = copy.copy(self.res2.resids)
resid2[:, 2] *= self.res1.family.link.deriv(self.res1.mu)**2
atol = 10**(-self.decimal_resids)
resid_a = self.res1.resid_anscombe_unscaled
resids = np.column_stack((self.res1.resid_pearson,
self.res1.resid_deviance, self.res1.resid_working,
resid_a, self.res1.resid_response))
assert_allclose(resids, resid2, rtol=1e-6, atol=atol)
decimal_aic_R = DECIMAL_4
def test_aic_R(self):
# R includes the estimation of the scale as a lost dof
        # It does not do so with Gamma, though
if self.res1.scale != 1:
dof = 2
else:
dof = 0
if isinstance(self.res1.model.family, (sm.families.NegativeBinomial)):
llf = self.res1.model.family.loglike(self.res1.model.endog,
self.res1.mu,
self.res1.model.var_weights,
self.res1.model.freq_weights,
scale=1)
aic = (-2*llf+2*(self.res1.df_model+1))
else:
aic = self.res1.aic
assert_almost_equal(aic+dof, self.res2.aic_R,
self.decimal_aic_R)
decimal_aic_Stata = DECIMAL_4
def test_aic_Stata(self):
        # Stata uses the llf below for the AIC definition for these families
if isinstance(self.res1.model.family, (sm.families.Gamma,
sm.families.InverseGaussian,
sm.families.NegativeBinomial)):
llf = self.res1.model.family.loglike(self.res1.model.endog,
self.res1.mu,
self.res1.model.var_weights,
self.res1.model.freq_weights,
scale=1)
aic = (-2*llf+2*(self.res1.df_model+1))/self.res1.nobs
else:
aic = self.res1.aic/self.res1.nobs
assert_almost_equal(aic, self.res2.aic_Stata, self.decimal_aic_Stata)
decimal_deviance = DECIMAL_4
def test_deviance(self):
assert_almost_equal(self.res1.deviance, self.res2.deviance,
self.decimal_deviance)
decimal_scale = DECIMAL_4
def test_scale(self):
assert_almost_equal(self.res1.scale, self.res2.scale,
self.decimal_scale)
decimal_loglike = DECIMAL_4
def test_loglike(self):
        # Stata uses the llf below for these families;
        # we differ from R for them
if isinstance(self.res1.model.family, (sm.families.Gamma,
sm.families.InverseGaussian,
sm.families.NegativeBinomial)):
llf = self.res1.model.family.loglike(self.res1.model.endog,
self.res1.mu,
self.res1.model.var_weights,
self.res1.model.freq_weights,
scale=1)
else:
llf = self.res1.llf
assert_almost_equal(llf, self.res2.llf, self.decimal_loglike)
decimal_null_deviance = DECIMAL_4
def test_null_deviance(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", DomainWarning)
assert_almost_equal(self.res1.null_deviance,
self.res2.null_deviance,
self.decimal_null_deviance)
decimal_bic = DECIMAL_4
def test_bic(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
assert_almost_equal(self.res1.bic,
self.res2.bic_Stata,
self.decimal_bic)
def test_degrees(self):
assert_equal(self.res1.model.df_resid,self.res2.df_resid)
decimal_fittedvalues = DECIMAL_4
def test_fittedvalues(self):
assert_almost_equal(self.res1.fittedvalues, self.res2.fittedvalues,
self.decimal_fittedvalues)
def test_tpvalues(self):
# test comparing tvalues and pvalues with normal implementation
# make sure they use normal distribution (inherited in results class)
params = self.res1.params
tvalues = params / self.res1.bse
pvalues = stats.norm.sf(np.abs(tvalues)) * 2
half_width = stats.norm.isf(0.025) * self.res1.bse
conf_int = np.column_stack((params - half_width, params + half_width))
if isinstance(tvalues, pd.Series):
assert_series_equal(self.res1.tvalues, tvalues)
else:
assert_almost_equal(self.res1.tvalues, tvalues)
assert_almost_equal(self.res1.pvalues, pvalues)
assert_almost_equal(self.res1.conf_int(), conf_int)
def test_pearson_chi2(self):
if hasattr(self.res2, 'pearson_chi2'):
assert_allclose(self.res1.pearson_chi2, self.res2.pearson_chi2,
atol=1e-6, rtol=1e-6)
def test_prsquared(self):
if hasattr(self.res2, 'prsquared'):
assert_allclose(self.res1.pseudo_rsquared(kind="mcf"),
self.res2.prsquared, rtol=0.05)
if hasattr(self.res2, 'prsquared_cox_snell'):
assert_allclose(float(self.res1.pseudo_rsquared(kind="cs")),
self.res2.prsquared_cox_snell, rtol=0.05)
@pytest.mark.smoke
def test_summary(self):
self.res1.summary()
@pytest.mark.smoke
def test_summary2(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", DomainWarning)
self.res1.summary2()
def test_get_distribution(self):
res1 = self.res1
if not hasattr(res1.model.family, "get_distribution"):
            # only Tweedie does not have get_distribution
pytest.skip("get_distribution not available")
if isinstance(res1.model.family, sm.families.NegativeBinomial):
res_scale = 1 # QMLE scale can differ from 1
else:
res_scale = res1.scale
distr = res1.model.family.get_distribution(res1.fittedvalues,
res_scale)
var_endog = res1.model.family.variance(res1.fittedvalues) * res_scale
m, v = distr.stats()
assert_allclose(res1.fittedvalues, m, rtol=1e-13)
assert_allclose(var_endog, v, rtol=1e-13)
# check model method
distr2 = res1.model.get_distribution(res1.params, res_scale)
for k in distr2.kwds:
assert_allclose(distr.kwds[k], distr2.kwds[k], rtol=1e-13)
class CheckComparisonMixin(object):
def test_compare_discrete(self):
res1 = self.res1
resd = self.resd
assert_allclose(res1.llf, resd.llf, rtol=1e-10)
score_obs1 = res1.model.score_obs(res1.params * 0.98)
score_obsd = resd.model.score_obs(resd.params * 0.98)
assert_allclose(score_obs1, score_obsd, rtol=1e-10)
        # score: equals score_obs summed over observations; ~0 at the MLE
score1 = res1.model.score(res1.params * 0.98)
assert_allclose(score1, score_obs1.sum(0), atol=1e-20)
score0 = res1.model.score(res1.params)
assert_allclose(score0, np.zeros(score_obs1.shape[1]), atol=5e-7)
hessian1 = res1.model.hessian(res1.params * 0.98, observed=False)
hessiand = resd.model.hessian(resd.params * 0.98)
assert_allclose(hessian1, hessiand, rtol=1e-10)
hessian1 = res1.model.hessian(res1.params * 0.98, observed=True)
hessiand = resd.model.hessian(resd.params * 0.98)
assert_allclose(hessian1, hessiand, rtol=1e-9)
def test_score_test(self):
res1 = self.res1
        # artificial example: evaluated at the MLE the statistic is zero; strictly, k_constraints should be 0
st, pv, df = res1.model.score_test(res1.params, k_constraints=1)
assert_allclose(st, 0, atol=1e-20)
assert_allclose(pv, 1, atol=1e-10)
assert_equal(df, 1)
st, pv, df = res1.model.score_test(res1.params, k_constraints=0)
assert_allclose(st, 0, atol=1e-20)
assert_(np.isnan(pv), msg=repr(pv))
assert_equal(df, 0)
        # TODO: no verified numbers; largely a smoke test
exog_extra = res1.model.exog[:,1]**2
st, pv, df = res1.model.score_test(res1.params, exog_extra=exog_extra)
assert_array_less(0.1, st)
assert_array_less(0.1, pv)
assert_equal(df, 1)
def test_get_prediction(self):
pred1 = self.res1.get_prediction() # GLM
predd = self.resd.get_prediction() # discrete class
assert_allclose(predd.predicted, pred1.predicted_mean, rtol=1e-11)
assert_allclose(predd.se, pred1.se_mean, rtol=1e-6)
assert_allclose(predd.summary_frame().values,
pred1.summary_frame().values, rtol=1e-6)
class TestGlmGaussian(CheckModelResultsMixin):
@classmethod
def setup_class(cls):
'''
Test Gaussian family with canonical identity link
'''
# Test Precisions
cls.decimal_resids = DECIMAL_3
cls.decimal_params = DECIMAL_2
cls.decimal_bic = DECIMAL_0
cls.decimal_bse = DECIMAL_3
from statsmodels.datasets.longley import load
cls.data = load()
cls.data.endog = np.asarray(cls.data.endog)
cls.data.exog = np.asarray(cls.data.exog)
cls.data.exog = add_constant(cls.data.exog, prepend=False)
cls.res1 = GLM(cls.data.endog, cls.data.exog,
family=sm.families.Gaussian()).fit()
from .results.results_glm import Longley
cls.res2 = Longley()
def test_compare_OLS(self):
res1 = self.res1
# OLS does not define score_obs
from statsmodels.regression.linear_model import OLS
resd = OLS(self.data.endog, self.data.exog).fit()
self.resd = resd # attach to access from the outside
assert_allclose(res1.llf, resd.llf, rtol=1e-10)
score_obs1 = res1.model.score_obs(res1.params, scale=None)
score_obsd = resd.resid[:, None] / resd.scale * resd.model.exog
# low precision because of badly scaled exog
assert_allclose(score_obs1, score_obsd, rtol=1e-8)
score_obs1 = res1.model.score_obs(res1.params, scale=1)
score_obsd = resd.resid[:, None] * resd.model.exog
assert_allclose(score_obs1, score_obsd, rtol=1e-8)
hess_obs1 = res1.model.hessian(res1.params, scale=None)
hess_obsd = -1. / resd.scale * resd.model.exog.T.dot(resd.model.exog)
# low precision because of badly scaled exog
assert_allclose(hess_obs1, hess_obsd, rtol=1e-8)
# FIXME: enable or delete
# def setup(self):
# if skipR:
# raise SkipTest, "Rpy not installed."
# Gauss = r.gaussian
# self.res2 = RModel(self.data.endog, self.data.exog, r.glm, family=Gauss)
# self.res2.resids = np.array(self.res2.resid)[:,None]*np.ones((1,5))
# self.res2.null_deviance = 185008826 # taken from R. Rpy bug?
class TestGlmGaussianGradient(TestGlmGaussian):
@classmethod
def setup_class(cls):
'''
Test Gaussian family with canonical identity link
'''
# Test Precisions
cls.decimal_resids = DECIMAL_3
cls.decimal_params = DECIMAL_2
cls.decimal_bic = DECIMAL_0
cls.decimal_bse = DECIMAL_2
from statsmodels.datasets.longley import load
cls.data = load()
cls.data.endog = np.asarray(cls.data.endog)
cls.data.exog = np.asarray(cls.data.exog)
cls.data.exog = add_constant(cls.data.exog, prepend=False)
cls.res1 = GLM(cls.data.endog, cls.data.exog,
family=sm.families.Gaussian()).fit(method='newton')
from .results.results_glm import Longley
cls.res2 = Longley()
class TestGaussianLog(CheckModelResultsMixin):
@classmethod
def setup_class(cls):
# Test Precision
cls.decimal_aic_R = DECIMAL_0
cls.decimal_aic_Stata = DECIMAL_2
cls.decimal_loglike = DECIMAL_0
cls.decimal_null_deviance = DECIMAL_1
nobs = 100
x = np.arange(nobs)
np.random.seed(54321)
# y = 1.0 - .02*x - .001*x**2 + 0.001 * np.random.randn(nobs)
cls.X = np.c_[np.ones((nobs,1)),x,x**2]
cls.lny = np.exp(-(-1.0 + 0.02*x + 0.0001*x**2)) +\
0.001 * np.random.randn(nobs)
GaussLog_Model = GLM(cls.lny, cls.X,
family=sm.families.Gaussian(sm.families.links.log()))
cls.res1 = GaussLog_Model.fit()
from .results.results_glm import GaussianLog
cls.res2 = GaussianLog()
# FIXME: enable or delete
# def setup(cls):
# if skipR:
# raise SkipTest, "Rpy not installed"
# GaussLogLink = r.gaussian(link = "log")
# GaussLog_Res_R = RModel(cls.lny, cls.X, r.glm, family=GaussLogLink)
# cls.res2 = GaussLog_Res_R
class TestGaussianInverse(CheckModelResultsMixin):
@classmethod
def setup_class(cls):
# Test Precisions
cls.decimal_bic = DECIMAL_1
cls.decimal_aic_R = DECIMAL_1
cls.decimal_aic_Stata = DECIMAL_3
cls.decimal_loglike = DECIMAL_1
cls.decimal_resids = DECIMAL_3
nobs = 100
x = np.arange(nobs)
np.random.seed(54321)
y = 1.0 + 2.0 * x + x**2 + 0.1 * np.random.randn(nobs)
cls.X = np.c_[np.ones((nobs,1)),x,x**2]
cls.y_inv = (1. + .02*x + .001*x**2)**-1 + .001 * np.random.randn(nobs)
InverseLink_Model = GLM(cls.y_inv, cls.X,
family=sm.families.Gaussian(sm.families.links.inverse_power()))
InverseLink_Res = InverseLink_Model.fit()
cls.res1 = InverseLink_Res
from .results.results_glm import GaussianInverse
cls.res2 = GaussianInverse()
# FIXME: enable or delete
# def setup(cls):
# if skipR:
# raise SkipTest, "Rpy not installed."
# InverseLink = r.gaussian(link = "inverse")
# InverseLink_Res_R = RModel(cls.y_inv, cls.X, r.glm, family=InverseLink)
# cls.res2 = InverseLink_Res_R
class TestGlmBinomial(CheckModelResultsMixin):
@classmethod
def setup_class(cls):
'''
Test Binomial family with canonical logit link using star98 dataset.
'''
cls.decimal_resids = DECIMAL_1
cls.decimal_bic = DECIMAL_2
from statsmodels.datasets.star98 import load
from .results.results_glm import Star98
data = load()
data.endog = np.asarray(data.endog)
data.exog = np.asarray(data.exog)
data.exog = add_constant(data.exog, prepend=False)
cls.res1 = GLM(data.endog, data.exog,
family=sm.families.Binomial()).fit()
# NOTE: if you want to replicate with RModel
# res2 = RModel(data.endog[:,0]/trials, data.exog, r.glm,
# family=r.binomial, weights=trials)
cls.res2 = Star98()
def test_endog_dtype(self):
from statsmodels.datasets.star98 import load
data = load()
data.exog = add_constant(data.exog, prepend=False)
endog = data.endog.astype(int)
res2 = GLM(endog, data.exog, family=sm.families.Binomial()).fit()
assert_allclose(res2.params, self.res1.params)
endog = data.endog.astype(np.double)
res3 = GLM(endog, data.exog, family=sm.families.Binomial()).fit()
assert_allclose(res3.params, self.res1.params)
def test_invalid_endog(self, reset_randomstate):
# GH2733 inspired check
endog = np.random.randint(0, 100, size=(1000, 3))
exog = np.random.standard_normal((1000, 2))
with pytest.raises(ValueError, match='endog has more than 2 columns'):
GLM(endog, exog, family=sm.families.Binomial())
def test_invalid_endog_formula(self, reset_randomstate):
# GH2733
n = 200
exog = np.random.normal(size=(n, 2))
endog = np.random.randint(0, 3, size=n).astype(str)
# formula interface
data = pd.DataFrame({"y": endog, "x1": exog[:, 0], "x2": exog[:, 1]})
with pytest.raises(ValueError, match='array with multiple columns'):
sm.GLM.from_formula("y ~ x1 + x2", data,
family=sm.families.Binomial())
def test_get_distribution_binom_count(self):
# test for binomial counts with n_trials > 1
res1 = self.res1
res_scale = 1 # QMLE scale can differ from 1
mu_prob = res1.fittedvalues
n = res1.model.n_trials
distr = res1.model.family.get_distribution(mu_prob, res_scale,
n_trials=n)
var_endog = res1.model.family.variance(mu_prob) * res_scale
m, v = distr.stats()
assert_allclose(mu_prob * n, m, rtol=1e-13)
assert_allclose(var_endog * n, v, rtol=1e-13)
# check model method
distr2 = res1.model.get_distribution(res1.params, res_scale,
n_trials=n)
for k in distr2.kwds:
assert_allclose(distr.kwds[k], distr2.kwds[k], rtol=1e-13)
# FIXME: enable/xfail/skip or delete
# TODO:
# Non-Canonical Links for the Binomial family require the algorithm to be
# slightly changed
# class TestGlmBinomialLog(CheckModelResultsMixin):
# pass
# class TestGlmBinomialLogit(CheckModelResultsMixin):
# pass
# class TestGlmBinomialProbit(CheckModelResultsMixin):
# pass
# class TestGlmBinomialCloglog(CheckModelResultsMixin):
# pass
# class TestGlmBinomialPower(CheckModelResultsMixin):
# pass
# class TestGlmBinomialLoglog(CheckModelResultsMixin):
# pass
# class TestGlmBinomialLogc(CheckModelResultsMixin):
# TODO: need include logc link
# pass
class TestGlmBernoulli(CheckModelResultsMixin, CheckComparisonMixin):
@classmethod
def setup_class(cls):
from .results.results_glm import Lbw
cls.res2 = Lbw()
cls.res1 = GLM(cls.res2.endog, cls.res2.exog,
family=sm.families.Binomial()).fit()
modd = discrete.Logit(cls.res2.endog, cls.res2.exog)
cls.resd = modd.fit(start_params=cls.res1.params * 0.9, disp=False)
def test_score_r(self):
res1 = self.res1
res2 = self.res2
st, pv, df = res1.model.score_test(res1.params,
exog_extra=res1.model.exog[:, 1]**2)
st_res = 0.2837680293459376 # (-0.5326988167303712)**2
assert_allclose(st, st_res, rtol=1e-4)
st, pv, df = res1.model.score_test(res1.params,
exog_extra=res1.model.exog[:, 0]**2)
st_res = 0.6713492821514992 # (-0.8193590679009413)**2
assert_allclose(st, st_res, rtol=1e-4)
select = list(range(9))
select.pop(7)
res1b = GLM(res2.endog, res2.exog.iloc[:, select],
family=sm.families.Binomial()).fit()
tres = res1b.model.score_test(res1b.params,
exog_extra=res1.model.exog[:, -2])
tres = np.asarray(tres[:2]).ravel()
tres_r = (2.7864148487452, 0.0950667)
assert_allclose(tres, tres_r, rtol=1e-4)
cmd_r = """\
data = read.csv("...statsmodels\\statsmodels\\genmod\\tests\\results\\stata_lbw_glm.csv")
data["race_black"] = data["race"] == "black"
data["race_other"] = data["race"] == "other"
mod = glm(low ~ age + lwt + race_black + race_other + smoke + ptl + ht + ui, family=binomial, data=data)
options(digits=16)
anova(mod, test="Rao")
library(statmod)
s = glm.scoretest(mod, data["age"]**2)
s**2
s = glm.scoretest(mod, data["lwt"]**2)
s**2
"""
# class TestGlmBernoulliIdentity(CheckModelResultsMixin):
# pass
# class TestGlmBernoulliLog(CheckModelResultsMixin):
# pass
# class TestGlmBernoulliProbit(CheckModelResultsMixin):
# pass
# class TestGlmBernoulliCloglog(CheckModelResultsMixin):
# pass
# class TestGlmBernoulliPower(CheckModelResultsMixin):
# pass
# class TestGlmBernoulliLoglog(CheckModelResultsMixin):
# pass
# class test_glm_bernoulli_logc(CheckModelResultsMixin):
# pass
class TestGlmGamma(CheckModelResultsMixin):
@classmethod
def setup_class(cls):
'''
Tests Gamma family with canonical inverse link (power -1)
'''
# Test Precisions
        cls.decimal_aic_R = -1  # TODO: off by about 1; we agree with Stata
cls.decimal_resids = DECIMAL_2
from statsmodels.datasets.scotland import load
from .results.results_glm import Scotvote
data = load()
data.exog = add_constant(data.exog, prepend=False)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
res1 = GLM(data.endog, data.exog,
family=sm.families.Gamma()).fit()
cls.res1 = res1
# res2 = RModel(data.endog, data.exog, r.glm, family=r.Gamma)
res2 = Scotvote()
res2.aic_R += 2 # R does not count degree of freedom for scale with gamma
cls.res2 = res2
class TestGlmGammaLog(CheckModelResultsMixin):
@classmethod
def setup_class(cls):
# Test Precisions
cls.decimal_resids = DECIMAL_3
cls.decimal_aic_R = DECIMAL_0
cls.decimal_fittedvalues = DECIMAL_3
from .results.results_glm import CancerLog
res2 = CancerLog()
cls.res1 = GLM(res2.endog, res2.exog,
family=sm.families.Gamma(link=sm.families.links.log())).fit()
cls.res2 = res2
# FIXME: enable or delete
# def setup(cls):
# if skipR:
# raise SkipTest, "Rpy not installed."
# cls.res2 = RModel(cls.data.endog, cls.data.exog, r.glm,
# family=r.Gamma(link="log"))
# cls.res2.null_deviance = 27.92207137420696 # From R (bug in rpy)
# cls.res2.bic = -154.1582089453923 # from Stata
class TestGlmGammaIdentity(CheckModelResultsMixin):
@classmethod
def setup_class(cls):
# Test Precisions
cls.decimal_resids = -100 #TODO Very off from Stata?
cls.decimal_params = DECIMAL_2
cls.decimal_aic_R = DECIMAL_0
cls.decimal_loglike = DECIMAL_1
from .results.results_glm import CancerIdentity
res2 = CancerIdentity()
with warnings.catch_warnings():
warnings.simplefilter("ignore")
fam = sm.families.Gamma(link=sm.families.links.identity())
cls.res1 = GLM(res2.endog, res2.exog, family=fam).fit()
cls.res2 = res2
# FIXME: enable or delete
# def setup(cls):
# if skipR:
# raise SkipTest, "Rpy not installed."
# cls.res2 = RModel(cls.data.endog, cls.data.exog, r.glm,
# family=r.Gamma(link="identity"))
# cls.res2.null_deviance = 27.92207137420696 # from R, Rpy bug
class TestGlmPoisson(CheckModelResultsMixin, CheckComparisonMixin):
@classmethod
def setup_class(cls):
'''
Tests Poisson family with canonical log link.
Test results were obtained by R.
'''
from .results.results_glm import Cpunish
cls.data = cpunish.load()
cls.data.endog = np.asarray(cls.data.endog)
cls.data.exog = np.asarray(cls.data.exog)
cls.data.exog[:, 3] = np.log(cls.data.exog[:, 3])
cls.data.exog = add_constant(cls.data.exog, prepend=False)
cls.res1 = GLM(cls.data.endog, cls.data.exog,
family=sm.families.Poisson()).fit()
cls.res2 = Cpunish()
# compare with discrete, start close to save time
modd = discrete.Poisson(cls.data.endog, cls.data.exog)
cls.resd = modd.fit(start_params=cls.res1.params * 0.9, disp=False)
#class TestGlmPoissonIdentity(CheckModelResultsMixin):
# pass
#class TestGlmPoissonPower(CheckModelResultsMixin):
# pass
class TestGlmInvgauss(CheckModelResultsMixin):
@classmethod
def setup_class(cls):
'''
Tests the Inverse Gaussian family in GLM.
Notes
-----
Used the rndivgx.ado file provided by Hardin and Hilbe to
generate the data. Results are read from model_results, which
were obtained by running R_ig.s
'''
# Test Precisions
cls.decimal_aic_R = DECIMAL_0
cls.decimal_loglike = DECIMAL_0
from .results.results_glm import InvGauss
res2 = InvGauss()
res1 = GLM(res2.endog, res2.exog,
family=sm.families.InverseGaussian()).fit()
cls.res1 = res1
cls.res2 = res2
def test_get_distribution(self):
res1 = self.res1
distr = res1.model.family.get_distribution(res1.fittedvalues,
res1.scale)
var_endog = res1.model.family.variance(res1.fittedvalues) * res1.scale
m, v = distr.stats()
assert_allclose(res1.fittedvalues, m, rtol=1e-13)
assert_allclose(var_endog, v, rtol=1e-13)
class TestGlmInvgaussLog(CheckModelResultsMixin):
@classmethod
def setup_class(cls):
# Test Precisions
cls.decimal_aic_R = -10 # Big difference vs R.
cls.decimal_resids = DECIMAL_3
from .results.results_glm import InvGaussLog
res2 = InvGaussLog()
cls.res1 = GLM(res2.endog, res2.exog,
family=sm.families.InverseGaussian(
link=sm.families.links.log())).fit()
cls.res2 = res2
# FIXME: enable or delete
# def setup(cls):
# if skipR:
# raise SkipTest, "Rpy not installed."
# cls.res2 = RModel(cls.data.endog, cls.data.exog, r.glm,
# family=r.inverse_gaussian(link="log"))
# cls.res2.null_deviance = 335.1539777981053 # from R, Rpy bug
# cls.res2.llf = -12162.72308 # from Stata, R's has big rounding diff
class TestGlmInvgaussIdentity(CheckModelResultsMixin):
@classmethod
def setup_class(cls):
# Test Precisions
cls.decimal_aic_R = -10 #TODO: Big difference vs R
cls.decimal_fittedvalues = DECIMAL_3
cls.decimal_params = DECIMAL_3
from .results.results_glm import Medpar1
data = Medpar1()
with warnings.catch_warnings():
warnings.simplefilter("ignore")
cls.res1 = GLM(data.endog, data.exog,
family=sm.families.InverseGaussian(
link=sm.families.links.identity())).fit()
from .results.results_glm import InvGaussIdentity
cls.res2 = InvGaussIdentity()
# FIXME: enable or delete
# def setup(cls):
# if skipR:
# raise SkipTest, "Rpy not installed."
# cls.res2 = RModel(cls.data.endog, cls.data.exog, r.glm,
# family=r.inverse_gaussian(link="identity"))
# cls.res2.null_deviance = 335.1539777981053 # from R, Rpy bug
# cls.res2.llf = -12163.25545 # from Stata, big diff with R
class TestGlmNegbinomial(CheckModelResultsMixin):
@classmethod
def setup_class(cls):
'''
Test Negative Binomial family with log link
'''
# Test Precision
cls.decimal_resid = DECIMAL_1
cls.decimal_params = DECIMAL_3
cls.decimal_resids = -1 # 1 % mismatch at 0
cls.decimal_fittedvalues = DECIMAL_1
from statsmodels.datasets.committee import load
cls.data = load()
cls.data.endog = np.asarray(cls.data.endog)
cls.data.exog = np.asarray(cls.data.exog)
cls.data.exog[:,2] = np.log(cls.data.exog[:,2])
interaction = cls.data.exog[:,2]*cls.data.exog[:,1]
cls.data.exog = np.column_stack((cls.data.exog,interaction))
cls.data.exog = add_constant(cls.data.exog, prepend=False)
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=DomainWarning)
fam = sm.families.NegativeBinomial()
cls.res1 = GLM(cls.data.endog, cls.data.exog,
family=fam).fit(scale='x2')
from .results.results_glm import Committee
res2 = Committee()
res2.aic_R += 2 # They do not count a degree of freedom for the scale
cls.res2 = res2
# FIXME: enable or delete
# def setup(self):
# if skipR:
# raise SkipTest, "Rpy not installed"
# r.library('MASS') # this does not work when done in rmodelwrap?
# self.res2 = RModel(self.data.endog, self.data.exog, r.glm,
# family=r.negative_binomial(1))
# self.res2.null_deviance = 27.8110469364343
# FIXME: enable/xfail/skip or delete
#class TestGlmNegbinomial_log(CheckModelResultsMixin):
# pass
# FIXME: enable/xfail/skip or delete
#class TestGlmNegbinomial_power(CheckModelResultsMixin):
# pass
# FIXME: enable/xfail/skip or delete
#class TestGlmNegbinomial_nbinom(CheckModelResultsMixin):
# pass
class TestGlmPoissonOffset(CheckModelResultsMixin):
@classmethod
def setup_class(cls):
from .results.results_glm import Cpunish_offset
cls.decimal_params = DECIMAL_4
cls.decimal_bse = DECIMAL_4
cls.decimal_aic_R = 3
data = cpunish.load()
data.endog = np.asarray(data.endog)
data.exog = np.asarray(data.exog)
data.exog[:, 3] = np.log(data.exog[:, 3])
data.exog = add_constant(data.exog, prepend=True)
exposure = [100] * len(data.endog)
cls.data = data
cls.exposure = exposure
cls.res1 = GLM(data.endog, data.exog, family=sm.families.Poisson(),
exposure=exposure).fit()
cls.res2 = Cpunish_offset()
def test_missing(self):
# make sure offset is dropped correctly
endog = self.data.endog.copy()
endog[[2,4,6,8]] = np.nan
mod = GLM(endog, self.data.exog, family=sm.families.Poisson(),
exposure=self.exposure, missing='drop')
assert_equal(mod.exposure.shape[0], 13)
def test_offset_exposure(self):
# exposure=x and offset=log(x) should have the same effect
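        # With a log link, mu = exp(X @ beta + offset + log(exposure)), so
        # folding log(exposure) into the offset leaves the model unchanged.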
np.random.seed(382304)
endog = np.random.randint(0, 10, 100)
exog = np.random.normal(size=(100,3))
exposure = np.random.uniform(1, 2, 100)
offset = np.random.uniform(1, 2, 100)
mod1 = GLM(endog, exog, family=sm.families.Poisson(),
offset=offset, exposure=exposure).fit()
offset2 = offset + np.log(exposure)
mod2 = GLM(endog, exog, family=sm.families.Poisson(),
offset=offset2).fit()
assert_almost_equal(mod1.params, mod2.params)
assert_allclose(mod1.null, mod2.null, rtol=1e-10)
# test recreating model
mod1_ = mod1.model
kwds = mod1_._get_init_kwds()
assert_allclose(kwds['exposure'], exposure, rtol=1e-14)
assert_allclose(kwds['offset'], mod1_.offset, rtol=1e-14)
mod3 = mod1_.__class__(mod1_.endog, mod1_.exog, **kwds)
assert_allclose(mod3.exposure, mod1_.exposure, rtol=1e-14)
assert_allclose(mod3.offset, mod1_.offset, rtol=1e-14)
# test fit_regularized exposure, see #4605
resr1 = mod1.model.fit_regularized()
resr2 = mod2.model.fit_regularized()
assert_allclose(resr1.params, resr2.params, rtol=1e-10)
def test_predict(self):
np.random.seed(382304)
endog = np.random.randint(0, 10, 100)
exog = np.random.normal(size=(100,3))
exposure = np.random.uniform(1, 2, 100)
mod1 = GLM(endog, exog, family=sm.families.Poisson(),
exposure=exposure).fit()
exog1 = np.random.normal(size=(10,3))
exposure1 = np.random.uniform(1, 2, 10)
# Doubling exposure time should double expected response
pred1 = mod1.predict(exog=exog1, exposure=exposure1)
pred2 = mod1.predict(exog=exog1, exposure=2*exposure1)
assert_almost_equal(pred2, 2*pred1)
# Check exposure defaults
pred3 = mod1.predict()
pred4 = mod1.predict(exposure=exposure)
pred5 = mod1.predict(exog=exog, exposure=exposure)
assert_almost_equal(pred3, pred4)
assert_almost_equal(pred4, pred5)
# Check offset defaults
offset = np.random.uniform(1, 2, 100)
mod2 = GLM(endog, exog, offset=offset, family=sm.families.Poisson()).fit()
pred1 = mod2.predict()
pred2 = mod2.predict(offset=offset)
pred3 = mod2.predict(exog=exog, offset=offset)
assert_almost_equal(pred1, pred2)
assert_almost_equal(pred2, pred3)
# Check that offset shifts the linear predictor
mod3 = GLM(endog, exog, family=sm.families.Poisson()).fit()
offset = np.random.uniform(1, 2, 10)
pred1 = mod3.predict(exog=exog1, offset=offset, linear=True)
pred2 = mod3.predict(exog=exog1, offset=2*offset, linear=True)
assert_almost_equal(pred2, pred1+offset)
        # Passing exposure as a pandas Series should not affect the output type
assert isinstance(
mod1.predict(exog=exog1, exposure=pd.Series(exposure1)),
np.ndarray
)
def test_perfect_pred(iris):
y = iris[:, -1]
X = iris[:, :-1]
X = X[y != 2]
y = y[y != 2]
X = add_constant(X, prepend=True)
glm = GLM(y, X, family=sm.families.Binomial())
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
assert_raises(PerfectSeparationError, glm.fit)
def test_score_test_ols():
# nicer example than Longley
from statsmodels.regression.linear_model import OLS
np.random.seed(5)
nobs = 100
sige = 0.5
x = np.random.uniform(0, 1, size=(nobs, 5))
x[:, 0] = 1
beta = 1. / np.arange(1., x.shape[1] + 1)
y = x.dot(beta) + sige * np.random.randn(nobs)
res_ols = OLS(y, x).fit()
res_olsc = OLS(y, x[:, :-2]).fit()
co = res_ols.compare_lm_test(res_olsc, demean=False)
res_glm = GLM(y, x[:, :-2], family=sm.families.Gaussian()).fit()
co2 = res_glm.model.score_test(res_glm.params, exog_extra=x[:, -2:])
    # difference comes from df_resid versus nobs in the scale; see #1786
assert_allclose(co[0] * 97 / 100., co2[0], rtol=1e-13)
def test_attribute_writable_resettable():
# Regression test for mutables and class constructors.
data = sm.datasets.longley.load()
endog, exog = data.endog, data.exog
glm_model = sm.GLM(endog, exog)
assert_equal(glm_model.family.link.power, 1.0)
glm_model.family.link.power = 2.
assert_equal(glm_model.family.link.power, 2.0)
glm_model2 = sm.GLM(endog, exog)
assert_equal(glm_model2.family.link.power, 1.0)
class TestStartParams(CheckModelResultsMixin):
@classmethod
def setup_class(cls):
'''
Test Gaussian family with canonical identity link
'''
# Test Precisions
cls.decimal_resids = DECIMAL_3
cls.decimal_params = DECIMAL_2
cls.decimal_bic = DECIMAL_0
cls.decimal_bse = DECIMAL_3
from statsmodels.datasets.longley import load
cls.data = load()
cls.data.exog = add_constant(cls.data.exog, prepend=False)
params = sm.OLS(cls.data.endog, cls.data.exog).fit().params
cls.res1 = GLM(cls.data.endog, cls.data.exog,
family=sm.families.Gaussian()).fit(start_params=params)
from .results.results_glm import Longley
cls.res2 = Longley()
def test_glm_start_params():
# see 1604
y2 = np.array('0 1 0 0 0 1'.split(), int)
wt = np.array([50,1,50,1,5,10])
y2 = np.repeat(y2, wt)
x2 = np.repeat([0,0,0.001,100,-1,-1], wt)
mod = sm.GLM(y2, sm.add_constant(x2), family=sm.families.Binomial())
res = mod.fit(start_params=[-4, -5])
np.testing.assert_almost_equal(res.params, [-4.60305022, -5.29634545], 6)
def test_loglike_no_opt():
# see 1728
y = np.asarray([0, 1, 0, 0, 1, 1, 0, 1, 1, 1])
x = np.arange(10, dtype=np.float64)
def llf(params):
lin_pred = params[0] + params[1]*x
pr = 1 / (1 + np.exp(-lin_pred))
return np.sum(y*np.log(pr) + (1-y)*np.log(1-pr))
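    # llf above computes the Bernoulli log-likelihood sum(y*log(p) + (1-y)*log(1-p))
    # for a fixed logit linear predictor, i.e. the value GLM should report when
    # fitting is skipped via maxiter=0.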
for params in [0,0], [0,1], [0.5,0.5]:
mod = sm.GLM(y, sm.add_constant(x), family=sm.families.Binomial())
res = mod.fit(start_params=params, maxiter=0)
like = llf(params)
assert_almost_equal(like, res.llf)
def test_formula_missing_exposure():
# see 2083
import statsmodels.formula.api as smf
d = {'Foo': [1, 2, 10, 149], 'Bar': [1, 2, 3, np.nan],
'constant': [1] * 4, 'exposure': np.random.uniform(size=4),
'x': [1, 3, 2, 1.5]}
df = pd.DataFrame(d)
family = sm.families.Gaussian(link=sm.families.links.log())
mod = smf.glm("Foo ~ Bar", data=df, exposure=df.exposure,
family=family)
assert_(type(mod.exposure) is np.ndarray, msg='Exposure is not ndarray')
exposure = pd.Series(np.random.uniform(size=5))
    df.loc[3, 'Bar'] = 4  # NaN is not relevant here; the ValueError comes from the shape mismatch
assert_raises(ValueError, smf.glm, "Foo ~ Bar", data=df,
exposure=exposure, family=family)
assert_raises(ValueError, GLM, df.Foo, df[['constant', 'Bar']],
exposure=exposure, family=family)
@pytest.mark.matplotlib
def test_plots(close_figures):
np.random.seed(378)
n = 200
exog = np.random.normal(size=(n, 2))
lin_pred = exog[:, 0] + exog[:, 1]**2
prob = 1 / (1 + np.exp(-lin_pred))
endog = 1 * (np.random.uniform(size=n) < prob)
model = sm.GLM(endog, exog, family=sm.families.Binomial())
result = model.fit()
import pandas as pd
from statsmodels.graphics.regressionplots import add_lowess
# array interface
for j in 0,1:
fig = result.plot_added_variable(j)
add_lowess(fig.axes[0], frac=0.5)
close_or_save(pdf, fig)
fig = result.plot_partial_residuals(j)
add_lowess(fig.axes[0], frac=0.5)
close_or_save(pdf, fig)
fig = result.plot_ceres_residuals(j)
add_lowess(fig.axes[0], frac=0.5)
close_or_save(pdf, fig)
# formula interface
data = pd.DataFrame({"y": endog, "x1": exog[:, 0], "x2": exog[:, 1]})
model = sm.GLM.from_formula("y ~ x1 + x2", data, family=sm.families.Binomial())
result = model.fit()
for j in 0,1:
xname = ["x1", "x2"][j]
fig = result.plot_added_variable(xname)
add_lowess(fig.axes[0], frac=0.5)
close_or_save(pdf, fig)
fig = result.plot_partial_residuals(xname)
add_lowess(fig.axes[0], frac=0.5)
close_or_save(pdf, fig)
fig = result.plot_ceres_residuals(xname)
add_lowess(fig.axes[0], frac=0.5)
close_or_save(pdf, fig)
def gen_endog(lin_pred, family_class, link, binom_version=0):
np.random.seed(872)
fam = sm.families
mu = link().inverse(lin_pred)
if family_class == fam.Binomial:
if binom_version == 0:
endog = 1*(np.random.uniform(size=len(lin_pred)) < mu)
else:
endog = np.empty((len(lin_pred), 2))
n = 10
endog[:, 0] = (np.random.uniform(size=(len(lin_pred), n)) < mu[:, None]).sum(1)
endog[:, 1] = n - endog[:, 0]
elif family_class == fam.Poisson:
endog = np.random.poisson(mu)
elif family_class == fam.Gamma:
endog = np.random.gamma(2, mu)
elif family_class == fam.Gaussian:
endog = mu + 2 * np.random.normal(size=len(lin_pred))
elif family_class == fam.NegativeBinomial:
from scipy.stats.distributions import nbinom
endog = nbinom.rvs(mu, 0.5)
elif family_class == fam.InverseGaussian:
from scipy.stats.distributions import invgauss
endog = invgauss.rvs(mu, scale=20)
else:
raise ValueError
return endog
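# Hedged usage sketch (not part of the original tests): gen_endog pairs a
# linear predictor with a family class and link class to simulate a response:
#
#   lin_pred = np.random.uniform(size=50)
#   y = gen_endog(lin_pred, sm.families.Poisson, sm.families.links.log)
#
# The signature matches the helper defined above.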
@pytest.mark.smoke
def test_summary():
np.random.seed(4323)
n = 100
exog = np.random.normal(size=(n, 2))
exog[:, 0] = 1
endog = np.random.normal(size=n)
for method in ["irls", "cg"]:
fa = sm.families.Gaussian()
model = sm.GLM(endog, exog, family=fa)
rslt = model.fit(method=method)
s = rslt.summary()
def check_score_hessian(results):
    # compare the model's score and hessian with numerical derivatives
params = results.params
# avoid checking score at MLE, score close to zero
sc = results.model.score(params * 0.98, scale=1)
# cs currently (0.9) does not work for all families
llfunc = lambda x: results.model.loglike(x, scale=1) # noqa
sc2 = approx_fprime(params * 0.98, llfunc)
assert_allclose(sc, sc2, rtol=1e-4, atol=1e-4)
hess = results.model.hessian(params, scale=1)
hess2 = approx_hess(params, llfunc)
assert_allclose(hess, hess2, rtol=1e-4)
scfunc = lambda x: results.model.score(x, scale=1) # noqa
hess3 = approx_fprime(params, scfunc)
assert_allclose(hess, hess3, rtol=1e-4)
def test_gradient_irls():
# Compare the results when using gradient optimization and IRLS.
# TODO: Find working examples for inverse_squared link
np.random.seed(87342)
fam = sm.families
lnk = sm.families.links
families = [(fam.Binomial, [lnk.logit, lnk.probit, lnk.cloglog, lnk.log, lnk.cauchy]),
(fam.Poisson, [lnk.log, lnk.identity, lnk.sqrt]),
(fam.Gamma, [lnk.log, lnk.identity, lnk.inverse_power]),
(fam.Gaussian, [lnk.identity, lnk.log, lnk.inverse_power]),
(fam.InverseGaussian, [lnk.log, lnk.identity, lnk.inverse_power, lnk.inverse_squared]),
(fam.NegativeBinomial, [lnk.log, lnk.inverse_power, lnk.inverse_squared, lnk.identity])]
n = 100
p = 3
exog = np.random.normal(size=(n, p))
exog[:, 0] = 1
skip_one = False
for family_class, family_links in families:
for link in family_links:
for binom_version in 0,1:
if family_class != fam.Binomial and binom_version == 1:
continue
if (family_class, link) == (fam.Poisson, lnk.identity):
lin_pred = 20 + exog.sum(1)
elif (family_class, link) == (fam.Binomial, lnk.log):
lin_pred = -1 + exog.sum(1) / 8
elif (family_class, link) == (fam.Poisson, lnk.sqrt):
lin_pred = 2 + exog.sum(1)
elif (family_class, link) == (fam.InverseGaussian, lnk.log):
#skip_zero = True
lin_pred = -1 + exog.sum(1)
elif (family_class, link) == (fam.InverseGaussian, lnk.identity):
lin_pred = 20 + 5*exog.sum(1)
lin_pred = np.clip(lin_pred, 1e-4, np.inf)
elif (family_class, link) == (fam.InverseGaussian, lnk.inverse_squared):
lin_pred = 0.5 + exog.sum(1) / 5
continue # skip due to non-convergence
elif (family_class, link) == (fam.InverseGaussian, lnk.inverse_power):
lin_pred = 1 + exog.sum(1) / 5
elif (family_class, link) == (fam.NegativeBinomial, lnk.identity):
lin_pred = 20 + 5*exog.sum(1)
lin_pred = np.clip(lin_pred, 1e-4, np.inf)
elif (family_class, link) == (fam.NegativeBinomial, lnk.inverse_squared):
lin_pred = 0.1 + np.random.uniform(size=exog.shape[0])
continue # skip due to non-convergence
elif (family_class, link) == (fam.NegativeBinomial, lnk.inverse_power):
lin_pred = 1 + exog.sum(1) / 5
elif (family_class, link) == (fam.Gaussian, lnk.inverse_power):
# adding skip because of convergence failure
skip_one = True
# the following fails with identity link, because endog < 0
# elif family_class == fam.Gamma:
# lin_pred = 0.5 * exog.sum(1) + np.random.uniform(size=exog.shape[0])
else:
lin_pred = np.random.uniform(size=exog.shape[0])
endog = gen_endog(lin_pred, family_class, link, binom_version)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
mod_irls = sm.GLM(endog, exog, family=family_class(link=link()))
rslt_irls = mod_irls.fit(method="IRLS")
                if (family_class, link) not in [(fam.Poisson, lnk.sqrt),
                                                (fam.Gamma, lnk.inverse_power),
                                                (fam.InverseGaussian, lnk.identity)
                                                ]:
check_score_hessian(rslt_irls)
# Try with and without starting values.
                for max_start_irls, start_params in ((0, rslt_irls.params),
                                                     (3, None)):
# TODO: skip convergence failures for now
if max_start_irls > 0 and skip_one:
continue
with warnings.catch_warnings():
warnings.simplefilter("ignore")
mod_gradient = sm.GLM(endog, exog, family=family_class(link=link()))
rslt_gradient = mod_gradient.fit(max_start_irls=max_start_irls,
start_params=start_params,
method="newton", maxiter=300)
assert_allclose(rslt_gradient.params,
rslt_irls.params, rtol=1e-6, atol=5e-5)
assert_allclose(rslt_gradient.llf, rslt_irls.llf,
rtol=1e-6, atol=1e-6)
assert_allclose(rslt_gradient.scale, rslt_irls.scale,
rtol=1e-6, atol=1e-6)
                    # Get the standard errors using expected information.
                    ehess = mod_gradient.hessian(rslt_gradient.params, observed=False)
                    gradient_bse = np.sqrt(-np.diag(np.linalg.inv(ehess)))
assert_allclose(gradient_bse, rslt_irls.bse, rtol=1e-6, atol=5e-5)
# rslt_irls.bse corresponds to observed=True
assert_allclose(rslt_gradient.bse, rslt_irls.bse, rtol=0.2, atol=5e-5)
rslt_gradient_eim = mod_gradient.fit(max_start_irls=0,
cov_type='eim',
start_params=rslt_gradient.params,
method="newton", maxiter=300)
assert_allclose(rslt_gradient_eim.bse, rslt_irls.bse, rtol=5e-5, atol=0)
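# For a canonical link the observed and expected information matrices
# coincide, which is why only the non-canonical links above need the loose
# rtol in the bse comparison. A minimal illustration for the canonical
# Poisson/log case (hypothetical sketch, not one of the original tests):
def _demo_eim_equals_oim_canonical():
    np.random.seed(0)
    x = np.random.normal(size=(200, 2))
    x[:, 0] = 1
    y = np.random.poisson(np.exp(x.sum(1) / 4))
    mod = sm.GLM(y, x, family=sm.families.Poisson())
    res = mod.fit()
    oim = mod.hessian(res.params, observed=True)
    eim = mod.hessian(res.params, observed=False)
    assert_allclose(oim, eim, rtol=1e-8, atol=1e-8)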
def test_gradient_irls_eim():
    # Compare the results when using EIM-based gradient optimization and IRLS.
# TODO: Find working examples for inverse_squared link
np.random.seed(87342)
fam = sm.families
lnk = sm.families.links
families = [(fam.Binomial, [lnk.logit, lnk.probit, lnk.cloglog, lnk.log,
lnk.cauchy]),
(fam.Poisson, [lnk.log, lnk.identity, lnk.sqrt]),
(fam.Gamma, [lnk.log, lnk.identity, lnk.inverse_power]),
(fam.Gaussian, [lnk.identity, lnk.log, lnk.inverse_power]),
(fam.InverseGaussian, [lnk.log, lnk.identity,
lnk.inverse_power,
lnk.inverse_squared]),
(fam.NegativeBinomial, [lnk.log, lnk.inverse_power,
lnk.inverse_squared, lnk.identity])]
n = 100
p = 3
exog = np.random.normal(size=(n, p))
exog[:, 0] = 1
skip_one = False
for family_class, family_links in families:
for link in family_links:
for binom_version in 0, 1:
if family_class != fam.Binomial and binom_version == 1:
continue
if (family_class, link) == (fam.Poisson, lnk.identity):
lin_pred = 20 + exog.sum(1)
elif (family_class, link) == (fam.Binomial, lnk.log):
lin_pred = -1 + exog.sum(1) / 8
elif (family_class, link) == (fam.Poisson, lnk.sqrt):
lin_pred = 2 + exog.sum(1)
elif (family_class, link) == (fam.InverseGaussian, lnk.log):
# skip_zero = True
lin_pred = -1 + exog.sum(1)
elif (family_class, link) == (fam.InverseGaussian,
lnk.identity):
lin_pred = 20 + 5*exog.sum(1)
lin_pred = np.clip(lin_pred, 1e-4, np.inf)
elif (family_class, link) == (fam.InverseGaussian,
lnk.inverse_squared):
lin_pred = 0.5 + exog.sum(1) / 5
continue # skip due to non-convergence
elif (family_class, link) == (fam.InverseGaussian,
lnk.inverse_power):
lin_pred = 1 + exog.sum(1) / 5
elif (family_class, link) == (fam.NegativeBinomial,
lnk.identity):
lin_pred = 20 + 5*exog.sum(1)
lin_pred = np.clip(lin_pred, 1e-4, np.inf)
elif (family_class, link) == (fam.NegativeBinomial,
lnk.inverse_squared):
lin_pred = 0.1 + np.random.uniform(size=exog.shape[0])
continue # skip due to non-convergence
elif (family_class, link) == (fam.NegativeBinomial,
lnk.inverse_power):
lin_pred = 1 + exog.sum(1) / 5
elif (family_class, link) == (fam.Gaussian, lnk.inverse_power):
# adding skip because of convergence failure
skip_one = True
else:
lin_pred = np.random.uniform(size=exog.shape[0])
endog = gen_endog(lin_pred, family_class, link, binom_version)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
mod_irls = sm.GLM(endog, exog,
family=family_class(link=link()))
rslt_irls = mod_irls.fit(method="IRLS")
# Try with and without starting values.
for max_start_irls, start_params in ((0, rslt_irls.params),
(3, None)):
# TODO: skip convergence failures for now
if max_start_irls > 0 and skip_one:
continue
with warnings.catch_warnings():
warnings.simplefilter("ignore")
mod_gradient = sm.GLM(endog, exog,
family=family_class(link=link()))
rslt_gradient = mod_gradient.fit(
max_start_irls=max_start_irls,
start_params=start_params,
method="newton",
optim_hessian='eim'
)
assert_allclose(rslt_gradient.params, rslt_irls.params,
rtol=1e-6, atol=5e-5)
assert_allclose(rslt_gradient.llf, rslt_irls.llf,
rtol=1e-6, atol=1e-6)
assert_allclose(rslt_gradient.scale, rslt_irls.scale,
rtol=1e-6, atol=1e-6)
# Get the standard errors using expected information.
ehess = mod_gradient.hessian(rslt_gradient.params,
observed=False)
gradient_bse = np.sqrt(-np.diag(np.linalg.inv(ehess)))
assert_allclose(gradient_bse, rslt_irls.bse, rtol=1e-6,
atol=5e-5)
def test_glm_irls_method():
nobs, k_vars = 50, 4
np.random.seed(987126)
x = np.random.randn(nobs, k_vars - 1)
exog = add_constant(x, has_constant='add')
y = exog.sum(1) + np.random.randn(nobs)
mod = GLM(y, exog)
res1 = mod.fit()
res2 = mod.fit(wls_method='pinv', attach_wls=True)
res3 = mod.fit(wls_method='qr', attach_wls=True)
# fit_gradient does not attach mle_settings
res_g1 = mod.fit(start_params=res1.params, method='bfgs')
for r in [res1, res2, res3]:
assert_equal(r.mle_settings['optimizer'], 'IRLS')
assert_equal(r.method, 'IRLS')
assert_equal(res1.mle_settings['wls_method'], 'lstsq')
assert_equal(res2.mle_settings['wls_method'], 'pinv')
assert_equal(res3.mle_settings['wls_method'], 'qr')
assert_(hasattr(res2.results_wls.model, 'pinv_wexog'))
assert_(hasattr(res3.results_wls.model, 'exog_Q'))
# fit_gradient currently does not attach mle_settings
assert_equal(res_g1.method, 'bfgs')
class CheckWtdDuplicationMixin(object):
decimal_params = DECIMAL_4
@classmethod
def setup_class(cls):
cls.data = cpunish.load()
cls.data.endog = np.asarray(cls.data.endog)
cls.data.exog = np.asarray(cls.data.exog)
cls.endog = cls.data.endog
cls.exog = cls.data.exog
np.random.seed(1234)
cls.weight = np.random.randint(5, 100, len(cls.endog))
cls.endog_big = np.repeat(cls.endog, cls.weight)
cls.exog_big = np.repeat(cls.exog, cls.weight, axis=0)
def test_params(self):
assert_allclose(self.res1.params, self.res2.params, atol=1e-6,
rtol=1e-6)
decimal_bse = DECIMAL_4
def test_standard_errors(self):
assert_allclose(self.res1.bse, self.res2.bse, rtol=1e-5, atol=1e-6)
decimal_resids = DECIMAL_4
    # TODO: This does not work... Arrays are of different shape.
    # Perhaps we should use self.res1.model.family.resid_XXX()?
    """
    def test_residuals(self):
        resids1 = np.column_stack((self.res1.resid_pearson,
                                   self.res1.resid_deviance,
                                   self.res1.resid_working,
                                   self.res1.resid_anscombe,
                                   self.res1.resid_response))
        resids2 = np.column_stack((self.res2.resid_pearson,
                                   self.res2.resid_deviance,
                                   self.res2.resid_working,
                                   self.res2.resid_anscombe,
                                   self.res2.resid_response))
        assert_allclose(resids1, resids2, self.decimal_resids)
    """
def test_aic(self):
        # R includes the estimation of the scale as a lost dof.
        # It does not with Gamma, though.
assert_allclose(self.res1.aic, self.res2.aic, atol=1e-6, rtol=1e-6)
def test_deviance(self):
assert_allclose(self.res1.deviance, self.res2.deviance, atol=1e-6,
rtol=1e-6)
def test_scale(self):
assert_allclose(self.res1.scale, self.res2.scale, atol=1e-6, rtol=1e-6)
def test_loglike(self):
# Stata uses the below llf for these families
# We differ with R for them
        assert_allclose(self.res1.llf, self.res2.llf, rtol=1e-6)
decimal_null_deviance = DECIMAL_4
def test_null_deviance(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", DomainWarning)
assert_allclose(self.res1.null_deviance,
self.res2.null_deviance,
atol=1e-6,
rtol=1e-6)
decimal_bic = DECIMAL_4
def test_bic(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
assert_allclose(self.res1.bic, self.res2.bic, atol=1e-6, rtol=1e-6)
decimal_fittedvalues = DECIMAL_4
def test_fittedvalues(self):
res2_fitted = self.res2.predict(self.res1.model.exog)
assert_allclose(self.res1.fittedvalues, res2_fitted, atol=1e-5,
rtol=1e-5)
decimal_tpvalues = DECIMAL_4
def test_tpvalues(self):
# test comparing tvalues and pvalues with normal implementation
# make sure they use normal distribution (inherited in results class)
assert_allclose(self.res1.tvalues, self.res2.tvalues, atol=1e-6,
rtol=2e-4)
assert_allclose(self.res1.pvalues, self.res2.pvalues, atol=1e-6,
rtol=1e-6)
assert_allclose(self.res1.conf_int(), self.res2.conf_int(), atol=1e-6,
rtol=1e-6)
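# The mixin above rests on the equivalence between frequency weights and
# literal row duplication. A minimal standalone illustration of that
# equivalence (hypothetical data, not one of the original test cases):
def _demo_freq_weights_equal_duplication():
    y = np.array([1., 3., 2., 5.])
    exog = np.column_stack([np.ones(4), np.arange(4.)])
    w = np.array([2, 1, 3, 1])
    res_w = GLM(y, exog, freq_weights=w,
                family=sm.families.Poisson()).fit()
    # duplicating each row w[i] times should give the identical fit
    res_big = GLM(np.repeat(y, w), np.repeat(exog, w, axis=0),
                  family=sm.families.Poisson()).fit()
    assert_allclose(res_w.params, res_big.params, rtol=1e-8)
    assert_allclose(res_w.bse, res_big.bse, rtol=1e-8)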
class TestWtdGlmPoisson(CheckWtdDuplicationMixin):
@classmethod
def setup_class(cls):
'''
Tests Poisson family with canonical log link.
'''
super(TestWtdGlmPoisson, cls).setup_class()
cls.endog = np.asarray(cls.endog)
cls.exog = np.asarray(cls.exog)
cls.res1 = GLM(cls.endog, cls.exog,
freq_weights=cls.weight,
family=sm.families.Poisson()).fit()
cls.res2 = GLM(cls.endog_big, cls.exog_big,
family=sm.families.Poisson()).fit()
class TestWtdGlmPoissonNewton(CheckWtdDuplicationMixin):
@classmethod
def setup_class(cls):
'''
Tests Poisson family with canonical log link.
'''
super(TestWtdGlmPoissonNewton, cls).setup_class()
start_params = np.array([1.82794424e-04, -4.76785037e-02,
-9.48249717e-02, -2.92293226e-04,
2.63728909e+00, -2.05934384e+01])
fit_kwds = dict(method='newton')
cls.res1 = GLM(cls.endog, cls.exog,
freq_weights=cls.weight,
family=sm.families.Poisson()).fit(**fit_kwds)
fit_kwds = dict(method='newton', start_params=start_params)
cls.res2 = GLM(cls.endog_big, cls.exog_big,
family=sm.families.Poisson()).fit(**fit_kwds)
class TestWtdGlmPoissonHC0(CheckWtdDuplicationMixin):
@classmethod
def setup_class(cls):
'''
Tests Poisson family with canonical log link.
'''
super(TestWtdGlmPoissonHC0, cls).setup_class()
start_params = np.array([1.82794424e-04, -4.76785037e-02,
-9.48249717e-02, -2.92293226e-04,
2.63728909e+00, -2.05934384e+01])
fit_kwds = dict(cov_type='HC0')
cls.res1 = GLM(cls.endog, cls.exog,
freq_weights=cls.weight,
family=sm.families.Poisson()).fit(**fit_kwds)
fit_kwds = dict(cov_type='HC0', start_params=start_params)
cls.res2 = GLM(cls.endog_big, cls.exog_big,
family=sm.families.Poisson()).fit(**fit_kwds)
class TestWtdGlmPoissonClu(CheckWtdDuplicationMixin):
@classmethod
def setup_class(cls):
'''
Tests Poisson family with canonical log link.
'''
super(TestWtdGlmPoissonClu, cls).setup_class()
start_params = np.array([1.82794424e-04, -4.76785037e-02,
-9.48249717e-02, -2.92293226e-04,
2.63728909e+00, -2.05934384e+01])
gid = np.arange(1, len(cls.endog) + 1) // 2
        fit_kwds = dict(cov_type='cluster',
                        cov_kwds={'groups': gid, 'use_correction': False})
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            cls.res1 = GLM(cls.endog, cls.exog,
                           freq_weights=cls.weight,
                           family=sm.families.Poisson()).fit(**fit_kwds)
        gidr = np.repeat(gid, cls.weight)
        fit_kwds = dict(cov_type='cluster',
                        cov_kwds={'groups': gidr, 'use_correction': False})
cls.res2 = GLM(cls.endog_big, cls.exog_big,
family=sm.families.Poisson()).fit(start_params=start_params,
**fit_kwds)
class TestWtdGlmBinomial(CheckWtdDuplicationMixin):
@classmethod
def setup_class(cls):
'''
Tests Binomial family with canonical logit link.
'''
super(TestWtdGlmBinomial, cls).setup_class()
cls.endog = cls.endog / 100
cls.endog_big = cls.endog_big / 100
cls.res1 = GLM(cls.endog, cls.exog,
freq_weights=cls.weight,
family=sm.families.Binomial()).fit()
cls.res2 = GLM(cls.endog_big, cls.exog_big,
family=sm.families.Binomial()).fit()
class TestWtdGlmNegativeBinomial(CheckWtdDuplicationMixin):
@classmethod
def setup_class(cls):
'''
Tests Negative Binomial family with canonical link
g(p) = log(p/(p + 1/alpha))
'''
super(TestWtdGlmNegativeBinomial, cls).setup_class()
alpha = 1.
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=DomainWarning)
family_link = sm.families.NegativeBinomial(
link=sm.families.links.nbinom(alpha=alpha),
alpha=alpha)
cls.res1 = GLM(cls.endog, cls.exog,
freq_weights=cls.weight,
family=family_link).fit()
cls.res2 = GLM(cls.endog_big, cls.exog_big,
family=family_link).fit()
class TestWtdGlmGamma(CheckWtdDuplicationMixin):
@classmethod
def setup_class(cls):
'''
Tests Gamma family with log link.
'''
super(TestWtdGlmGamma, cls).setup_class()
family_link = sm.families.Gamma(sm.families.links.log())
cls.res1 = GLM(cls.endog, cls.exog,
freq_weights=cls.weight,
family=family_link).fit()
cls.res2 = GLM(cls.endog_big, cls.exog_big,
family=family_link).fit()
class TestWtdGlmGaussian(CheckWtdDuplicationMixin):
@classmethod
def setup_class(cls):
'''
Tests Gaussian family with log link.
'''
super(TestWtdGlmGaussian, cls).setup_class()
family_link = sm.families.Gaussian(sm.families.links.log())
cls.res1 = GLM(cls.endog, cls.exog,
freq_weights=cls.weight,
family=family_link).fit()
cls.res2 = GLM(cls.endog_big, cls.exog_big,
family=family_link).fit()
class TestWtdGlmInverseGaussian(CheckWtdDuplicationMixin):
@classmethod
def setup_class(cls):
'''
Tests InverseGaussian family with log link.
'''
super(TestWtdGlmInverseGaussian, cls).setup_class()
family_link = sm.families.InverseGaussian(sm.families.links.log())
cls.res1 = GLM(cls.endog, cls.exog,
freq_weights=cls.weight,
family=family_link).fit()
cls.res2 = GLM(cls.endog_big, cls.exog_big,
family=family_link).fit()
class TestWtdGlmGammaNewton(CheckWtdDuplicationMixin):
@classmethod
def setup_class(cls):
'''
Tests Gamma family with log link.
'''
super(TestWtdGlmGammaNewton, cls).setup_class()
family_link = sm.families.Gamma(sm.families.links.log())
cls.res1 = GLM(cls.endog, cls.exog,
freq_weights=cls.weight,
family=family_link
).fit(method='newton')
cls.res2 = GLM(cls.endog_big, cls.exog_big,
family=family_link
).fit(method='newton')
def test_init_kwargs(self):
family_link = sm.families.Gamma(sm.families.links.log())
with pytest.warns(ValueWarning, match="unknown kwargs"):
GLM(self.endog, self.exog, family=family_link,
weights=self.weight, # incorrect keyword
)
class TestWtdGlmGammaScale_X2(CheckWtdDuplicationMixin):
@classmethod
def setup_class(cls):
'''
Tests Gamma family with log link.
'''
super(TestWtdGlmGammaScale_X2, cls).setup_class()
family_link = sm.families.Gamma(sm.families.links.log())
cls.res1 = GLM(cls.endog, cls.exog,
freq_weights=cls.weight,
family=family_link,
).fit(scale='X2')
cls.res2 = GLM(cls.endog_big, cls.exog_big,
family=family_link,
).fit(scale='X2')
class TestWtdGlmGammaScale_dev(CheckWtdDuplicationMixin):
@classmethod
def setup_class(cls):
'''
Tests Gamma family with log link.
'''
super(TestWtdGlmGammaScale_dev, cls).setup_class()
family_link = sm.families.Gamma(sm.families.links.log())
cls.res1 = GLM(cls.endog, cls.exog,
freq_weights=cls.weight,
family=family_link,
).fit(scale='dev')
cls.res2 = GLM(cls.endog_big, cls.exog_big,
family=family_link,
).fit(scale='dev')
def test_missing(self):
endog = self.data.endog.copy()
exog = self.data.exog.copy()
exog[0, 0] = np.nan
endog[[2, 4, 6, 8]] = np.nan
freq_weights = self.weight
        mod_missing = GLM(endog, exog, family=self.res1.model.family,
                          freq_weights=freq_weights, missing='drop')
        assert_equal(mod_missing.freq_weights.shape[0],
                     mod_missing.endog.shape[0])
        assert_equal(mod_missing.freq_weights.shape[0],
                     mod_missing.exog.shape[0])
        keep_idx = np.array([1, 3, 5, 7, 9, 10, 11, 12, 13, 14, 15, 16])
        assert_equal(mod_missing.freq_weights, self.weight[keep_idx])
class TestWtdTweedieLog(CheckWtdDuplicationMixin):
@classmethod
def setup_class(cls):
'''
Tests Tweedie family with log link and var_power=1.
'''
super(TestWtdTweedieLog, cls).setup_class()
family_link = sm.families.Tweedie(link=sm.families.links.log(),
var_power=1)
cls.res1 = GLM(cls.endog, cls.exog,
freq_weights=cls.weight,
family=family_link).fit()
cls.res2 = GLM(cls.endog_big, cls.exog_big,
family=family_link).fit()
class TestWtdTweediePower2(CheckWtdDuplicationMixin):
@classmethod
def setup_class(cls):
'''
Tests Tweedie family with Power(1) link and var_power=2.
'''
cls.data = cpunish.load_pandas()
cls.endog = cls.data.endog
cls.exog = cls.data.exog[['INCOME', 'SOUTH']]
np.random.seed(1234)
cls.weight = np.random.randint(5, 100, len(cls.endog))
cls.endog_big = np.repeat(cls.endog.values, cls.weight)
cls.exog_big = np.repeat(cls.exog.values, cls.weight, axis=0)
link = sm.families.links.Power()
family_link = sm.families.Tweedie(link=link, var_power=2)
cls.res1 = GLM(cls.endog, cls.exog,
freq_weights=cls.weight,
family=family_link).fit()
cls.res2 = GLM(cls.endog_big, cls.exog_big,
family=family_link).fit()
class TestWtdTweediePower15(CheckWtdDuplicationMixin):
@classmethod
def setup_class(cls):
'''
Tests Tweedie family with Power(0.5) link and var_power=1.5.
'''
super(TestWtdTweediePower15, cls).setup_class()
family_link = sm.families.Tweedie(link=sm.families.links.Power(0.5),
var_power=1.5)
cls.res1 = GLM(cls.endog, cls.exog,
freq_weights=cls.weight,
family=family_link).fit()
cls.res2 = GLM(cls.endog_big, cls.exog_big,
family=family_link).fit()
def test_wtd_patsy_missing():
import pandas as pd
data = cpunish.load()
data.endog = np.asarray(data.endog)
data.exog = np.asarray(data.exog)
data.exog[0, 0] = np.nan
data.endog[[2, 4, 6, 8]] = np.nan
data.pandas = pd.DataFrame(data.exog, columns=data.exog_name)
data.pandas['EXECUTIONS'] = data.endog
weights = np.arange(1, len(data.endog)+1)
formula = """EXECUTIONS ~ INCOME + PERPOVERTY + PERBLACK + VC100k96 +
SOUTH + DEGREE"""
    mod_missing = GLM.from_formula(formula, data=data.pandas,
                                   freq_weights=weights)
    assert_equal(mod_missing.freq_weights.shape[0],
                 mod_missing.endog.shape[0])
    assert_equal(mod_missing.freq_weights.shape[0],
                 mod_missing.exog.shape[0])
    assert_equal(mod_missing.freq_weights.shape[0], 12)
    keep_weights = np.array([2, 4, 6, 8, 10, 11, 12, 13, 14, 15, 16, 17])
    assert_equal(mod_missing.freq_weights, keep_weights)
class CheckTweedie(object):
def test_resid(self):
idx1 = len(self.res1.resid_response) - 1
idx2 = len(self.res2.resid_response) - 1
assert_allclose(np.concatenate((self.res1.resid_response[:17],
[self.res1.resid_response[idx1]])),
np.concatenate((self.res2.resid_response[:17],
[self.res2.resid_response[idx2]])),
rtol=1e-5, atol=1e-5)
assert_allclose(np.concatenate((self.res1.resid_pearson[:17],
[self.res1.resid_pearson[idx1]])),
np.concatenate((self.res2.resid_pearson[:17],
[self.res2.resid_pearson[idx2]])),
rtol=1e-5, atol=1e-5)
assert_allclose(np.concatenate((self.res1.resid_deviance[:17],
[self.res1.resid_deviance[idx1]])),
np.concatenate((self.res2.resid_deviance[:17],
[self.res2.resid_deviance[idx2]])),
rtol=1e-5, atol=1e-5)
assert_allclose(np.concatenate((self.res1.resid_working[:17],
[self.res1.resid_working[idx1]])),
np.concatenate((self.res2.resid_working[:17],
[self.res2.resid_working[idx2]])),
rtol=1e-5, atol=1e-5)
def test_bse(self):
        assert_allclose(self.res1.bse, self.res2.bse, atol=1e-6, rtol=1e-6)
def test_params(self):
assert_allclose(self.res1.params, self.res2.params, atol=1e-5,
rtol=1e-5)
def test_deviance(self):
assert_allclose(self.res1.deviance, self.res2.deviance, atol=1e-6,
rtol=1e-6)
def test_df(self):
assert_equal(self.res1.df_model, self.res2.df_model)
assert_equal(self.res1.df_resid, self.res2.df_resid)
def test_fittedvalues(self):
idx1 = len(self.res1.fittedvalues) - 1
        idx2 = len(self.res2.fittedvalues) - 1
assert_allclose(np.concatenate((self.res1.fittedvalues[:17],
[self.res1.fittedvalues[idx1]])),
np.concatenate((self.res2.fittedvalues[:17],
[self.res2.fittedvalues[idx2]])),
atol=1e-4, rtol=1e-4)
def test_summary(self):
self.res1.summary()
self.res1.summary2()
class TestTweediePower15(CheckTweedie):
@classmethod
def setup_class(cls):
from .results.results_glm import CpunishTweediePower15
cls.data = cpunish.load_pandas()
cls.exog = cls.data.exog[['INCOME', 'SOUTH']]
cls.endog = cls.data.endog
family_link = sm.families.Tweedie(link=sm.families.links.Power(1),
var_power=1.5)
cls.res1 = sm.GLM(endog=cls.data.endog,
exog=cls.data.exog[['INCOME', 'SOUTH']],
family=family_link).fit()
cls.res2 = CpunishTweediePower15()
class TestTweediePower2(CheckTweedie):
@classmethod
def setup_class(cls):
from .results.results_glm import CpunishTweediePower2
cls.data = cpunish.load_pandas()
cls.exog = cls.data.exog[['INCOME', 'SOUTH']]
cls.endog = cls.data.endog
family_link = sm.families.Tweedie(link=sm.families.links.Power(1),
var_power=2.)
cls.res1 = sm.GLM(endog=cls.data.endog,
exog=cls.data.exog[['INCOME', 'SOUTH']],
family=family_link).fit()
cls.res2 = CpunishTweediePower2()
class TestTweedieLog1(CheckTweedie):
@classmethod
def setup_class(cls):
from .results.results_glm import CpunishTweedieLog1
cls.data = cpunish.load_pandas()
cls.exog = cls.data.exog[['INCOME', 'SOUTH']]
cls.endog = cls.data.endog
family_link = sm.families.Tweedie(link=sm.families.links.log(),
var_power=1.)
cls.res1 = sm.GLM(endog=cls.data.endog,
exog=cls.data.exog[['INCOME', 'SOUTH']],
family=family_link).fit()
cls.res2 = CpunishTweedieLog1()
class TestTweedieLog15Fair(CheckTweedie):
@classmethod
def setup_class(cls):
from statsmodels.datasets.fair import load_pandas
from .results.results_glm import FairTweedieLog15
data = load_pandas()
family_link = sm.families.Tweedie(link=sm.families.links.log(),
var_power=1.5)
cls.res1 = sm.GLM(endog=data.endog,
exog=data.exog[['rate_marriage', 'age',
'yrs_married']],
family=family_link).fit()
cls.res2 = FairTweedieLog15()
class CheckTweedieSpecial(object):
def test_mu(self):
assert_allclose(self.res1.mu, self.res2.mu, rtol=1e-5, atol=1e-5)
def test_resid(self):
assert_allclose(self.res1.resid_response, self.res2.resid_response,
rtol=1e-5, atol=1e-5)
assert_allclose(self.res1.resid_pearson, self.res2.resid_pearson,
rtol=1e-5, atol=1e-5)
assert_allclose(self.res1.resid_deviance, self.res2.resid_deviance,
rtol=1e-5, atol=1e-5)
assert_allclose(self.res1.resid_working, self.res2.resid_working,
rtol=1e-5, atol=1e-5)
assert_allclose(self.res1.resid_anscombe_unscaled,
self.res2.resid_anscombe_unscaled,
rtol=1e-5, atol=1e-5)
class TestTweedieSpecialLog0(CheckTweedieSpecial):
@classmethod
def setup_class(cls):
cls.data = cpunish.load_pandas()
cls.exog = cls.data.exog[['INCOME', 'SOUTH']]
cls.endog = cls.data.endog
family1 = sm.families.Gaussian(link=sm.families.links.log())
cls.res1 = sm.GLM(endog=cls.data.endog,
exog=cls.data.exog[['INCOME', 'SOUTH']],
family=family1).fit()
family2 = sm.families.Tweedie(link=sm.families.links.log(),
var_power=0)
cls.res2 = sm.GLM(endog=cls.data.endog,
exog=cls.data.exog[['INCOME', 'SOUTH']],
family=family2).fit()
class TestTweedieSpecialLog1(CheckTweedieSpecial):
@classmethod
def setup_class(cls):
cls.data = cpunish.load_pandas()
cls.exog = cls.data.exog[['INCOME', 'SOUTH']]
cls.endog = cls.data.endog
family1 = sm.families.Poisson(link=sm.families.links.log())
cls.res1 = sm.GLM(endog=cls.data.endog,
exog=cls.data.exog[['INCOME', 'SOUTH']],
family=family1).fit()
family2 = sm.families.Tweedie(link=sm.families.links.log(),
var_power=1)
cls.res2 = sm.GLM(endog=cls.data.endog,
exog=cls.data.exog[['INCOME', 'SOUTH']],
family=family2).fit()
class TestTweedieSpecialLog2(CheckTweedieSpecial):
@classmethod
def setup_class(cls):
cls.data = cpunish.load_pandas()
cls.exog = cls.data.exog[['INCOME', 'SOUTH']]
cls.endog = cls.data.endog
family1 = sm.families.Gamma(link=sm.families.links.log())
cls.res1 = sm.GLM(endog=cls.data.endog,
exog=cls.data.exog[['INCOME', 'SOUTH']],
family=family1).fit()
family2 = sm.families.Tweedie(link=sm.families.links.log(),
var_power=2)
cls.res2 = sm.GLM(endog=cls.data.endog,
exog=cls.data.exog[['INCOME', 'SOUTH']],
family=family2).fit()
class TestTweedieSpecialLog3(CheckTweedieSpecial):
@classmethod
def setup_class(cls):
cls.data = cpunish.load_pandas()
cls.exog = cls.data.exog[['INCOME', 'SOUTH']]
cls.endog = cls.data.endog
family1 = sm.families.InverseGaussian(link=sm.families.links.log())
cls.res1 = sm.GLM(endog=cls.data.endog,
exog=cls.data.exog[['INCOME', 'SOUTH']],
family=family1).fit()
family2 = sm.families.Tweedie(link=sm.families.links.log(),
var_power=3)
cls.res2 = sm.GLM(endog=cls.data.endog,
exog=cls.data.exog[['INCOME', 'SOUTH']],
family=family2).fit()
def gen_tweedie(p):
np.random.seed(3242)
n = 500
x = np.random.normal(size=(n, 4))
lpr = np.dot(x, np.r_[1, -1, 0, 0.5])
mu = np.exp(lpr)
lam = 10 * mu**(2 - p) / (2 - p)
alp = (2 - p) / (p - 1)
bet = 10 * mu**(1 - p) / (p - 1)
    # Generate Tweedie values using a compound Poisson distribution
y = np.empty(n)
N = np.random.poisson(lam)
for i in range(n):
y[i] = np.random.gamma(alp, 1 / bet[i], N[i]).sum()
return y, x
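# The compound Poisson-gamma construction in gen_tweedie yields draws with
# E[y] = mu and Var(y) = mu**p / 10 (the factor 10 in lam and bet acts as
# 1/scale). A simulation-based sanity check, illustrative only:
def _demo_tweedie_moments(p=1.5, mu=2.0, nrep=100000):
    np.random.seed(0)
    lam = 10 * mu**(2 - p) / (2 - p)
    alp = (2 - p) / (p - 1)
    bet = 10 * mu**(1 - p) / (p - 1)
    N = np.random.poisson(lam, size=nrep)
    y = np.array([np.random.gamma(alp, 1 / bet, n).sum() for n in N])
    assert_allclose(y.mean(), mu, rtol=0.05)
    assert_allclose(y.var(), mu**p / 10, rtol=0.05)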
@pytest.mark.filterwarnings("ignore:GLM ridge optimization")
def test_tweedie_EQL():
# All tests below are regression tests, but the results
# are very close to the population values.
p = 1.5
y, x = gen_tweedie(p)
# Un-regularized fit using gradients
fam = sm.families.Tweedie(var_power=p, eql=True)
model1 = sm.GLM(y, x, family=fam)
result1 = model1.fit(method="newton")
assert_allclose(result1.params,
np.array([1.00350497, -0.99656954, 0.00802702, 0.50713209]),
rtol=1e-5, atol=1e-5)
# Un-regularized fit using IRLS
model1x = sm.GLM(y, x, family=fam)
result1x = model1x.fit(method="irls")
assert_allclose(result1.params, result1x.params)
assert_allclose(result1.bse, result1x.bse, rtol=1e-2)
# Lasso fit using coordinate-wise descent
# TODO: The search gets trapped in an infinite oscillation, so use
# a slack convergence tolerance.
model2 = sm.GLM(y, x, family=fam)
result2 = model2.fit_regularized(L1_wt=1, alpha=0.07, maxiter=200,
cnvrg_tol=0.01)
rtol, atol = 1e-2, 1e-4
assert_allclose(result2.params,
np.array([0.976831, -0.952854, 0., 0.470171]),
rtol=rtol, atol=atol)
# Series of ridge fits using gradients
ev = (np.array([1.001778, -0.99388, 0.00797, 0.506183]),
np.array([0.98586638, -0.96953481, 0.00749983, 0.4975267]),
np.array([0.206429, -0.164547, 0.000235, 0.102489]))
for j, alpha in enumerate([0.05, 0.5, 0.7]):
model3 = sm.GLM(y, x, family=fam)
result3 = model3.fit_regularized(L1_wt=0, alpha=alpha)
assert_allclose(result3.params, ev[j], rtol=rtol, atol=atol)
result4 = model3.fit_regularized(L1_wt=0, alpha=alpha * np.ones(x.shape[1]))
assert_allclose(result4.params, result3.params, rtol=rtol, atol=atol)
alpha = alpha * np.ones(x.shape[1])
alpha[0] = 0
result5 = model3.fit_regularized(L1_wt=0, alpha=alpha)
assert not np.allclose(result5.params, result4.params)
def test_tweedie_elastic_net():
# Check that the coefficients vanish one-by-one
# when using the elastic net.
p = 1.5 # Tweedie variance exponent
y, x = gen_tweedie(p)
# Un-regularized fit using gradients
fam = sm.families.Tweedie(var_power=p, eql=True)
model1 = sm.GLM(y, x, family=fam)
nnz = []
for alpha in np.linspace(0, 10, 20):
result1 = model1.fit_regularized(L1_wt=0.5, alpha=alpha)
nnz.append((np.abs(result1.params) > 0).sum())
nnz = np.unique(nnz)
assert len(nnz) == 5
def test_tweedie_EQL_poisson_limit():
# Test the limiting Poisson case of the Nelder/Pregibon/Tweedie
# EQL.
np.random.seed(3242)
n = 500
x = np.random.normal(size=(n, 3))
x[:, 0] = 1
lpr = 4 + x[:, 1:].sum(1)
mn = np.exp(lpr)
y = np.random.poisson(mn)
for scale in 1.0, 'x2', 'dev':
# Un-regularized fit using gradients not IRLS
fam = sm.families.Tweedie(var_power=1, eql=True)
model1 = sm.GLM(y, x, family=fam)
result1 = model1.fit(method="newton", scale=scale)
# Poisson GLM
model2 = sm.GLM(y, x, family=sm.families.Poisson())
result2 = model2.fit(method="newton", scale=scale)
assert_allclose(result1.params, result2.params, atol=1e-6, rtol=1e-6)
        assert_allclose(result1.bse, result2.bse, rtol=1e-6, atol=1e-6)
def test_tweedie_EQL_upper_limit():
# Test the limiting case of the Nelder/Pregibon/Tweedie
# EQL with var = mean^2. These are tests against population
# values so accuracy is not high.
np.random.seed(3242)
n = 500
x = np.random.normal(size=(n, 3))
x[:, 0] = 1
lpr = 4 + x[:, 1:].sum(1)
mn = np.exp(lpr)
y = np.random.poisson(mn)
for scale in 'x2', 'dev', 1.0:
# Un-regularized fit using gradients not IRLS
fam = sm.families.Tweedie(var_power=2, eql=True)
model1 = sm.GLM(y, x, family=fam)
result1 = model1.fit(method="newton", scale=scale)
assert_allclose(result1.params, np.r_[4, 1, 1], atol=1e-3, rtol=1e-1)
def testTweediePowerEstimate():
# Test the Pearson estimate of the Tweedie variance and scale parameters.
#
# Ideally, this would match the following R code, but I cannot make it work...
#
# setwd('c:/workspace')
# data <- read.csv('cpunish.csv', sep=",")
#
# library(tweedie)
#
# y <- c(1.00113835e+05, 6.89668315e+03, 6.15726842e+03,
# 1.41718806e+03, 5.11776456e+02, 2.55369154e+02,
# 1.07147443e+01, 3.56874698e+00, 4.06797842e-02,
# 7.06996731e-05, 2.10165106e-07, 4.34276938e-08,
# 1.56354040e-09, 0.00000000e+00, 0.00000000e+00,
# 0.00000000e+00, 0.00000000e+00)
#
# data$NewY <- y
#
# out <- tweedie.profile( NewY ~ INCOME + SOUTH - 1,
# p.vec=c(1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8,
# 1.9), link.power=0,
# data=data,do.plot = TRUE)
data = cpunish.load_pandas()
y = [1.00113835e+05, 6.89668315e+03, 6.15726842e+03,
1.41718806e+03, 5.11776456e+02, 2.55369154e+02,
1.07147443e+01, 3.56874698e+00, 4.06797842e-02,
7.06996731e-05, 2.10165106e-07, 4.34276938e-08,
1.56354040e-09, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00]
model1 = sm.GLM(y, data.exog[['INCOME', 'SOUTH']],
family=sm.families.Tweedie(link=sm.families.links.log(),
var_power=1.5))
res1 = model1.fit()
model2 = sm.GLM((y - res1.mu) ** 2,
np.column_stack((np.ones(len(res1.mu)), np.log(res1.mu))),
family=sm.families.Gamma(sm.families.links.log()))
res2 = model2.fit()
# Sample may be too small for this...
# assert_allclose(res1.scale, np.exp(res2.params[0]), rtol=0.25)
p = model1.estimate_tweedie_power(res1.mu)
assert_allclose(p, res2.params[1], rtol=0.25)
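# The auxiliary Gamma regression above exploits the Tweedie variance
# function Var(y) = phi * mu**p, so that approximately
#     log E[(y - mu)**2] = log(phi) + p * log(mu),
# i.e. the slope on log(mu) estimates the variance power p. The same idea
# as a cruder plain-numpy least squares (hypothetical helper):
def _demo_estimate_var_power(y, mu):
    z = np.log((np.asarray(y) - mu) ** 2 + 1e-300)  # guard zero residuals
    design = np.column_stack([np.ones_like(mu), np.log(mu)])
    coef = np.linalg.lstsq(design, z, rcond=None)[0]
    return coef[1]  # estimated variance power p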
def test_glm_lasso_6431():
# Based on issue #6431
# Fails with newton-cg as optimizer
np.random.seed(123)
from statsmodels.regression.linear_model import OLS
n = 50
x = np.ones((n, 2))
x[:, 1] = np.arange(0, n)
y = 1000 + x[:, 1] + np.random.normal(0, 1, n)
params = np.r_[999.82244338, 1.0077889]
for method in "bfgs", None:
for fun in [OLS, GLM]:
# Changing L1_wtValue from 0 to 1e-9 changes
# the algorithm from scipy gradient optimization
# to statsmodels coordinate descent
for L1_wtValue in [0, 1e-9]:
model = fun(y, x)
if fun == OLS:
fit = model.fit_regularized(alpha=0, L1_wt=L1_wtValue)
else:
fit = model._fit_ridge(alpha=0, start_params=None, method=method)
assert_allclose(params, fit.params, atol=1e-6, rtol=1e-6)
class TestRegularized(object):
def test_regularized(self):
import os
from .results import glmnet_r_results
for dtype in "binomial", "poisson":
cur_dir = os.path.dirname(os.path.abspath(__file__))
data = np.loadtxt(os.path.join(cur_dir, "results", "enet_%s.csv" % dtype),
delimiter=",")
endog = data[:, 0]
exog = data[:, 1:]
fam = {"binomial" : sm.families.Binomial,
"poisson" : sm.families.Poisson}[dtype]
for j in range(9):
vn = "rslt_%s_%d" % (dtype, j)
r_result = getattr(glmnet_r_results, vn)
L1_wt = r_result[0]
alpha = r_result[1]
params = r_result[2:]
model = GLM(endog, exog, family=fam())
sm_result = model.fit_regularized(L1_wt=L1_wt, alpha=alpha)
# Agreement is OK, see below for further check
assert_allclose(params, sm_result.params, atol=1e-2, rtol=0.3)
# The penalized log-likelihood that we are maximizing.
def plf(params):
llf = model.loglike(params) / len(endog)
llf = llf - alpha * ((1 - L1_wt)*np.sum(params**2) / 2 + L1_wt*np.sum(np.abs(params)))
return llf
# Confirm that we are doing better than glmnet.
llf_r = plf(params)
llf_sm = plf(sm_result.params)
assert_equal(np.sign(llf_sm - llf_r), 1)
class TestConvergence(object):
@classmethod
def setup_class(cls):
'''
Test Binomial family with canonical logit link using star98 dataset.
'''
from statsmodels.datasets.star98 import load
data = load()
data.exog = add_constant(data.exog, prepend=False)
cls.model = GLM(data.endog, data.exog,
family=sm.families.Binomial())
def _when_converged(self, atol=1e-8, rtol=0, tol_criterion='deviance'):
for i, dev in enumerate(self.res.fit_history[tol_criterion]):
orig = self.res.fit_history[tol_criterion][i]
new = self.res.fit_history[tol_criterion][i + 1]
if np.allclose(orig, new, atol=atol, rtol=rtol):
return i
        raise ValueError('CONVERGENCE CHECK: It seems this doesn\'t converge!')
def test_convergence_atol_only(self):
atol = 1e-8
rtol = 0
self.res = self.model.fit(atol=atol, rtol=rtol)
expected_iterations = self._when_converged(atol=atol, rtol=rtol)
actual_iterations = self.res.fit_history['iteration']
        # Note the first value in the list is np.inf. The second value
        # is the initial guess based on start_params or the
# estimate thereof. The third value (index = 2) is the actual "first
# iteration"
assert_equal(expected_iterations, actual_iterations)
assert_equal(len(self.res.fit_history['deviance']) - 2,
actual_iterations)
def test_convergence_rtol_only(self):
atol = 0
rtol = 1e-8
self.res = self.model.fit(atol=atol, rtol=rtol)
expected_iterations = self._when_converged(atol=atol, rtol=rtol)
actual_iterations = self.res.fit_history['iteration']
        # Note the first value in the list is np.inf. The second value
        # is the initial guess based on start_params or the
# estimate thereof. The third value (index = 2) is the actual "first
# iteration"
assert_equal(expected_iterations, actual_iterations)
assert_equal(len(self.res.fit_history['deviance']) - 2,
actual_iterations)
def test_convergence_atol_rtol(self):
atol = 1e-8
rtol = 1e-8
self.res = self.model.fit(atol=atol, rtol=rtol)
expected_iterations = self._when_converged(atol=atol, rtol=rtol)
actual_iterations = self.res.fit_history['iteration']
        # Note the first value in the list is np.inf. The second value
        # is the initial guess based on start_params or the
# estimate thereof. The third value (index = 2) is the actual "first
# iteration"
assert_equal(expected_iterations, actual_iterations)
assert_equal(len(self.res.fit_history['deviance']) - 2,
actual_iterations)
def test_convergence_atol_only_params(self):
atol = 1e-8
rtol = 0
self.res = self.model.fit(atol=atol, rtol=rtol, tol_criterion='params')
expected_iterations = self._when_converged(atol=atol, rtol=rtol,
tol_criterion='params')
actual_iterations = self.res.fit_history['iteration']
        # Note the first value in the list is np.inf. The second value
        # is the initial guess based on start_params or the
# estimate thereof. The third value (index = 2) is the actual "first
# iteration"
assert_equal(expected_iterations, actual_iterations)
assert_equal(len(self.res.fit_history['deviance']) - 2,
actual_iterations)
def test_convergence_rtol_only_params(self):
atol = 0
rtol = 1e-8
self.res = self.model.fit(atol=atol, rtol=rtol, tol_criterion='params')
expected_iterations = self._when_converged(atol=atol, rtol=rtol,
tol_criterion='params')
actual_iterations = self.res.fit_history['iteration']
        # Note the first value in the list is np.inf. The second value
        # is the initial guess based on start_params or the
# estimate thereof. The third value (index = 2) is the actual "first
# iteration"
assert_equal(expected_iterations, actual_iterations)
assert_equal(len(self.res.fit_history['deviance']) - 2,
actual_iterations)
def test_convergence_atol_rtol_params(self):
atol = 1e-8
rtol = 1e-8
self.res = self.model.fit(atol=atol, rtol=rtol, tol_criterion='params')
expected_iterations = self._when_converged(atol=atol, rtol=rtol,
tol_criterion='params')
actual_iterations = self.res.fit_history['iteration']
        # Note the first value in the list is np.inf. The second value
        # is the initial guess based on start_params or the
# estimate thereof. The third value (index = 2) is the actual "first
# iteration"
assert_equal(expected_iterations, actual_iterations)
assert_equal(len(self.res.fit_history['deviance']) - 2,
actual_iterations)
def test_poisson_deviance():
# see #3355 missing term in deviance if resid_response.sum() != 0
np.random.seed(123987)
nobs, k_vars = 50, 3-1
x = sm.add_constant(np.random.randn(nobs, k_vars))
mu_true = np.exp(x.sum(1))
y = np.random.poisson(mu_true, size=nobs)
mod = sm.GLM(y, x[:, :], family=sm.genmod.families.Poisson())
res = mod.fit()
d_i = res.resid_deviance
d = res.deviance
lr = (mod.family.loglike(y, y+1e-20) -
mod.family.loglike(y, res.fittedvalues)) * 2
assert_allclose(d, (d_i**2).sum(), rtol=1e-12)
assert_allclose(d, lr, rtol=1e-12)
# case without constant, resid_response.sum() != 0
mod_nc = sm.GLM(y, x[:, 1:], family=sm.genmod.families.Poisson())
res_nc = mod_nc.fit()
d_i = res_nc.resid_deviance
d = res_nc.deviance
lr = (mod.family.loglike(y, y+1e-20) -
mod.family.loglike(y, res_nc.fittedvalues)) * 2
assert_allclose(d, (d_i**2).sum(), rtol=1e-12)
assert_allclose(d, lr, rtol=1e-12)
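# For the Poisson family the deviance checked above also has the closed
# form D = 2 * sum(y * log(y / mu) - (y - mu)), with the first term taken
# as 0 when y == 0. Illustrative helper (hypothetical, for reference):
def _demo_poisson_deviance(y, mu):
    with np.errstate(divide='ignore', invalid='ignore'):
        term = np.where(y > 0, y * np.log(y / mu), 0.0)
    return 2 * np.sum(term - (y - mu))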
def test_non_invertible_hessian_fails_summary():
    # Test that when the Hessian fails, the summary is still available.
data = cpunish.load_pandas()
data.endog[:] = 1
with warnings.catch_warnings():
# we filter DomainWarning, the convergence problems
# and warnings in summary
warnings.simplefilter("ignore")
mod = sm.GLM(data.endog, data.exog, family=sm.families.Gamma())
res = mod.fit(maxiter=1, method='bfgs', max_start_irls=0)
res.summary()
def test_int_scale():
# GH-6627, make sure it works with int scale
data = longley.load()
mod = GLM(data.endog, data.exog, family=sm.families.Gaussian())
res = mod.fit(scale=1)
assert isinstance(res.params, pd.Series)
assert res.scale.dtype == np.float64
@pytest.mark.parametrize("dtype", [np.int8, np.int16, np.int32, np.int64])
def test_int_exog(dtype):
# GH-6627, make use of floats internally
count1, n1, count2, n2 = 60, 51477.5, 30, 54308.7
y = [count1, count2]
x = np.asarray([[1, 1], [1, 0]]).astype(dtype)
exposure = np.asarray([n1, n2])
mod = GLM(y, x, exposure=exposure, family=sm.families.Poisson())
res = mod.fit(method='bfgs', max_start_irls=0)
assert isinstance(res.params, np.ndarray)
def test_glm_bic(iris):
X = np.c_[np.ones(100), iris[50:, :4]]
y = np.array(iris)[50:, 4].astype(np.int32)
y -= 1
SET_USE_BIC_LLF(True)
model = GLM(y, X, family=sm.families.Binomial()).fit()
# 34.9244 is what glm() of R yields
assert_almost_equal(model.bic, 34.9244, decimal=3)
assert_almost_equal(model.bic_llf, 34.9244, decimal=3)
SET_USE_BIC_LLF(False)
assert_almost_equal(model.bic, model.bic_deviance, decimal=3)
SET_USE_BIC_LLF(None)
def test_glm_bic_warning(iris):
X = np.c_[np.ones(100), iris[50:, :4]]
y = np.array(iris)[50:, 4].astype(np.int32)
y -= 1
model = GLM(y, X, family=sm.families.Binomial()).fit()
with pytest.warns(FutureWarning, match="The bic"):
assert isinstance(model.bic, float)
def test_output_exposure_null(reset_randomstate):
# GH 6953
x0 = [np.sin(i / 20) + 2 for i in range(1000)]
rs = np.random.RandomState(0)
# Variable exposures for each observation
exposure = rs.randint(100, 200, size=1000)
y = [np.sum(rs.poisson(x, size=e)) for x, e in zip(x0, exposure)]
x = add_constant(x0)
model = GLM(
endog=y, exog=x, exposure=exposure, family=sm.families.Poisson()
).fit()
null_model = GLM(
endog=y, exog=x[:, 0], exposure=exposure, family=sm.families.Poisson()
).fit()
null_model_without_exposure = GLM(
endog=y, exog=x[:, 0], family=sm.families.Poisson()
).fit()
assert_allclose(model.llnull, null_model.llf)
# Check that they are different
assert np.abs(null_model_without_exposure.llf - model.llnull) > 1
def test_qaic():
# Example from documentation of R package MuMIn
import patsy
ldose = np.concatenate((np.arange(6), np.arange(6)))
sex = ["M"]*6 + ["F"]*6
numdead = [10, 4, 9, 12, 18, 20, 0, 2, 6, 10, 12, 16]
df = pd.DataFrame({"ldose": ldose, "sex": sex, "numdead": numdead})
df["numalive"] = 20 - df["numdead"]
df["SF"] = df["numdead"]
y = df[["numalive", "numdead"]].values
x = patsy.dmatrix("sex*ldose", data=df, return_type='dataframe')
m = GLM(y, x, family=sm.families.Binomial())
r = m.fit()
scale = 2.412699
qaic = r.info_criteria(crit="qaic", scale=scale)
# R gives 31.13266 because it uses a df that is 1 greater,
# presumably because they count the scale parameter in df.
# This won't matter when comparing models by differencing
# QAICs.
# Binomial doesn't have a scale parameter, so adding +1 is not correct.
assert_allclose(qaic, 29.13266, rtol=1e-5, atol=1e-5)
qaic1 = r.info_criteria(crit="qaic", scale=scale, dk_params=1)
assert_allclose(qaic1, 31.13266, rtol=1e-5, atol=1e-5)
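# QAIC is the quasi-likelihood analogue of AIC: QAIC = -2*llf/c_hat + 2*k,
# with c_hat the overdispersion estimate (`scale` above) and k the number
# of estimated parameters (df_model + 1, plus any dk_params adjustment).
# A hand computation mirroring the values tested above, assuming that
# formula (illustrative sketch only):
def _demo_qaic(result, scale, dk_params=0):
    k = result.df_model + 1 + dk_params
    return -2 * result.llf / scale + 2 * k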
def test_tweedie_score():
np.random.seed(3242)
n = 500
x = np.random.normal(size=(n, 4))
lpr = np.dot(x, np.r_[1, -1, 0, 0.5])
mu = np.exp(lpr)
p0 = 1.5
lam = 10 * mu**(2 - p0) / (2 - p0)
alp = (2 - p0) / (p0 - 1)
bet = 10 * mu**(1 - p0) / (p0 - 1)
y = np.empty(n)
N = np.random.poisson(lam)
for i in range(n):
y[i] = np.random.gamma(alp, 1 / bet[i], N[i]).sum()
for p in [1, 1.5, 2]:
fam = sm.families.Tweedie(var_power=p, eql=True)
model = GLM(y, x, family=fam)
result = model.fit()
pa = result.params + 0.2*np.random.normal(size=result.params.size)
ngrad = approx_fprime_cs(pa, lambda x: model.loglike(x, scale=1))
agrad = model.score(pa, scale=1)
assert_allclose(ngrad, agrad, atol=1e-8, rtol=1e-8)
nhess = approx_hess_cs(pa, lambda x: model.loglike(x, scale=1))
ahess = model.hessian(pa, scale=1)
assert_allclose(nhess, ahess, atol=5e-8, rtol=5e-8)
|
bashtage/statsmodels
|
statsmodels/genmod/tests/test_glm.py
|
Python
|
bsd-3-clause
| 100,877
|
[
"Gaussian"
] |
b1ed1200e4b1f8f8e03fb5f46adb43beb90261db8f4d3f808bbc29d54c718c68
|
import tensorflow as tf
def cross_matrices(tensor_a, a_inputs, tensor_b, b_inputs):
"""Tiles two tensors in perpendicular dimensions."""
expanded_a = tf.expand_dims(tensor_a, 1)
expanded_b = tf.expand_dims(tensor_b, 0)
tiled_a = tf.tile(expanded_a, tf.constant([1, b_inputs, 1]))
tiled_b = tf.tile(expanded_b, tf.constant([a_inputs, 1, 1]))
return [tiled_a, tiled_b]
def linear_kernel(tensor_a, a_inputs, tensor_b, b_inputs):
"""Returns the linear kernel (dot product) matrix of two matrices of vectors
element-wise."""
cross = cross_matrices(tensor_a, a_inputs, tensor_b, b_inputs)
kernel = tf.reduce_sum(tf.mul(cross[0], cross[1]), reduction_indices=2)
return kernel
def gaussian_kernel(tensor_a, a_inputs, tensor_b, b_inputs, gamma):
"""Returns the Gaussian kernel matrix of two matrices of vectors
element-wise."""
cross = cross_matrices(tensor_a, a_inputs, tensor_b, b_inputs)
kernel = tf.exp(tf.mul(tf.reduce_sum(tf.square(
tf.sub(cross[0], cross[1])), reduction_indices=2),
tf.neg(tf.constant(gamma, dtype=tf.float32))))
return kernel
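# The kernels above build the full pairwise matrix K[i, j] = k(a_i, b_j)
# by tiling. A plain-numpy reference implementation, useful for checking
# shapes and values (hypothetical helper, assumes 2-D float arrays):
def np_gaussian_kernel(matrix_a, matrix_b, gamma):
    import numpy as np
    # ||a - b||^2 for every pair via broadcasting: (n_a, 1, d) - (1, n_b, d)
    sq_dist = np.sum((matrix_a[:, None, :] - matrix_b[None, :, :]) ** 2,
                     axis=2)
    return np.exp(-gamma * sq_dist)  # shape (n_a, n_b)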
def cost(train_data, train_labels, inputs, kernel_type="gaussian", C=1, gamma=1):
"""Returns the kernelised cost to be minimised."""
beta = tf.Variable(tf.zeros([inputs, 1]))
offset = tf.Variable(tf.zeros([1]))
if kernel_type == "linear":
kernel = linear_kernel(train_data, inputs, train_data, inputs)
elif kernel_type == "gaussian":
kernel = gaussian_kernel(train_data, inputs, train_data, inputs, gamma)
kernel_matmul = tf.matmul(tf.matmul(beta, kernel, transpose_a=True), beta)
first_term = tf.reshape(tf.div(kernel_matmul, tf.constant([2.0])), [1])
t = tf.add(tf.matmul(kernel, beta, transpose_a=True), offset)
one_minus_yt = tf.sub(tf.ones([1]), tf.mul(train_labels, t))
linear_loss = tf.reduce_max(tf.concat(1, [one_minus_yt, tf.zeros_like(one_minus_yt)]), reduction_indices=1)
second_term = tf.mul(tf.constant([C], dtype=tf.float32), linear_loss)
cost_function = tf.add(first_term, second_term)
return beta, offset, cost_function
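# Hedged reading of cost() above: the quantity built there is the
# kernelised soft-margin SVM primal objective,
#     (1/2) * beta^T K beta + C * max(0, 1 - y * (K^T beta + b)),
# where K is the training kernel matrix, beta plays the role of dual-style
# coefficients and b is the offset. Note the hinge-loss term is a
# per-sample vector here, so the returned cost is not a scalar; when it is
# handed to an optimizer, gradients are effectively taken of its sum.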
def decide(training, training_instances, testing, testing_instances,
beta, offset, kernel_type="gaussian", gamma=1):
"""Tests a set of test instances."""
if kernel_type == "linear":
kernel = linear_kernel(
testing, testing_instances, training, training_instances)
elif kernel_type == "gaussian":
kernel = gaussian_kernel(
testing, testing_instances, training, training_instances, gamma)
return tf.sign(tf.add(tf.matmul(kernel, beta), offset))
|
Kkari/bsc_thesis
|
svm.py
|
Python
|
apache-2.0
| 2,658
|
[
"Gaussian"
] |
cf5e3c51e5616898182bf5b1181abe1233754019cf6056bfdaf4bba62f596467
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2012 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
import gtk
import mock
from stoqlib.api import api
from stoqlib.domain.profile import ProfileSettings
from stoqlib.gui.base.messagebar import MessageBar
from stoqlib.gui.test.uitestutils import GUITest
import stoq
from stoq.gui.shell.shellapp import ShellApp
from stoq.gui.shell.shellwindow import ShellWindow
gtk.set_interactive(False)
class MockShellWindow(ShellWindow):
in_ui_test = True
def add_info_bar(self, message_type, label, action_widget=None):
infobar = MessageBar(label, message_type)
assert infobar is not None
if action_widget:
infobar.add_action_widget(action_widget, 0)
action_widget.show()
infobar.show()
self.main_vbox.pack_start(infobar, False, False, 0)
self.main_vbox.reorder_child(infobar, 2)
return infobar
class BaseGUITest(GUITest):
def setUp(self):
original_refresh = ShellApp.refresh
        # We need to do this mock since the store here doesn't get
        # confirmed, so an action on an item that causes the results
        # to be refreshed would make the results disappear
self._refresh_mock = mock.patch(
'stoq.gui.shell.shellapp.ShellApp.refresh',
new=lambda s, rollback=False: original_refresh(s, rollback=False))
self._refresh_mock.start()
super(BaseGUITest, self).setUp()
def tearDown(self):
super(BaseGUITest, self).tearDown()
self._refresh_mock.stop()
def create_app(self, window_class, app_name):
self.user = api.get_current_user(self.store)
# FIXME: Perhaps we should just ignore permission checking, it'll
# save quite a few selects
settings = self.store.find(ProfileSettings, app_dir_name=app_name,
user_profile=self.user.profile).one()
if settings is None:
settings = self.create_profile_settings(self.user.profile, app_name)
api.user_settings.set(u'actual-version', stoq.stoq_version)
self.shell = mock.Mock()
self.options = mock.Mock(spec=[u'debug'])
self.options.debug = False
self.window = MockShellWindow(self.options, self.shell, store=self.store)
self.window.in_ui_test = True
self.window.statusbar.push(0, u'Test Statusbar test')
shell_app = self.window.run_application(app_name)
assert shell_app is not None
return shell_app
|
tiagocardosos/stoq
|
stoq/gui/test/baseguitest.py
|
Python
|
gpl-2.0
| 3,342
|
[
"VisIt"
] |
4479393b41fd6c62b719c994e0f2405fcf7886091e982faa8bcbc83b69434bb2
|
#!/usr/bin/env python
"""
compliance_checker/protocols/netcdf.py
Functions to assist in determining if the URL points to a netCDF file
"""
import requests
def is_netcdf(url):
"""
Returns True if the URL points to a valid local netCDF file
:param str url: Location of file on the file system
"""
# Try an obvious exclusion of remote resources
if url.startswith("http"):
return False
# If it's a known extension, give it a shot
if url.endswith("nc"):
return True
# Brute force
with open(url, "rb") as f:
magic_number = f.read(4)
if len(magic_number) < 4:
return False
if is_classic_netcdf(magic_number):
return True
elif is_hdf5(magic_number):
return True
return False
def is_classic_netcdf(file_buffer):
"""
Returns True if the contents of the byte array matches the magic number in
netCDF files
:param str file_buffer: Byte-array of the first 4 bytes of a file
"""
# CDF.
if file_buffer == b"\x43\x44\x46\x01":
return True
return False
def is_hdf5(file_buffer):
"""
Returns True if the contents of the byte array matches the magic number in
HDF5 files
:param str file_buffer: Byte-array of the first 4 bytes of a file
"""
# .HDF
if file_buffer == b"\x89\x48\x44\x46":
return True
return False
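# Usage examples for the two magic-number sniffers above (doctest-style,
# illustrative):
#
#     >>> is_classic_netcdf(b"CDF\x01")
#     True
#     >>> is_hdf5(b"\x89HDF")
#     True
#     >>> is_classic_netcdf(b"\x89HDF")
#     False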
def is_remote_netcdf(ds_str):
"""
Check a remote path points to a NetCDF resource.
Parameters
----------
ds_str (str): remote path to a dataset
Returns
-------
bool
"""
# Some datasets do not support HEAD requests! The vast majority will,
# however, support GET requests
try:
head_req = requests.head(ds_str, allow_redirects=True, timeout=10)
head_req.raise_for_status()
    except requests.exceptions.RequestException:
content_type = None
else:
content_type = head_req.headers.get("content-type")
# if the Content-Type header returned was "application/x-netcdf",
# or a netCDF file (not OPeNDAP) we can open this into a Dataset
return content_type == "application/x-netcdf"
|
ocefpaf/compliance-checker
|
compliance_checker/protocols/netcdf.py
|
Python
|
apache-2.0
| 2,160
|
[
"NetCDF"
] |
2e6c8af36483875670ec18a227bdb3942aaa1cfd7918801fc07f72d2b30a780e
|
"""
Instructor Dashboard Views
"""
import logging
import datetime
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
import uuid
import pytz
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_POST
from django.utils.translation import ugettext as _, ugettext_noop
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.decorators.cache import cache_control
from edxmako.shortcuts import render_to_response
from django.core.urlresolvers import reverse
from django.utils.html import escape
from django.http import Http404, HttpResponseServerError
from django.conf import settings
from util.json_request import JsonResponse
from mock import patch
from lms.djangoapps.lms_xblock.runtime import quote_slashes
from openedx.core.lib.xblock_utils import wrap_xblock
from xmodule.html_module import HtmlDescriptor
from xmodule.modulestore.django import modulestore
from xmodule.tabs import CourseTab
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from courseware.access import has_access
from courseware.courses import get_course_by_id, get_studio_url
from django_comment_client.utils import has_forum_access
from django_comment_common.models import FORUM_ROLE_ADMINISTRATOR
from student.models import CourseEnrollment
from shoppingcart.models import Coupon, PaidCourseRegistration, CourseRegCodeItem
from course_modes.models import CourseMode, CourseModesArchive
from student.roles import CourseFinanceAdminRole, CourseSalesAdminRole
from certificates.models import CertificateGenerationConfiguration
from certificates import api as certs_api
from class_dashboard.dashboard_data import get_section_display_name, get_array_section_has_problem
from .tools import get_units_with_due_date, title_or_url, bulk_email_is_enabled_for_course
from opaque_keys.edx.locations import SlashSeparatedCourseKey
log = logging.getLogger(__name__)
class InstructorDashboardTab(CourseTab):
"""
Defines the Instructor Dashboard view type that is shown as a course tab.
"""
type = "instructor"
title = ugettext_noop('Instructor')
view_name = "instructor_dashboard"
is_dynamic = True # The "Instructor" tab is instead dynamically added when it is enabled
@classmethod
def is_enabled(cls, course, user=None): # pylint: disable=unused-argument,redefined-outer-name
"""
Returns true if the specified user has staff access.
"""
return user and has_access(user, 'staff', course, course.id)
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def instructor_dashboard_2(request, course_id):
""" Display the instructor dashboard for a course. """
try:
course_key = CourseKey.from_string(course_id)
except InvalidKeyError:
log.error(u"Unable to find course with course key %s while loading the Instructor Dashboard.", course_id)
return HttpResponseServerError()
course = get_course_by_id(course_key, depth=0)
access = {
'admin': request.user.is_staff,
'instructor': has_access(request.user, 'instructor', course),
'finance_admin': CourseFinanceAdminRole(course_key).has_user(request.user),
'sales_admin': CourseSalesAdminRole(course_key).has_user(request.user),
'staff': has_access(request.user, 'staff', course),
'forum_admin': has_forum_access(request.user, course_key, FORUM_ROLE_ADMINISTRATOR),
}
if not access['staff']:
raise Http404()
is_white_label = CourseMode.is_white_label(course_key)
sections = [
_section_course_info(course, access),
_section_membership(course, access, is_white_label),
_section_cohort_management(course, access),
_section_student_admin(course, access),
_section_data_download(course, access),
]
analytics_dashboard_message = None
if settings.ANALYTICS_DASHBOARD_URL:
# Construct a URL to the external analytics dashboard
analytics_dashboard_url = '{0}/courses/{1}'.format(settings.ANALYTICS_DASHBOARD_URL, unicode(course_key))
link_start = "<a href=\"{}\" target=\"_blank\">".format(analytics_dashboard_url)
analytics_dashboard_message = _(
"To gain insights into student enrollment and participation {link_start}"
"visit {analytics_dashboard_name}, our new course analytics product{link_end}."
)
analytics_dashboard_message = analytics_dashboard_message.format(
link_start=link_start, link_end="</a>", analytics_dashboard_name=settings.ANALYTICS_DASHBOARD_NAME)
# Temporarily show the "Analytics" section until we have a better way of linking to Insights
sections.append(_section_analytics(course, access))
# Check if there is corresponding entry in the CourseMode Table related to the Instructor Dashboard course
course_mode_has_price = False
paid_modes = CourseMode.paid_modes_for_course(course_key)
if len(paid_modes) == 1:
course_mode_has_price = True
elif len(paid_modes) > 1:
log.error(
u"Course %s has %s course modes with payment options. Course must only have "
u"one paid course mode to enable eCommerce options.",
unicode(course_key), len(paid_modes)
)
if settings.FEATURES.get('INDIVIDUAL_DUE_DATES') and access['instructor']:
sections.insert(3, _section_extensions(course))
# Gate access to course email by feature flag & by course-specific authorization
if bulk_email_is_enabled_for_course(course_key):
sections.append(_section_send_email(course, access))
    # Gate access to Metrics tab by feature flag and staff authorization
if settings.FEATURES['CLASS_DASHBOARD'] and access['staff']:
sections.append(_section_metrics(course, access))
# Gate access to Ecommerce tab
if course_mode_has_price and (access['finance_admin'] or access['sales_admin']):
sections.append(_section_e_commerce(course, access, paid_modes[0], is_white_label, is_white_label))
# Certificates panel
# This is used to generate example certificates
# and enable self-generated certificates for a course.
certs_enabled = CertificateGenerationConfiguration.current().enabled
if certs_enabled and access['admin']:
sections.append(_section_certificates(course))
disable_buttons = not _is_small_course(course_key)
context = {
'course': course,
'old_dashboard_url': reverse('instructor_dashboard_legacy', kwargs={'course_id': unicode(course_key)}),
'studio_url': get_studio_url(course, 'course'),
'sections': sections,
'disable_buttons': disable_buttons,
'analytics_dashboard_message': analytics_dashboard_message
}
return render_to_response('instructor/instructor_dashboard_2/instructor_dashboard_2.html', context)
## Section functions starting with _section return a dictionary of section data.
## The dictionary must include at least {
## 'section_key': 'circus_expo'
## 'section_display_name': 'Circus Expo'
## }
## section_key will be used as a css attribute, javascript tie-in, and template import filename.
## section_display_name will be used to generate link titles in the nav bar.
def _section_e_commerce(course, access, paid_mode, coupons_enabled, reports_enabled):
""" Provide data for the corresponding dashboard section """
course_key = course.id
coupons = Coupon.objects.filter(course_id=course_key).order_by('-is_active')
course_price = paid_mode.min_price
total_amount = None
if access['finance_admin']:
single_purchase_total = PaidCourseRegistration.get_total_amount_of_purchased_item(course_key)
bulk_purchase_total = CourseRegCodeItem.get_total_amount_of_purchased_item(course_key)
total_amount = single_purchase_total + bulk_purchase_total
section_data = {
'section_key': 'e-commerce',
'section_display_name': _('E-Commerce'),
'access': access,
'course_id': unicode(course_key),
'currency_symbol': settings.PAID_COURSE_REGISTRATION_CURRENCY[1],
'ajax_remove_coupon_url': reverse('remove_coupon', kwargs={'course_id': unicode(course_key)}),
'ajax_get_coupon_info': reverse('get_coupon_info', kwargs={'course_id': unicode(course_key)}),
'get_user_invoice_preference_url': reverse('get_user_invoice_preference', kwargs={'course_id': unicode(course_key)}),
'sale_validation_url': reverse('sale_validation', kwargs={'course_id': unicode(course_key)}),
'ajax_update_coupon': reverse('update_coupon', kwargs={'course_id': unicode(course_key)}),
'ajax_add_coupon': reverse('add_coupon', kwargs={'course_id': unicode(course_key)}),
'get_sale_records_url': reverse('get_sale_records', kwargs={'course_id': unicode(course_key)}),
'get_sale_order_records_url': reverse('get_sale_order_records', kwargs={'course_id': unicode(course_key)}),
'instructor_url': reverse('instructor_dashboard', kwargs={'course_id': unicode(course_key)}),
'get_registration_code_csv_url': reverse('get_registration_codes', kwargs={'course_id': unicode(course_key)}),
'generate_registration_code_csv_url': reverse('generate_registration_codes', kwargs={'course_id': unicode(course_key)}),
'active_registration_code_csv_url': reverse('active_registration_codes', kwargs={'course_id': unicode(course_key)}),
'spent_registration_code_csv_url': reverse('spent_registration_codes', kwargs={'course_id': unicode(course_key)}),
'set_course_mode_url': reverse('set_course_mode_price', kwargs={'course_id': unicode(course_key)}),
'download_coupon_codes_url': reverse('get_coupon_codes', kwargs={'course_id': unicode(course_key)}),
'enrollment_report_url': reverse('get_enrollment_report', kwargs={'course_id': unicode(course_key)}),
'exec_summary_report_url': reverse('get_exec_summary_report', kwargs={'course_id': unicode(course_key)}),
'list_financial_report_downloads_url': reverse('list_financial_report_downloads',
kwargs={'course_id': unicode(course_key)}),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
'look_up_registration_code': reverse('look_up_registration_code', kwargs={'course_id': unicode(course_key)}),
'coupons': coupons,
'sales_admin': access['sales_admin'],
'coupons_enabled': coupons_enabled,
'reports_enabled': reports_enabled,
'course_price': course_price,
'total_amount': total_amount
}
return section_data
def _section_certificates(course):
"""Section information for the certificates panel.
The certificates panel allows global staff to generate
example certificates and enable self-generated certificates
for a course.
Arguments:
course (Course)
Returns:
dict
"""
example_cert_status = certs_api.example_certificates_status(course.id)
# Allow the user to enable self-generated certificates for students
# *only* once a set of example certificates has been successfully generated.
# If certificates have been misconfigured for the course (for example, if
# the PDF template hasn't been uploaded yet), then we don't want
# to turn on self-generated certificates for students!
can_enable_for_course = (
example_cert_status is not None and
all(
cert_status['status'] == 'success'
for cert_status in example_cert_status
)
)
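    # For illustration, example_cert_status is assumed to be a list of
    # per-certificate status dicts, e.g. [{'description': 'honor', 'status': 'success'}];
    # any entry whose status is not 'success' keeps can_enable_for_course False.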
instructor_generation_enabled = settings.FEATURES.get('CERTIFICATES_INSTRUCTOR_GENERATION', False)
return {
'section_key': 'certificates',
'section_display_name': _('Certificates'),
'example_certificate_status': example_cert_status,
'can_enable_for_course': can_enable_for_course,
'enabled_for_course': certs_api.cert_generation_enabled(course.id),
'instructor_generation_enabled': instructor_generation_enabled,
'urls': {
'generate_example_certificates': reverse(
'generate_example_certificates',
kwargs={'course_id': course.id}
),
'enable_certificate_generation': reverse(
'enable_certificate_generation',
kwargs={'course_id': course.id}
),
'start_certificate_generation': reverse(
'start_certificate_generation',
kwargs={'course_id': course.id}
),
'list_instructor_tasks_url': reverse(
'list_instructor_tasks',
kwargs={'course_id': course.id}
),
}
}
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@require_POST
@login_required
def set_course_mode_price(request, course_id):
"""
    Set the new course price and add a new entry in the CourseModesArchive table.
"""
try:
course_price = int(request.POST['course_price'])
except ValueError:
return JsonResponse(
{'message': _("Please Enter the numeric value for the course price")},
status=400) # status code 400: Bad Request
currency = request.POST['currency']
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
course_honor_mode = CourseMode.objects.filter(mode_slug='honor', course_id=course_key)
if not course_honor_mode:
return JsonResponse(
{'message': _("CourseMode with the mode slug({mode_slug}) DoesNotExist").format(mode_slug='honor')},
status=400) # status code 400: Bad Request
CourseModesArchive.objects.create(
course_id=course_id, mode_slug='honor', mode_display_name='Honor Code Certificate',
min_price=getattr(course_honor_mode[0], 'min_price'), currency=getattr(course_honor_mode[0], 'currency'),
expiration_datetime=datetime.datetime.now(pytz.utc), expiration_date=datetime.date.today()
)
course_honor_mode.update(
min_price=course_price,
currency=currency
)
return JsonResponse({'message': _("CourseMode price updated successfully")})
def _section_course_info(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
section_data = {
'section_key': 'course_info',
'section_display_name': _('Course Info'),
'access': access,
'course_id': course_key,
'course_display_name': course.display_name,
'has_started': course.has_started(),
'has_ended': course.has_ended(),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
}
if settings.FEATURES.get('DISPLAY_ANALYTICS_ENROLLMENTS'):
section_data['enrollment_count'] = CourseEnrollment.objects.enrollment_counts(course_key)
if settings.ANALYTICS_DASHBOARD_URL:
dashboard_link = _get_dashboard_link(course_key)
message = _("Enrollment data is now available in {dashboard_link}.").format(dashboard_link=dashboard_link)
section_data['enrollment_message'] = message
if settings.FEATURES.get('ENABLE_SYSADMIN_DASHBOARD'):
section_data['detailed_gitlogs_url'] = reverse('gitlogs_detail', kwargs={'course_id': unicode(course_key)})
try:
advance = lambda memo, (letter, score): "{}: {}, ".format(letter, score) + memo
section_data['grade_cutoffs'] = reduce(advance, course.grade_cutoffs.items(), "")[:-2]
except Exception: # pylint: disable=broad-except
section_data['grade_cutoffs'] = "Not Available"
# section_data['offline_grades'] = offline_grades_available(course_key)
try:
section_data['course_errors'] = [(escape(a), '') for (a, _unused) in modulestore().get_course_errors(course.id)]
except Exception: # pylint: disable=broad-except
section_data['course_errors'] = [('Error fetching errors', '')]
return section_data
def _section_membership(course, access, is_white_label):
""" Provide data for the corresponding dashboard section """
course_key = course.id
ccx_enabled = settings.FEATURES.get('CUSTOM_COURSES_EDX', False) and course.enable_ccx
section_data = {
'section_key': 'membership',
'section_display_name': _('Membership'),
'access': access,
'ccx_is_enabled': ccx_enabled,
'is_white_label': is_white_label,
'enroll_button_url': reverse('students_update_enrollment', kwargs={'course_id': unicode(course_key)}),
'unenroll_button_url': reverse('students_update_enrollment', kwargs={'course_id': unicode(course_key)}),
'upload_student_csv_button_url': reverse('register_and_enroll_students', kwargs={'course_id': unicode(course_key)}),
'modify_beta_testers_button_url': reverse('bulk_beta_modify_access', kwargs={'course_id': unicode(course_key)}),
'list_course_role_members_url': reverse('list_course_role_members', kwargs={'course_id': unicode(course_key)}),
'modify_access_url': reverse('modify_access', kwargs={'course_id': unicode(course_key)}),
'list_forum_members_url': reverse('list_forum_members', kwargs={'course_id': unicode(course_key)}),
'update_forum_role_membership_url': reverse('update_forum_role_membership', kwargs={'course_id': unicode(course_key)}),
}
return section_data
def _section_cohort_management(course, access):
""" Provide data for the corresponding cohort management section """
course_key = course.id
section_data = {
'section_key': 'cohort_management',
'section_display_name': _('Cohorts'),
'access': access,
'course_cohort_settings_url': reverse(
'course_cohort_settings',
kwargs={'course_key_string': unicode(course_key)}
),
'cohorts_url': reverse('cohorts', kwargs={'course_key_string': unicode(course_key)}),
'upload_cohorts_csv_url': reverse('add_users_to_cohorts', kwargs={'course_id': unicode(course_key)}),
'discussion_topics_url': reverse('cohort_discussion_topics', kwargs={'course_key_string': unicode(course_key)}),
}
return section_data
def _is_small_course(course_key):
""" Compares against MAX_ENROLLMENT_INSTR_BUTTONS to determine if course enrollment is considered small. """
is_small_course = False
enrollment_count = CourseEnrollment.objects.num_enrolled_in(course_key)
max_enrollment_for_buttons = settings.FEATURES.get("MAX_ENROLLMENT_INSTR_BUTTONS")
if max_enrollment_for_buttons is not None:
is_small_course = enrollment_count <= max_enrollment_for_buttons
return is_small_course
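# For example (illustrative values): with FEATURES["MAX_ENROLLMENT_INSTR_BUTTONS"] = 200,
# a course with 150 enrollments is considered small (instructor buttons stay enabled),
# a course with 201 is not, and if the setting is absent no course is ever "small".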
def _section_student_admin(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
is_small_course = _is_small_course(course_key)
section_data = {
'section_key': 'student_admin',
'section_display_name': _('Student Admin'),
'access': access,
'is_small_course': is_small_course,
'get_student_progress_url_url': reverse('get_student_progress_url', kwargs={'course_id': unicode(course_key)}),
'enrollment_url': reverse('students_update_enrollment', kwargs={'course_id': unicode(course_key)}),
'reset_student_attempts_url': reverse('reset_student_attempts', kwargs={'course_id': unicode(course_key)}),
'reset_student_attempts_for_entrance_exam_url': reverse(
'reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(course_key)},
),
'rescore_problem_url': reverse('rescore_problem', kwargs={'course_id': unicode(course_key)}),
'rescore_entrance_exam_url': reverse('rescore_entrance_exam', kwargs={'course_id': unicode(course_key)}),
'student_can_skip_entrance_exam_url': reverse(
'mark_student_can_skip_entrance_exam',
kwargs={'course_id': unicode(course_key)},
),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
'list_entrace_exam_instructor_tasks_url': reverse('list_entrance_exam_instructor_tasks',
kwargs={'course_id': unicode(course_key)}),
'spoc_gradebook_url': reverse('spoc_gradebook', kwargs={'course_id': unicode(course_key)}),
}
return section_data
def _section_extensions(course):
""" Provide data for the corresponding dashboard section """
section_data = {
'section_key': 'extensions',
'section_display_name': _('Extensions'),
'units_with_due_dates': [(title_or_url(unit), unicode(unit.location))
for unit in get_units_with_due_date(course)],
'change_due_date_url': reverse('change_due_date', kwargs={'course_id': unicode(course.id)}),
'reset_due_date_url': reverse('reset_due_date', kwargs={'course_id': unicode(course.id)}),
'show_unit_extensions_url': reverse('show_unit_extensions', kwargs={'course_id': unicode(course.id)}),
'show_student_extensions_url': reverse('show_student_extensions', kwargs={'course_id': unicode(course.id)}),
}
return section_data
def _section_data_download(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
section_data = {
'section_key': 'data_download',
'section_display_name': _('Data Download'),
'access': access,
'get_grading_config_url': reverse('get_grading_config', kwargs={'course_id': unicode(course_key)}),
'get_students_features_url': reverse('get_students_features', kwargs={'course_id': unicode(course_key)}),
'get_students_who_may_enroll_url': reverse(
'get_students_who_may_enroll', kwargs={'course_id': unicode(course_key)}
),
'get_anon_ids_url': reverse('get_anon_ids', kwargs={'course_id': unicode(course_key)}),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
'list_report_downloads_url': reverse('list_report_downloads', kwargs={'course_id': unicode(course_key)}),
'calculate_grades_csv_url': reverse('calculate_grades_csv', kwargs={'course_id': unicode(course_key)}),
'problem_grade_report_url': reverse('problem_grade_report', kwargs={'course_id': unicode(course_key)}),
}
return section_data
def null_applicable_aside_types(block): # pylint: disable=unused-argument
"""
get_aside method for monkey-patching into applicable_aside_types
while rendering an HtmlDescriptor for email text editing. This returns
an empty list.
"""
return []
def _section_send_email(course, access):
""" Provide data for the corresponding bulk email section """
course_key = course.id
# Monkey-patch applicable_aside_types to return no asides for the duration of this render
with patch.object(course.runtime, 'applicable_aside_types', null_applicable_aside_types):
# This HtmlDescriptor is only being used to generate a nice text editor.
html_module = HtmlDescriptor(
course.system,
DictFieldData({'data': ''}),
ScopeIds(None, None, None, course_key.make_usage_key('html', 'fake'))
)
fragment = course.system.render(html_module, 'studio_view')
fragment = wrap_xblock(
'LmsRuntime', html_module, 'studio_view', fragment, None,
extra_data={"course-id": unicode(course_key)},
usage_id_serializer=lambda usage_id: quote_slashes(unicode(usage_id)),
# Generate a new request_token here at random, because this module isn't connected to any other
# xblock rendering.
request_token=uuid.uuid1().get_hex()
)
email_editor = fragment.content
section_data = {
'section_key': 'send_email',
'section_display_name': _('Email'),
'access': access,
'send_email': reverse('send_email', kwargs={'course_id': unicode(course_key)}),
'editor': email_editor,
'list_instructor_tasks_url': reverse(
'list_instructor_tasks', kwargs={'course_id': unicode(course_key)}
),
'email_background_tasks_url': reverse(
'list_background_email_tasks', kwargs={'course_id': unicode(course_key)}
),
'email_content_history_url': reverse(
'list_email_content', kwargs={'course_id': unicode(course_key)}
),
}
return section_data
def _get_dashboard_link(course_key):
""" Construct a URL to the external analytics dashboard """
analytics_dashboard_url = '{0}/courses/{1}'.format(settings.ANALYTICS_DASHBOARD_URL, unicode(course_key))
link = u"<a href=\"{0}\" target=\"_blank\">{1}</a>".format(analytics_dashboard_url,
settings.ANALYTICS_DASHBOARD_NAME)
return link
def _section_analytics(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
analytics_dashboard_url = '{0}/courses/{1}'.format(settings.ANALYTICS_DASHBOARD_URL, unicode(course_key))
link_start = "<a href=\"{}\" target=\"_blank\">".format(analytics_dashboard_url)
insights_message = _("For analytics about your course, go to {analytics_dashboard_name}.")
insights_message = insights_message.format(
analytics_dashboard_name='{0}{1}</a>'.format(link_start, settings.ANALYTICS_DASHBOARD_NAME)
)
section_data = {
'section_key': 'instructor_analytics',
'section_display_name': _('Analytics'),
'access': access,
'insights_message': insights_message,
}
return section_data
def _section_metrics(course, access):
"""Provide data for the corresponding dashboard section """
course_key = course.id
section_data = {
'section_key': 'metrics',
'section_display_name': _('Metrics'),
'access': access,
'course_id': unicode(course_key),
'sub_section_display_name': get_section_display_name(course_key),
'section_has_problem': get_array_section_has_problem(course_key),
'get_students_opened_subsection_url': reverse('get_students_opened_subsection'),
'get_students_problem_grades_url': reverse('get_students_problem_grades'),
'post_metrics_data_csv_url': reverse('post_metrics_data_csv'),
}
return section_data
|
polimediaupv/edx-platform
|
lms/djangoapps/instructor/views/instructor_dashboard.py
|
Python
|
agpl-3.0
| 26,692
|
[
"VisIt"
] |
1ad397ebae59366cabb2d405891b5a8a4c9ea61db1e5dc8ada64410bd01faacb
|
import openvoronoi as ovd
import ovdvtk
import time
import vtk
import datetime
import math
import random
import os
import sys
import pickle
import gzip
import ovdgenerators as gens
def drawLine(myscreen, pt1, pt2, lineColor):
myscreen.addActor(ovdvtk.Line(p1=(pt1.x, pt1.y, 0), p2=(pt2.x, pt2.y, 0), color=lineColor))
def drawArc(myscreen, pt1, pt2, r, arcColor):
myscreen.addActor(ovdvtk.Line(p1=(pt1.x, pt1.y, 0), p2=(pt2.x, pt2.y, 0), color=arcColor))
def drawOffsets(myscreen, ofs):
# draw loops
nloop = 0
lineColor = ovdvtk.green
arcColor = ovdvtk.grass
for lop in ofs:
n = 0
N = len(lop)
first_point = []
previous = []
for p in lop:
# p[0] is the Point
# p[1] is -1 for lines, and r for arcs
if n == 0: # don't draw anything on the first iteration
previous = p[0]
# first_point = p[0]
else:
r = p[1]
p = p[0]
if r == -1:
drawLine(myscreen, previous, p, lineColor)
else:
drawArc(myscreen, previous, p, r, arcColor)
# myscreen.addActor( ovdvtk.Line(p1=(previous.x,previous.y,0),p2=(p.x,p.y,0),color=loopColor) )
previous = p
n = n + 1
print "rendered loop ", nloop, " with ", len(lop), " points"
nloop = nloop + 1
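# Minimal sketch of the loop format drawOffsets() consumes (inferred from the
# comments above; for illustration only): each loop is a list of (Point, radius)
# pairs, where radius == -1 marks a straight segment and any other value is an
# arc radius, e.g.:
#   example_loop = [(ovd.Point(0, 0), -1), (ovd.Point(1, 0), -1), (ovd.Point(1, 1), 0.5)]
#   drawOffsets(myscreen, [example_loop])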
poly_points = [(-0.2567719874411157, -0.4983049800651602),
(0.12205285479992212, -0.640371712930281),
(-0.25972854724944455, -0.5143879072702902),
(-0.34168692840153536, -0.6418861147966213),
(-0.5288215108461576, 0.18480346369654843),
(-0.35263585687204546, -0.50735692278175),
(-0.4821854389417177, 0.46463421861462373)]
if __name__ == "__main__":
# w=2500
# h=1500
# w=1920
# h=1080
w = 1024
h = 1024
myscreen = ovdvtk.VTKScreen(width=w, height=h)
ovdvtk.drawOCLtext(myscreen, rev_text=ovd.version())
scale = 1
myscreen.render()
random.seed(42)
far = 1
camPos = far
zmult = 3
# camPos/float(1000)
myscreen.camera.SetPosition(0, -camPos / float(1000), zmult * camPos)
myscreen.camera.SetClippingRange(-(zmult + 1) * camPos, (zmult + 1) * camPos)
myscreen.camera.SetFocalPoint(0.0, 0, 0)
vd = ovd.VoronoiDiagram(far, 120)
print ovd.version()
# for vtk visualization
vod = ovdvtk.VD(myscreen, vd, float(scale), textscale=0.01, vertexradius=0.003)
vod.drawFarCircle()
vod.textScale = 0.02
vod.vertexRadius = 0.0031
vod.drawVertices = 0
vod.drawVertexIndex = 1
vod.drawGenerators = 1
vod.offsetEdges = 0
vd.setEdgeOffset(0.05)
"""
p1=ovd.Point(-0.1,-0.2)
p2=ovd.Point(0.2,0.1)
p3=ovd.Point(0.4,0.2)
p4=ovd.Point(0.6,0.6)
p5=ovd.Point(-0.6,0.3)
pts = [p1,p2,p3,p4,p5]
"""
pts = []
for p in poly_points:
pts.append(ovd.Point(p[0], p[1]))
# t_after = time.time()
# print ".done in {0:.3f} s.".format( t_after-t_before )
times = []
id_list = []
m = 0
t_before = time.time()
for p in pts:
pt_id = vd.addVertexSite(p)
id_list.append(pt_id)
print m, " added vertex", pt_id, " at ", p
m = m + 1
t_after = time.time()
times.append(t_after - t_before)
# exit()
# print " ",2*Nmax," point-sites sites took {0:.3f}".format(times[0])," seconds, {0:.2f}".format( 1e6*float( times[0] )/(float(2*Nmax)*float(math.log10(2*Nmax))) ) ,"us/n*log(n)"
print "all point sites inserted. "
print "VD check: ", vd.check()
print "now adding line-segments."
t_before = time.time()
for n in [0]: # range(len(id_list)):
        if n == len(id_list) - 1:
            vd.addLineSite(id_list[n], id_list[0])
            print n, " added final segment", n, " to ", 0
        else:
            vd.addLineSite(id_list[n], id_list[n + 1])
            print n, " added segment", n, " to ", n + 1
# vd.addLineSite( id_list[1], id_list[2])
# vd.addLineSite( id_list[2], id_list[3])
# vd.addLineSite( id_list[3], id_list[4])
# vd.addLineSite( id_list[4], id_list[0])
vd.check()
t_after = time.time()
line_time = t_after - t_before
if line_time < 1e-3:
line_time = 1
times.append(line_time)
# of = ovd.Offset( vd.getGraph() ) # pass the created graph to the Offset class
# of.str()
# ofs = of.offset(0.123)
# print ofs
# drawOffsets(myscreen, ofs)
pi = ovd.PolygonInterior(True)
vd.filter_graph(pi)
of = ovd.Offset(vd.getGraph()) # pass the created graph to the Offset class
ofs = of.offset(0.123)
# print ofs
ovdvtk.drawOffsets(myscreen, ofs)
# of.offset(0.125)
vod.setVDText2(times)
vod.setAll()
print "PYTHON All DONE."
myscreen.render()
myscreen.iren.Start()
|
aewallin/openvoronoi
|
python_examples/issues/polygon_2015-02-09.py
|
Python
|
lgpl-2.1
| 4,948
|
[
"VTK"
] |
75a12c56b780eae89d08e52422f359ccd9c52356fd66f128b9b21a56f95a6ede
|
import ocl
import camvtk
import time
import vtk
import datetime
import math
def drawPoints(myscreen, clpoints, ccpoints):
c=camvtk.PointCloud( pointlist=clpoints, collist=ccpoints)
c.SetPoints()
myscreen.addActor(c )
def drawFiber(myscreen, f, fibercolor=camvtk.red):
inter = f.getInts()
for i in inter:
if not i.empty():
ip1 = f.point( i.lower )
ip2 = f.point( i.upper )
myscreen.addActor( camvtk.Line(p1=(ip1.x,ip1.y,ip1.z),p2=(ip2.x,ip2.y,ip2.z), color=fibercolor) )
myscreen.addActor( camvtk.Sphere(center=(ip1.x,ip1.y,ip1.z),radius=0.005, color=camvtk.clColor( i.lower_cc) ) )
myscreen.addActor( camvtk.Sphere(center=(ip2.x,ip2.y,ip2.z),radius=0.005, color=camvtk.clColor( i.upper_cc) ) )
cc1 = i.lower_cc
cc2 = i.upper_cc
myscreen.addActor( camvtk.Sphere(center=(cc1.x,cc1.y,cc1.z),radius=0.005, color=camvtk.lgreen ) )
myscreen.addActor( camvtk.Sphere(center=(cc2.x,cc2.y,cc2.z),radius=0.005, color=camvtk.lgreen ) )
# cutter circle
#c1 = camvtk.Circle(center=(ip1.x,ip1.y,ip1.z), radius = 0.3/2, color=fibercolor)
#myscreen.addActor(c1)
#c2 = camvtk.Circle(center=(ip2.x,ip2.y,ip2.z), radius = 0.3/2, color=fibercolor)
#myscreen.addActor(c2)
def drawFiber_clpts(myscreen, f, fibercolor=camvtk.red):
inter = f.getInts()
for i in inter:
if not i.empty():
ip1 = f.point( i.lower )
ip2 = f.point( i.upper )
myscreen.addActor( camvtk.Line(p1=(ip1.x,ip1.y,ip1.z),p2=(ip2.x,ip2.y,ip2.z), color=fibercolor) )
myscreen.addActor( camvtk.Sphere(center=(ip1.x,ip1.y,ip1.z),radius=0.005, color=camvtk.clColor( i.lower_cc) ) )
myscreen.addActor( camvtk.Sphere(center=(ip2.x,ip2.y,ip2.z),radius=0.005, color=camvtk.clColor( i.upper_cc) ) )
#cc1 = i.lower_cc
#cc2 = i.upper_cc
#myscreen.addActor( camvtk.Sphere(center=(cc1.x,cc1.y,cc1.z),radius=0.005, color=camvtk.lgreen ) )
#myscreen.addActor( camvtk.Sphere(center=(cc2.x,cc2.y,cc2.z),radius=0.005, color=camvtk.lgreen ) )
def yfiber(yvals,s,zh,myscreen):
for y in yvals:
f1 = ocl.Point(-20,y,zh) # start point of fiber
f2 = ocl.Point(+20,y,zh) # end point of fiber
f = ocl.Fiber( f1, f2)
for t in s.getTriangles():
i = ocl.Interval()
#cutter.vertexPush(f,i,t)
#cutter.facetPush(f,i,t)
#cutter.edgePush(f,i,t)
cutter.pushCutter(f,i,t)
f.addInterval(i)
drawFiber_clpts(myscreen, f, camvtk.red)
def xfiber(xvals,s,zh,myscreen):
for x in xvals:
f1 = ocl.Point(x,-20,zh) # start point of fiber
f2 = ocl.Point(x,+20,zh) # end point of fiber
f = ocl.Fiber( f1, f2)
for t in s.getTriangles():
i = ocl.Interval()
#cutter.vertexPush(f,i,t)
#cutter.facetPush(f,i,t)
#cutter.edgePush(f,i,t)
cutter.pushCutter(f,i,t)
f.addInterval(i)
drawFiber_clpts(myscreen, f, camvtk.lblue)
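# Minimal sketch of the per-fiber workflow used in yfiber()/xfiber() above
# (illustrative; 'some_triangle' is a placeholder for one triangle of the surface):
#   f = ocl.Fiber(ocl.Point(-20, 0, 0.1), ocl.Point(20, 0, 0.1))
#   i = ocl.Interval()
#   cutter.pushCutter(f, i, some_triangle)  # clip the interval against one triangle
#   f.addInterval(i)                        # accumulate the result on the fiber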
if __name__ == "__main__":
print ocl.revision()
myscreen = camvtk.VTKScreen()
#stl = camvtk.STLSurf("../stl/gnu_tux_mod.stl")
stl = camvtk.STLSurf("../stl/demo.stl")
myscreen.addActor(stl)
stl.SetWireframe()
stl.SetColor((1,1,1))
polydata = stl.src.GetOutput()
s = ocl.STLSurf()
camvtk.vtkPolyData2OCLSTL(polydata, s)
print "STL surface read,", s.size(), "triangles"
cutter = ocl.CylCutter(0.3, 6)
print "lengt=", cutter.getLength()
print "fiber...",
    extent = 30  # total extent of the fiber grid (renamed to avoid shadowing the builtin range)
    Nmax = 200
    yvals = [float(n - float(Nmax) / 2) / Nmax * extent for n in xrange(0, Nmax + 1)]
    xvals = [float(n - float(Nmax) / 2) / Nmax * extent for n in xrange(0, Nmax + 1)]
zmin = -0.1
zmax = 0.5
zNmax = 2
dz = (zmax-zmin)/(zNmax-1)
zvals=[]
#for n in xrange(0,zNmax):
# zvals.append(zmin+n*dz)
zvals.append(0.1)
#zvals = [ float(n-float(zNmax)/2)/zNmax*range for n in xrange(0,zNmax+1)]
#print zvals
#exit()
#cc = ocl.CCPoint()
#zh = -0.1
#zh = 0.2571567
for zh in zvals:
yfiber(yvals,s,zh,myscreen)
xfiber(xvals,s,zh,myscreen)
print "done."
myscreen.camera.SetPosition(0.5, 3, 2)
myscreen.camera.SetFocalPoint(0.5, 0.5, 0)
camvtk.drawArrows(myscreen,center=(-0.5,-0.5,-0.5))
camvtk.drawOCLtext(myscreen)
myscreen.render()
w2if = vtk.vtkWindowToImageFilter()
w2if.SetInput(myscreen.renWin)
lwr = vtk.vtkPNGWriter()
lwr.SetInput( w2if.GetOutput() )
myscreen.iren.Start()
#raw_input("Press Enter to terminate")
|
tectronics/opencamlib
|
scripts/fiber_04_stl.py
|
Python
|
gpl-3.0
| 4,750
|
[
"VTK"
] |
133a87bf179eca4407742c83a8adba243095d98162570c625a476fc1e5c35ed9
|
r"""
Tree representations (:mod:`skbio.tree`)
========================================
.. currentmodule:: skbio.tree
This module provides functionality for working with trees, including
phylogenetic trees and hierarchies, and prefix trees (i.e., tries).
Functionality is provided for constructing trees, for traversing in multiple
ways, comparisons, fetching subtrees, and more. This module supports trees that
are multifurcating and nodes that have single descendants.
Classes
-------
.. autosummary::
:toctree: generated/
TreeNode
CompressedTrie
Phylogenetic Reconstruction
---------------------------
.. autosummary::
:toctree: generated/
nj
Utility Functions
-----------------
.. autosummary::
:toctree: generated/
fasta_to_pairlist
majority_rule
Exceptions
----------
.. autosummary::
:toctree: generated/
TreeError
NoLengthError
DuplicateNodeError
MissingNodeError
NoParentError
Examples
--------
>>> from skbio import TreeNode
>>> from io import StringIO
A new tree can be constructed from a Newick string. Newick is a common format
used to represent tree objects within a file. Newick was part of the original
PHYLIP package from Joseph Felsenstein's group (defined `here
<http://goo.gl/fIY1Iq>`_), and is based around representing nesting with
parentheses. For instance, the following string describes a 3 taxon tree, with
one internal node:
((A, B)C, D)root;
Where A, B, and D are tips of the tree, and C is an internal node that covers
tips A and B.
Now let's construct a simple tree and dump an ASCII representation:
>>> tree = TreeNode.read(StringIO(u"((A, B)C, D)root;"))
>>> print(tree.is_root()) # is this the root of the tree?
True
>>> print(tree.is_tip()) # is this node a tip?
False
>>> print(tree.ascii_art())
                    /-A
          /C-------|
-root----|          \-B
         |
          \-D
There are a few common ways to traverse a tree, and depending on your use,
some methods are more appropriate than others. Wikipedia has a well written
page on tree `traversal methods <http://goo.gl/K4Ufl>`_, and will go into
further depth than what we'll cover here. We're only going to cover two of the
commonly used traversals here, preorder and postorder, but we will show
examples of two other common helper traversal methods to gather tips or
internal nodes.
The first traversal we'll cover is a preorder traversal in which you evaluate
from root to tips, looking at the left most child first. For instance:
>>> for node in tree.preorder():
... print(node.name)
root
C
A
B
D
The next method we'll look at is a postorder traversal, which will evaluate the
left subtree tips first before walking back up the tree:
>>> for node in tree.postorder():
... print(node.name)
A
B
C
D
root
`TreeNode` provides two helper methods as well for iterating over just the tips
or for iterating over just the internal nodes.
>>> for node in tree.tips():
... print("Node name: %s, Is a tip: %s" % (node.name, node.is_tip()))
Node name: A, Is a tip: True
Node name: B, Is a tip: True
Node name: D, Is a tip: True
>>> for node in tree.non_tips():
... print("Node name: %s, Is a tip: %s" % (node.name, node.is_tip()))
Node name: C, Is a tip: False
Note, by default, `non_tips` will ignore `self` (which is the root in this
case). You can pass the `include_self` flag to `non_tips` if you wish to
include `self`.
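If you do want the root as well (a sketch; this assumes the default postorder
ordering shown above):
>>> for node in tree.non_tips(include_self=True):
...     print("Node name: %s, Is a tip: %s" % (node.name, node.is_tip()))
Node name: C, Is a tip: False
Node name: root, Is a tip: False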
The `TreeNode` provides a few ways to compare trees. First, let's create two
similar trees and compare their topologies using `compare_subsets`. This
distance is the fraction of common clades present in the two trees, where a
distance of 0 means the trees contain identical clades, and a distance of 1
indicates the trees do not share any common clades:
>>> tree1 = TreeNode.read(StringIO(u"((A, B)C, (D, E)F, (G, H)I)root;"))
>>> tree2 = TreeNode.read(StringIO(u"((G, H)C, (D, E)F, (B, A)I)root;"))
>>> tree3 = TreeNode.read(StringIO(u"((D, B)C, (A, E)F, (G, H)I)root;"))
>>> print(tree1.compare_subsets(tree1)) # identity case
0.0
>>> print(tree1.compare_subsets(tree2)) # same tree but different clade order
0.0
>>> print(tree1.compare_subsets(tree3)) # only 1 of 3 common subsets
0.666666666667
We can additionally take into account branch length when computing distances
between trees. First, we're going to construct two new trees with described
branch length, note the difference in the Newick strings:
>>> tree1 = \
... TreeNode.read(StringIO(u"((A:0.1, B:0.2)C:0.3, D:0.4, E:0.5)root;"))
>>> tree2 = \
... TreeNode.read(StringIO(u"((A:0.4, B:0.8)C:0.3, D:0.1, E:0.5)root;"))
In these two trees, we've added on a description of length from the node to
its parent, so for instance:
>>> for node in tree1.postorder():
... print(node.name, node.length)
A 0.1
B 0.2
C 0.3
D 0.4
E 0.5
root None
Now let's compare two trees using the distances computed pairwise between tips
in the trees. The distance computed, by default, is the correlation of all
pairwise tip-to-tip distances between trees:
>>> print(tree1.compare_tip_distances(tree1)) # identity case
0.0
>>> print(tree1.compare_tip_distances(tree2))
0.120492524415
Prefix trees (i.e., tries) examples
-----------------------------------
Construct a Trie from a (key, value) list
>>> from skbio.tree import CompressedTrie
>>> pair_list = [("ab", "0"),
... ("abababa", "1"),
... ("abab", "2"),
... ("baba", "3"),
... ("ababaa", "4"),
... ("a", "5"),
... ("abababa", "6"),
... ("bab", "7"),
... ("babba", "8")]
>>> t = CompressedTrie(pair_list)
Get the number of keys stored in the trie
>>> len(t)
9
Get the number of nodes in the trie
>>> t.size
10
Get the trie's prefix map
>>> t.prefix_map
{'1': ['6', '2', '0', '5'], '8': ['7'], '3': [], '4': []}
Find the value attached to a given key
>>> t.find("ababaa")
['4']
Add a new (key, value) pair to the Trie
>>> t.insert("bac", "9")
>>> t.find("bac")
['9']
>>> t.prefix_map
{'1': ['6', '2', '0', '5'], '9': [], '3': [], '4': [], '8': ['7']}
Create a new trie with a list of sequences
>>> from skbio.tree import fasta_to_pairlist
>>> seqs = [("s0", "ACA"),
... ("s1", "ACAGTC"),
... ("s2", "ACTA"),
... ("s3", "CAGT"),
... ("s4", "CATGAA"),
... ("s5", "A"),
... ("s6", "CATGTA"),
... ("s7", "CACCA")]
>>> t = CompressedTrie(fasta_to_pairlist(seqs))
>>> t.prefix_map
{'s3': [], 's2': [], 's1': ['s0', 's5'], 's7': [], 's6': [], 's4': []}
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from skbio.util import TestRunner
from ._tree import TreeNode
from ._trie import CompressedTrie, fasta_to_pairlist
from ._nj import nj
from ._majority_rule import majority_rule
from ._exception import (TreeError, NoLengthError, DuplicateNodeError,
MissingNodeError, NoParentError)
__all__ = ['TreeNode', 'CompressedTrie', 'fasta_to_pairlist', 'nj',
'majority_rule', 'TreeError', 'NoLengthError', 'DuplicateNodeError',
'MissingNodeError', 'NoParentError']
test = TestRunner(__file__).test
|
wdwvt1/scikit-bio
|
skbio/tree/__init__.py
|
Python
|
bsd-3-clause
| 7,519
|
[
"scikit-bio"
] |
69ef0c914d05afdc7aef66e45c2b2d1d156ddef028034363e0b4c51de0bebc9d
|
# -*- coding: utf-8 -*-
import os
from datetime import datetime
from testtools import TestCase
from . import makeprefs, dummykey, _temp_home
from shutil import copy
from mock import patch, Mock, MagicMock
from contextlib import nested
class CacheTest(TestCase):
def setUp(self):
super(CacheTest, self).setUp()
self.prefs = makeprefs()
self.home = os.path.join('t', 'data', 'home')
def tearDown(self):
super(CacheTest, self).tearDown()
def _makeit(self, *args, **kwargs):
from lacli.cache import Cache
return Cache(*args, **kwargs)
def test_cache(self):
assert self._makeit("")
def test_prepare(self):
with _temp_home() as home:
cache = self._makeit(home)
cache.prepare('foo', os.path.join('t', 'data', 'arc1'))
archives = cache._for_adf('archives')
self.assertEqual(len(archives), 1)
self.assertEqual('foo',
next(archives.itervalues())['archive'].title)
def test_cache_dir(self):
d = 'archives'
self.assertTrue(
os.path.isdir(self._makeit(self.home)._cache_dir(d)))
with _temp_home() as home:
cache = self._makeit(home)
self.assertFalse(os.path.exists(cache._cache_dir(d)))
self.assertTrue(os.path.isdir(cache._cache_dir(d, write=True)))
def test_archive_open(self):
open_mock = Mock(return_value=None)
import lacli.cache as cache
with patch.object(cache, 'open', open_mock, create=True):
with _temp_home() as home:
cache = self._makeit(home)
dname = os.path.join(home, 'archives')
fname = os.path.join(dname, 'foo')
cache._archive_open('foo', 'w')
open_mock.assert_called_with(fname, 'w')
self.assertTrue(os.path.isdir(dname))
cache._cert_open('foo', 'w')
dname = os.path.join(home, 'certs')
fname = os.path.join(dname, 'foo')
open_mock.assert_called_with(fname, 'w')
self.assertTrue(os.path.isdir(dname))
def test_certs(self):
with _temp_home() as home:
cache = self._makeit(home)
self.assertEqual({}, cache.certs())
cdir = os.path.join(home, 'certs')
os.makedirs(cdir)
certs = cache.certs()
self.assertEqual(0, len(certs))
copy(os.path.join(self.home, 'archives', 'sample.adf'), cdir)
certs = cache.certs()
self.assertEqual(1, len(certs))
self.assertIn('12-345', certs)
self.assertIn('archive', certs['12-345'])
self.assertEqual('My 2013 vacation',
certs['12-345']['archive'].title)
c = certs['12-345']['cert'].keys[1]
self.assertTrue(hasattr(c, 'key'))
self.assertTrue(hasattr(c, 'method'))
self.assertTrue(hasattr(c, 'input'))
self.assertEqual(dummykey, c.input)
self.assertEqual(1, c.key)
self.assertEqual('pbkdf2', c.method)
def test_save_cert(self):
import lacli.cache
from lacore.adf.elements import Archive, Meta, Signature
from StringIO import StringIO
with nested(
patch.object(lacli.cache, 'NamedTemporaryFile', create=True),
patch.object(lacli.cache, 'archive_slug', create=True),
) as (mock_open, slug):
out = StringIO()
mock_open.return_value.__enter__.return_value = MagicMock()
mock_open.return_value.__enter__.return_value.write = out.write
now = datetime.utcfromtimestamp(0)
meta = Meta('zip', 'xor', created=now)
archive = Archive('foo', meta)
slug.return_value = 'foo'
cache = self._makeit(self.home)
cache.save_cert({'archive': archive,
'signature': Signature(aid="foo",
uri="http://baz.com",
created=now)})
args, kwargs = mock_open.call_args
self.assertIn('prefix', kwargs)
self.assertEqual(ADF_EXAMPLE_1, out.getvalue())
def test_save_upload(self):
import lacli.cache
from lacore.adf.elements import Archive, Meta, Signature, Links
from StringIO import StringIO
with patch.object(lacli.cache, 'archive_slug', create=True) as slug:
now = datetime.utcfromtimestamp(0)
meta = Meta('zip', 'xor', created=now)
archive = Archive('foo', meta)
slug.return_value = 'foo'
cache = self._makeit(self.home)
out = StringIO()
aopen = MagicMock()
aopen.return_value.__enter__.return_value = MagicMock()
aopen.return_value.__enter__.return_value.write = out.write
cache._archive_open = aopen
r = cache.save_upload('lalafname',
{'archive': archive,
'signature': Signature(aid="foo",
uri="http://baz.com",
created=now),
'links': Links()},
uri='http://foo.bar',
capsule='Photos')
self.assertEqual(
r, {'fname': 'lalafname', 'link': 'http://foo.bar#C:Photos:',
'archive': archive})
args, kwargs = aopen.call_args
self.assertEqual(('lalafname', 'w'), args)
self.assertEqual(ADF_EXAMPLE_2, out.getvalue())
def test_import_cert(self):
import lacli.cache
with nested(
patch.object(lacli.cache, 'NamedTemporaryFile', create=True),
patch.object(lacli.cache, 'archive_slug', create=True),
_temp_home()
) as (mock_open, slug, home):
mock_open.return_value.__enter__.return_value = MagicMock()
mock_open.return_value.__enter__.return_value.name = "bar"
slug.return_value = 'foo'
cache = self._makeit(home)
cert = os.path.join('t', 'data', 'longaccess-74-5N93.html')
aid, fname = cache.import_cert(cert)
args, kwargs = mock_open.call_args
self.assertIn('prefix', kwargs)
self.assertEqual('bar', fname)
self.assertEqual('74-5N93', aid)
def test_upload_complete(self):
import lacli.cache
cache = self._makeit(self.home)
with nested(
patch.object(lacli.cache, 'open', create=True),
patch.object(lacli.cache, 'load_archive', create=True),
patch.object(lacli.cache, 'make_adf', create=True)
) as (mock_open, mock_load, mock_adf):
from lacore.adf.elements import Archive, Meta
now = datetime.utcfromtimestamp(0)
meta = Meta('zip', 'xor', created=now)
archive = Archive('foo', meta)
mock_load.return_value = {'archive': archive}
uri = 'http://longaccess.com/a'
ds = cache.upload_complete("foo", {'archive_key': 'bar',
'archive': uri})
self.assertIn('signature', ds)
self.assertEqual('bar', ds['signature'].aid)
self.assertEqual(uri, ds['signature'].uri)
ADF_EXAMPLE_1 = """---
!archive {
? !!str "meta"
: !meta {
? !!str "cipher"
: !!str "xor",
? !!str "created"
: !!timestamp "1970-01-01 00:00:00",
? !!str "format"
: !!str "zip",
},
? !!str "title"
: !!str "foo",
}
---
!signature {
? !!str "aid"
: !!str "foo",
? !!str "created"
: !!timestamp "1970-01-01 00:00:00",
? !!str "expires"
: !!timestamp "2000-01-01 00:00:00",
? !!str "uri"
: !!str "http://baz.com",
}
"""
ADF_EXAMPLE_2 = """---
!archive {
? !!str "meta"
: !meta {
? !!str "cipher"
: !!str "xor",
? !!str "created"
: !!timestamp "1970-01-01 00:00:00",
? !!str "format"
: !!str "zip",
},
? !!str "title"
: !!str "foo",
}
---
!links {
? !!str "upload"
: !!str "http://foo.bar#C:Photos:",
}
---
!signature {
? !!str "aid"
: !!str "foo",
? !!str "created"
: !!timestamp "1970-01-01 00:00:00",
? !!str "expires"
: !!timestamp "2000-01-01 00:00:00",
? !!str "uri"
: !!str "http://baz.com",
}
"""
|
longaccess/longaccess-client
|
lacli/t/test_cache.py
|
Python
|
apache-2.0
| 8,661
|
[
"ADF"
] |
74de09e2345b679edc9651da078b2b6f8b784662923404984256f66867e459e9
|
# Import the necessary modules.
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import scipy.optimize
import glob
import skimage.io
import skimage.morphology
import scipy.constants
# Define functions from Justin Bois
# Fit symmetric Gaussian to x, y, z data
def fit_gaussian(x, y, z):
"""
Fits symmetric Gaussian to x, y, z.
Fit func: z = a * exp(-((x - x_0)**2 + (y - y_0)**2) / (2 * sigma**2))
Returns: p = [a, x_0, y_0, sigma]
"""
def sym_gaussian(p):
"""
Returns a Gaussian function:
a**2 * exp(-((x - x_0)**2 + (y - y_0)**2) / (2 * sigma**2))
p = [a, x_0, y_0, sigma]
"""
a, x_0, y_0, sigma = p
return a**2 \
* np.exp(-((x - x_0)**2 + (y - y_0)**2) / (2.0 * sigma**2))
def sym_gaussian_resids(p):
"""Residuals to be sent into leastsq"""
return z - sym_gaussian(p)
def guess_fit_gaussian():
"""
return a, x_0, y_0, and sigma based on computing moments of data
"""
a = z.max()
# Compute moments
total = z.sum()
x_0 = np.dot(x, z) / total
y_0 = np.dot(y, z) / total
# Approximate sigmas
sigma_x = np.dot(x**2, z) / total
sigma_y = np.dot(y**2, z) / total
sigma = np.sqrt(sigma_x * sigma_y)
# Return guess
return (a, x_0, y_0, sigma)
# Get guess
p0 = guess_fit_gaussian()
# Perform optimization using nonlinear least squares
popt, junk_output, info_dict, mesg, ier = \
scipy.optimize.leastsq(sym_gaussian_resids, p0, full_output=True)
# Check to make sure leastsq was successful. If not, return centroid
# estimate.
if ier in (1, 2, 3, 4):
return (popt[0]**2, popt[1], popt[2], popt[3])
else:
return p0
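# Quick sanity check (illustrative sketch, not part of the analysis): fitting
# synthetic data drawn from the model should roughly recover the parameters.
#   xg, yg = np.meshgrid(np.arange(-3, 4), np.arange(-3, 4))
#   xg, yg = xg.ravel().astype(float), yg.ravel().astype(float)
#   zg = 2.0 * np.exp(-(xg**2 + yg**2) / (2.0 * 1.5**2))
#   a, x_0, y_0, sigma = fit_gaussian(xg, yg, zg)  # expect a~2, x_0~y_0~0, sigma~1.5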
def bead_position_pix(im, selem):
"""
Determines the position of bead in image in units of pixels with
subpixel accuracy.
"""
# The x, y coordinates of pixels are nonzero values in selem
y, x = np.nonzero(selem)
x = x - selem.shape[1] // 2
y = y - selem.shape[0] // 2
# Find the center of the bead to pixel accuracy
peak_flat_ind = np.argmax(im)
peak_j = peak_flat_ind % im.shape[0]
peak_i = (peak_flat_ind - peak_j) // im.shape[1]
# Define local neighborhood
irange = (peak_i - selem.shape[0] // 2, peak_i + selem.shape[0] // 2 + 1)
jrange = (peak_j - selem.shape[1] // 2, peak_j + selem.shape[1] // 2 + 1)
# Get values of the image in local neighborhood
z = im[irange[0]:irange[1], jrange[0]:jrange[1]][selem.astype(np.bool)]
# Fit Gaussian
a, j_subpix, i_subpix, sigma = fit_gaussian(x, y, z)
# Return x-y position
return np.array([peak_i + i_subpix, peak_j + j_subpix])
# Load the images.
g = 'data/optical_tweezer/trapped_bead_5.2x_4_MMStack_Pos0.ome.tif'
im = skimage.io.imread(g)
# We will use the nine-point estimate (as is typically done)
selem = skimage.morphology.square(3)
# Loop through and find centers
centers = []
length=100
time = np.arange(0, length, 1)
for i in range(length):
centers.append(bead_position_pix(np.invert(im[i]), selem))
# Store as NumPy array
centers = np.array(centers)
# Get displacements
x = centers[:,1] - centers[:,1].mean()
y = centers[:,0] - centers[:,0].mean()
# Plot displacement
plt.figure()
plt.plot(time, centers[:,0], lw=1, zorder=1, label=r'$x$')
plt.figure()
plt.plot(time, centers[:,1], lw=0.5, zorder=0, label=r'$y$')
plt.xlabel('time (s)')
plt.ylabel('$x$, $y$ (pixels)')
plt.legend(loc='lower left')
# Get x and y in real units
ip_dist = 0.042
x_micron = x * ip_dist
y_micron = y * ip_dist
# Get k's from equipartition
kT = scipy.constants.k * (273.15 + 22.0) * 1e18
k_x = kT / (x_micron**2).mean()
k_y = kT / (y_micron**2).mean()
# Print result
print('k_x = %.2f pN/µm' % k_x)
print('k_y = %.2f pN/µm' % k_y)
plt.figure()
plt.plot(centers[:,0], centers[:,1], '-')
plt.show()
|
RPGroup-PBoC/gist_pboc_2017
|
code/gaussian_trap_stiffness.py
|
Python
|
mit
| 3,972
|
[
"Gaussian"
] |
2ffc456480554145323fec2fa805564d88938157e2e5484d93b8a9495cb26c94
|
"""
==============================================================
Reading a .dip file from xfit and view with source space in 3D
==============================================================
Here the .dip file was generated with the mne_dipole_fit command.
The detailed Unix command is:
$mne_dipole_fit --meas sample_audvis-ave.fif --set 1 --meg --tmin 40 --tmax 95 \
--bmin -200 --bmax 0 --noise sample_audvis-cov.fif \
--bem ../../subjects/sample/bem/sample-5120-bem-sol.fif \
--origin 0:0:40 --mri sample_audvis-meg-oct-6-fwd.fif \
--dip sample_audvis_set1.dip
"""
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
print(__doc__)
import numpy as np
import mne
from mne.datasets import sample
data_path = sample.data_path()
fwd_fname = data_path + '/MEG/sample/sample_audvis-meg-oct-6-fwd.fif'
dip_fname = data_path + '/MEG/sample/sample_audvis_set1.dip'
bem_fname = data_path + '/subjects/sample/bem/sample-5120-bem-sol.fif'
brain_surface = mne.read_bem_surfaces(bem_fname, add_geom=True)[0]
points = brain_surface['rr']
faces = brain_surface['tris']
fwd = mne.read_forward_solution(fwd_fname)
src = fwd['src']
# read dipoles
time, pos, amplitude, ori, gof = mne.read_dip(dip_fname)
print("Time (ms): %s" % time)
print("Amplitude (nAm): %s" % amplitude)
print("GOF (%%): %s" % gof)
# only plot those for which GOF is above 50%
pos = pos[gof > 50.]
ori = ori[gof > 50.]
time = time[gof > 50.]
###############################################################################
# Show result on 3D source space
try:
from enthought.mayavi import mlab
except:
from mayavi import mlab
lh_points = src[0]['rr']
lh_faces = src[0]['use_tris']
mlab.figure(size=(600, 600), bgcolor=(1, 1, 1), fgcolor=(0, 0, 0))
# show brain surface after proper coordinate system transformation
points = brain_surface['rr']
faces = brain_surface['tris']
coord_trans = fwd['mri_head_t']['trans']
points = np.dot(coord_trans[:3, :3], points.T).T + coord_trans[:3, -1]
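# The 4x4 mri->head transform is applied here as p' = R p + t, with
# R = coord_trans[:3, :3] and t = coord_trans[:3, -1], done for all points
# at once through a single matrix product.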
mlab.triangular_mesh(points[:, 0], points[:, 1], points[:, 2],
faces, color=(1, 1, 0), opacity=0.3)
# show one cortical surface
mlab.triangular_mesh(lh_points[:, 0], lh_points[:, 1], lh_points[:, 2],
lh_faces, color=(0.7, ) * 3)
# show dipole as small cones
dipoles = mlab.quiver3d(pos[:, 0], pos[:, 1], pos[:, 2],
ori[:, 0], ori[:, 1], ori[:, 2],
opacity=1., scale_factor=4e-4, scalars=time,
mode='cone', colormap='RdBu')
# revert colormap
dipoles.module_manager.scalar_lut_manager.reverse_lut = True
mlab.colorbar(dipoles, title='Dipole fit time (ms)')
# proper 3D orientation
mlab.get_engine().scenes[0].scene.x_plus_view()
|
effigies/mne-python
|
examples/inverse/plot_dipole_fit_result.py
|
Python
|
bsd-3-clause
| 2,770
|
[
"Mayavi"
] |
89aa83983fd46d8b78e9e86ec9fc87ed93bd2e4e988ca745df59cfba1a43fc92
|
# $Id$
#
# Copyright (C) 2003-2008 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
"""unit testing code for the ScreenComposite functionality
"""
from rdkit import RDConfig
import unittest,os
from rdkit.ML import BuildComposite
from rdkit.ML import ScreenComposite
import cPickle as pickle
def feq(a, b, tol=1e-4):
  """Return 1 if a and b agree to within tol, else 0."""
  return 0 if abs(a - b) > tol else 1
class TestCase(unittest.TestCase):
def setUp(self):
#print '\n%s: '%self.shortDescription(),
self.baseDir = os.path.join(RDConfig.RDCodeDir,'ML','test_data')
self.dbName = RDConfig.RDTestDatabase
self.details = ScreenComposite.SetDefaults()
self.details.dbName = self.dbName
self.details.dbUser = RDConfig.defaultDBUser
self.details.dbPassword = RDConfig.defaultDBPassword
def test1(self):
""" basics """
self.details.tableName = 'ferro_quant'
compos = pickle.load(open(os.path.join(self.baseDir,'ferromag_quant_10.pkl'),
'rb'))
tgt = 7
assert len(compos)==tgt,'bad composite loaded: %d != %d'%(len(compos),tgt)
nGood,misCount,nSkipped,avgGood,avgBad,avgSkip,tbl = ScreenComposite.ScreenFromDetails(compos,self.details)
assert nGood==93
assert misCount==2
assert nSkipped==0
assert feq(avgGood,.9849),avgGood
assert feq(avgBad,.8500),avgBad
assert tbl[0,0] == 54,tbl
assert tbl[1,1] == 39
assert tbl[0,1] == 2
assert tbl[1,0] == 0
def test2(self):
""" include holdout data only """
self.details.tableName = 'ferro_quant'
self.details.doHoldout=1
self.details.doTraining=0
compos = pickle.load(open(os.path.join(self.baseDir,'ferromag_quant_10.pkl'),
'rb'))
tgt = 7
assert len(compos)==tgt,'bad composite loaded: %d != %d'%(len(compos),tgt)
nGood,misCount,nSkipped,avgGood,avgBad,avgSkip,tbl = ScreenComposite.ScreenFromDetails(compos,self.details)
assert nGood==28
assert misCount==1
assert nSkipped==0
assert feq(avgGood,.9857),avgGood
assert feq(avgBad,1.000),avgBad
assert tbl[0,0] == 16,tbl
assert tbl[1,1] == 12
assert tbl[0,1] == 1
assert tbl[1,0] == 0
def test3(self):
""" include training data only """
self.details.tableName = 'ferro_quant'
self.details.doHoldout=0
self.details.doTraining=1
compos = pickle.load(open(os.path.join(self.baseDir,'ferromag_quant_10.pkl'),
'rb'))
tgt = 7
assert len(compos)==tgt,'bad composite loaded: %d != %d'%(len(compos),tgt)
nGood,misCount,nSkipped,avgGood,avgBad,avgSkip,tbl = ScreenComposite.ScreenFromDetails(compos,self.details)
assert nGood==65
assert misCount==1
assert nSkipped==0
assert feq(avgGood,.9846),avgGood
assert feq(avgBad,.7000),avgBad
assert tbl[0,0] == 38,tbl
assert tbl[1,1] == 27
assert tbl[0,1] == 1
assert tbl[1,0] == 0
def test4(self):
""" include thresholding """
self.details.tableName = 'ferro_quant'
self.details.threshold = 0.80
self.details.doHoldout=0
self.details.doTraining=0
compos = pickle.load(open(os.path.join(self.baseDir,'ferromag_quant_10.pkl'),
'rb'))
tgt = 7
assert len(compos)==tgt,'bad composite loaded: %d != %d'%(len(compos),tgt)
nGood,misCount,nSkipped,avgGood,avgBad,avgSkip,tbl = ScreenComposite.ScreenFromDetails(compos,self.details)
assert nGood==87,str(nGood)
assert misCount==1
assert nSkipped==7,nSkipped
assert feq(avgGood,1.0),avgGood
assert feq(avgBad,1.000),avgBad
assert feq(avgSkip,.7571),avgSkip
assert tbl[0,0] == 50
assert tbl[1,1] == 37
assert tbl[0,1] == 1
assert tbl[1,0] == 0
def test5(self):
""" basics """
self.details.tableName = 'ferro_noquant'
compos = pickle.load(open(os.path.join(self.baseDir,'ferromag_auto_10_3.pkl'),
'rb'))
tgt = 10
assert len(compos)==tgt,'bad composite loaded: %d != %d'%(len(compos),tgt)
tpl = ScreenComposite.ScreenFromDetails(compos,self.details)
nGood,misCount,nSkipped,avgGood,avgBad,avgSkip,tbl = tpl
assert nGood==93,nGood
assert misCount==10
assert nSkipped==0
assert feq(avgGood,.9699),avgGood
assert feq(avgBad,.8100),avgBad
assert tbl[0,0] == 48,tbl
assert tbl[1,1] == 45
assert tbl[0,1] == 7
assert tbl[1,0] == 3
def test6(self):
""" multiple models """
self.details.tableName = 'ferro_noquant'
compos = pickle.load(open(os.path.join(self.baseDir,'ferromag_auto_10_3.pkl'),
'rb'))
tgt = 10
assert len(compos)==tgt,'bad composite loaded: %d != %d'%(len(compos),tgt)
composites = [compos,compos]
tpl = ScreenComposite.ScreenFromDetails(composites,self.details)
nGood,misCount,nSkipped,avgGood,avgBad,avgSkip,tbl = tpl
assert feq(nGood[0],93),nGood
assert feq(misCount[0],10)
assert feq(nSkipped[0],0)
assert feq(avgGood[0],.9699),avgGood
assert feq(avgBad[0],.8100),avgBad
assert feq(nGood[1],0)
assert feq(misCount[1],0)
assert feq(nSkipped[1],0)
assert feq(avgGood[1],0)
assert feq(avgBad[1],0)
assert feq(tbl[0,0],48),tbl
assert feq(tbl[1,1],45)
assert feq(tbl[0,1],7)
assert feq(tbl[1,0],3)
def test7(self):
""" shuffle """
self.details.tableName = 'ferro_noquant'
compos = pickle.load(open(os.path.join(self.baseDir,'ferromag_shuffle_10_3.pkl'),
'rb'))
tgt = 10
assert len(compos)==tgt,'bad composite loaded: %d != %d'%(len(compos),tgt)
self.details.shuffleActivities=1
nGood,misCount,nSkipped,avgGood,avgBad,avgSkip,tbl = ScreenComposite.ScreenFromDetails(compos,self.details)
assert nGood==50,nGood
assert misCount==53
assert nSkipped==0
assert feq(avgGood,.7380),avgGood
assert feq(avgBad,.7660),avgBad
assert tbl[0,0] == 30,tbl
assert tbl[1,1] == 20
assert tbl[0,1] == 25
assert tbl[1,0] == 28
def test8(self):
""" shuffle with segmentation """
self.details.tableName = 'ferro_noquant'
compos = pickle.load(open(os.path.join(self.baseDir,'ferromag_shuffle_10_3.pkl'),
'rb'))
tgt = 10
assert len(compos)==tgt,'bad composite loaded: %d != %d'%(len(compos),tgt)
self.details.shuffleActivities=1
self.details.doHoldout=1
nGood,misCount,nSkipped,avgGood,avgBad,avgSkip,tbl = ScreenComposite.ScreenFromDetails(compos,self.details)
assert nGood==19,nGood
assert misCount==12
assert nSkipped==0
assert feq(avgGood,.7737),avgGood
assert feq(avgBad,.7500),avgBad
assert tbl[0,0] == 12,tbl
assert tbl[1,1] == 7
assert tbl[0,1] == 6
assert tbl[1,0] == 6
def test9(self):
""" shuffle with segmentation2 """
self.details.tableName = 'ferro_noquant'
compos = pickle.load(open(os.path.join(self.baseDir,'ferromag_shuffle_10_3.pkl'),
'rb'))
tgt = 10
assert len(compos)==tgt,'bad composite loaded: %d != %d'%(len(compos),tgt)
self.details.shuffleActivities=1
self.details.doTraining=1
nGood,misCount,nSkipped,avgGood,avgBad,avgSkip,tbl = ScreenComposite.ScreenFromDetails(compos,self.details)
assert nGood==31,nGood
assert misCount==41
assert nSkipped==0
assert feq(avgGood,.7161),avgGood
assert feq(avgBad,.7707),avgBad
assert tbl[0,0] == 18,tbl
assert tbl[1,1] == 13
assert tbl[0,1] == 19
assert tbl[1,0] == 22
def test10(self):
""" filtering """
self.details.tableName = 'ferro_noquant'
compos = pickle.load(open(os.path.join(self.baseDir,'ferromag_filt_10_3.pkl'),
'rb'))
tgt = 10
assert len(compos)==tgt,'bad composite loaded: %d != %d'%(len(compos),tgt)
self.details.filterVal=1
self.details.filterFrac=.33
nGood,misCount,nSkipped,avgGood,avgBad,avgSkip,tbl = ScreenComposite.ScreenFromDetails(compos,self.details)
assert nGood==90
assert misCount==13
assert nSkipped==0
assert feq(avgGood,.9578)
assert feq(avgBad,.8538)
assert tbl[0,0] == 54
assert tbl[1,1] == 36
assert tbl[0,1] == 1
assert tbl[1,0] == 12
def test11(self):
""" filtering with segmentation """
self.details.tableName = 'ferro_noquant'
compos = pickle.load(open(os.path.join(self.baseDir,'ferromag_filt_10_3.pkl'),
'rb'))
tgt = 10
assert len(compos)==tgt,'bad composite loaded: %d != %d'%(len(compos),tgt)
self.details.doHoldout=1
self.details.filterVal=1
self.details.filterFrac=.33
nGood,misCount,nSkipped,avgGood,avgBad,avgSkip,tbl = ScreenComposite.ScreenFromDetails(compos,self.details)
assert nGood==37,nGood
assert misCount==6
assert nSkipped==0
assert feq(avgGood,.9594)
assert feq(avgBad,.85)
assert tbl[0,0] == 14,tbl
assert tbl[1,1] == 23
assert tbl[0,1] == 1
assert tbl[1,0] == 5
def test12(self):
""" test the naive bayes composite"""
self.details.tableName = 'ferro_noquant'
compos = pickle.load(open(os.path.join(self.baseDir,'ferromag_NaiveBayes.pkl'),
'rb'))
tgt = 10
assert len(compos)==tgt,'bad composite loaded: %d != %d'%(len(compos),tgt)
self.details.doHoldout=1
nGood,misCount,nSkipped,avgGood,avgBad,avgSkip,tbl = ScreenComposite.ScreenFromDetails(compos,self.details)
assert nGood == 27,nGood
assert misCount == 4,misCount
assert nSkipped == 0,nSkipped
assert feq(avgGood, 0.9407),avgGood
assert feq(avgBad, 0.875),avgBad
assert tbl[0,0] == 11,tbl
assert tbl[0,1] == 4
assert tbl[1,0] == 0
assert tbl[1,1] == 16
if __name__ == '__main__':
unittest.main()
|
rdkit/rdkit-orig
|
rdkit/ML/UnitTestScreenComposite.py
|
Python
|
bsd-3-clause
| 9,996
|
[
"RDKit"
] |
36288e80f86ec8b0d77b0b5a4794e9e1e8f80ad6483be732a1079522327c4ff4
|
# -*- mode: python; coding: utf-8 -*-
"""
FireLogger_ server-side support library for Python.
For usage see ``README.txt`` or visit the `github homepage`_.
.. _FireLogger: https://addons.mozilla.org/en-US/firefox/addon/11090
.. _github homepage: http://github.com/darwin/firepython
"""
__api_version__ = '1.0'
# ^--- corresponds to api version of firelogger
__version__ = '1.0.0' # for python package releases
|
binaryage/firelogger.py
|
firepython/__init__.py
|
Python
|
bsd-3-clause
| 414
|
[
"VisIt"
] |
d10f397dcf1ee5322ccd1f4eb4b95eef285d9248cde7bc4241c1d76b4e370dfa
|
"""Stand-alone entry point for running Pulsar without a web server.
In its simplest form, this method will check the current directory for an
app.yml and run the corresponding configuration as a standalone application.
This makes sense when ``app.yml`` contains a ``message_queue_url`` option so
Pulsar is configured to listen to a message queue and doesn't require a web
server.
The following commands can be used to bootstrap such a setup.::
mkdir pulsar-mq-config
cd pulsar-mq-config
pulsar-config --mq
pulsar-main
This script can be used in a standalone fashion, but it is generally better to
run the ``pulsar`` script with ``--mode webless`` - which will in turn
delegate to this script.
"""
import logging
from logging.config import fileConfig
import os
import functools
import time
import sys
import configparser
try:
import yaml
except ImportError:
yaml = None # type: ignore
try:
from daemonize import Daemonize
except ImportError:
Daemonize = None
from argparse import ArgumentParser
from argparse import RawDescriptionHelpFormatter
log = logging.getLogger(__name__)
REQUIRES_DAEMONIZE_MESSAGE = "Attempted to use Pulsar in daemon mode, but daemonize is unavailable."
PULSAR_ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
if "PULSAR_CONFIG_DIR" in os.environ:
PULSAR_CONFIG_DIR = os.path.abspath(os.environ["PULSAR_CONFIG_DIR"])
else:
PULSAR_CONFIG_DIR = PULSAR_ROOT_DIR
DEFAULT_INI_APP = "main"
DEFAULT_INI = "server.ini"
DEFAULT_APP_YAML = "app.yml"
DEFAULT_MANAGER = "_default_"
DEFAULT_PID = "pulsar.pid"
DEFAULT_VERBOSE = True
HELP_CONFIG_DIR = "Default directory to search for relevant Pulsar configuration files (e.g. app.yml, server.ini)."
HELP_INI_PATH = "Specify an explicit path to Pulsar's server.ini configuration file."
HELP_APP_CONF_PATH = "Specify an explicit path to Pulsar's app.yml configuration file."
HELP_APP_CONF_BASE64 = "Specify an application configuration as a base64 encoded JSON blob."
HELP_DAEMONIZE = "Daemonize the process (requires the daemonize library)."
CONFIG_PREFIX = "PULSAR_CONFIG_"
LOGGING_CONFIG_DEFAULT = {
'version': 1,
'root': {
'handlers': ['console'],
'level': 'INFO',
},
'loggers': {
'pulsar': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': 0,
'qualname': 'pulsar',
},
'galaxy': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': 0,
'qualname': 'pulsar',
},
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'default',
'level': 'DEBUG',
'stream': 'ext://sys.stderr',
},
},
'formatters': {
'default': {
'format': '%(asctime)s %(levelname)-5.5s [%(name)s][%(threadName)s] %(message)s'
},
},
}
def load_pulsar_app(
config_builder,
config_env=False,
log=None,
**kwds
):
# Allow specification of log so daemon can reuse properly configured one.
if log is None:
log = logging.getLogger(__name__)
# If called in daemon mode, set the ROOT directory and ensure Pulsar is on
# sys.path.
if config_env:
try:
os.chdir(PULSAR_ROOT_DIR)
except Exception:
log.exception("Failed to chdir")
raise
try:
sys.path.append(PULSAR_ROOT_DIR)
except Exception:
log.exception("Failed to add Pulsar to sys.path")
raise
config_builder.setup_file_logging()
config = config_builder.load()
config.update(kwds)
import pulsar.core
pulsar_app = pulsar.core.PulsarApp(**config)
return pulsar_app
def app_loop(args, log, config_env):
pulsar_app = _app(args, log, config_env)
sleep = True
while sleep:
try:
time.sleep(5)
except KeyboardInterrupt:
sleep = False
except SystemExit:
sleep = False
except Exception:
pass
try:
pulsar_app.shutdown()
except Exception:
log.exception("Failed to shutdown Pulsar application")
raise
def _app(args, log, config_env):
try:
config_builder = PulsarConfigBuilder(args)
pulsar_app = load_pulsar_app(
config_builder,
config_env=config_env,
log=log,
)
except BaseException:
log.exception("Failed to initialize Pulsar application")
raise
return pulsar_app
def absolute_config_path(path, config_dir):
if path and not os.path.isabs(path):
path = os.path.join(config_dir, path)
return path
def _find_default_app_config(*config_dirs):
for config_dir in config_dirs:
app_config_path = os.path.join(config_dir, DEFAULT_APP_YAML)
if os.path.exists(app_config_path):
return app_config_path
return None
def apply_env_overrides_and_defaults(conf):
override_prefix = "%sOVERRIDE_" % CONFIG_PREFIX
for key in os.environ:
if key == 'PULSAR_CONFIG_DIR':
conf['config_dir'] = os.environ[key]
elif key.startswith(override_prefix):
config_key = key[len(override_prefix):].lower()
conf[config_key] = os.environ[key]
elif key.startswith(CONFIG_PREFIX):
config_key = key[len(CONFIG_PREFIX):].lower()
if config_key not in conf:
conf[config_key] = os.environ[key]
return conf
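# A sketch of the semantics above (the option names are hypothetical): with
#   PULSAR_CONFIG_OVERRIDE_PRIVATE_TOKEN=secret
#   PULSAR_CONFIG_STAGING_DIRECTORY=/tmp/staging
# in the environment, apply_env_overrides_and_defaults({'staging_directory':
# '/data'}) keeps staging_directory as '/data' (plain PULSAR_CONFIG_ values
# only fill in missing keys) but adds private_token='secret' (OVERRIDE_
# values always win).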
def load_app_configuration(ini_path=None, app_conf_path=None, app_name=None, local_conf=None, config_dir=PULSAR_CONFIG_DIR):
"""
"""
if ini_path and local_conf is None:
from pulsar.util.pastescript.loadwsgi import ConfigLoader
local_conf = ConfigLoader(ini_path).app_context(app_name).config()
local_conf = local_conf or {}
local_conf['config_dir'] = config_dir
if app_conf_path is None and "app_config" in local_conf:
app_conf_path = absolute_config_path(local_conf["app_config"], config_dir)
if not os.path.exists(app_conf_path) and os.path.exists(app_conf_path + ".sample"):
app_conf_path = app_conf_path + ".sample"
elif ini_path:
        # No explicit app.yml file was found - look next to server.ini,
        # be it in the pulsar root, some temporary staging directory, or /etc.
app_conf_path = _find_default_app_config(
os.path.dirname(ini_path),
)
if app_conf_path:
if yaml is None:
raise Exception("Cannot load configuration from file %s, pyyaml is not available." % app_conf_path)
with open(app_conf_path, "r") as f:
app_conf = yaml.safe_load(f) or {}
local_conf.update(app_conf)
return apply_env_overrides_and_defaults(local_conf)
def find_ini(supplied_ini, config_dir):
if supplied_ini:
return supplied_ini
    # If an ini was not explicitly supplied, check for server.ini and
    # fall back to the sample if that has not been configured.
for guess in ["server.ini", "server.ini.sample"]:
ini_path = os.path.join(config_dir, guess)
if os.path.exists(ini_path):
return ini_path
return guess
class PulsarConfigBuilder(object):
""" Generate paste-like configuration from supplied command-line arguments.
"""
def __init__(self, args=None, **kwds):
config_dir = kwds.get("config_dir", None) or (args and args.config_dir) or PULSAR_CONFIG_DIR
ini_path = kwds.get("ini_path", None) or (args and args.ini_path)
app_conf_path = kwds.get("app_conf_path", None) or (args and args.app_conf_path)
app_conf_base64 = args and args.app_conf_base64
if not app_conf_base64 and not app_conf_path:
            # No explicit app config was supplied - we need to ensure we
            # have an ini path.
ini_path = find_ini(ini_path, config_dir)
ini_path = absolute_config_path(ini_path, config_dir=config_dir)
self.config_dir = config_dir
self.ini_path = ini_path
self.app_conf_path = app_conf_path
self.app_conf_base64 = app_conf_base64
self.app_name = kwds.get("app") or (args and args.app) or DEFAULT_INI_APP
@classmethod
def populate_options(cls, arg_parser):
arg_parser.add_argument("-c", "--config_dir", default=None, help=HELP_CONFIG_DIR)
arg_parser.add_argument("--ini_path", default=None, help=HELP_INI_PATH)
arg_parser.add_argument("--app_conf_path", default=None, help=HELP_APP_CONF_PATH)
arg_parser.add_argument("--app_conf_base64", default=None, help=HELP_APP_CONF_BASE64)
arg_parser.add_argument("--app", default=DEFAULT_INI_APP)
# daemon related options...
arg_parser.add_argument("-d", "--daemonize", default=False, help=HELP_DAEMONIZE, action="store_true")
arg_parser.add_argument("--daemon-log-file", default=None, help="Log file for daemon, if --daemonize supplied.")
arg_parser.add_argument("--pid-file", default=DEFAULT_PID, help="Pid file for daemon, if --daemonize supplied (default is %s)." % DEFAULT_PID)
def load(self):
load_kwds = dict(
app_name=self.app_name,
config_dir=self.config_dir,
)
if self.app_conf_base64:
from pulsar.client.util import from_base64_json
local_conf = from_base64_json(self.app_conf_base64)
self.setup_dict_logging(local_conf)
load_kwds["local_conf"] = local_conf
else:
load_kwds.update(dict(
config_dir=self.config_dir,
ini_path=self.ini_path,
app_conf_path=self.app_conf_path,
))
return load_app_configuration(**load_kwds)
def setup_file_logging(self):
if self.ini_path:
raw_config = configparser.ConfigParser()
raw_config.read([self.ini_path])
# https://github.com/mozilla-services/chaussette/pull/32/files
if raw_config.has_section('loggers'):
config_file = os.path.abspath(self.ini_path)
fileConfig(
config_file,
dict(__file__=config_file, here=os.path.dirname(config_file))
)
def setup_dict_logging(self, config):
logging_conf = config.get('logging', None)
if logging_conf is None:
            # No logging config was supplied - fall back to the default config.
logging_conf = LOGGING_CONFIG_DEFAULT
logging.config.dictConfig(logging_conf)
def to_dict(self):
return dict(
config_dir=self.config_dir,
ini_path=self.ini_path,
app_conf_path=self.app_conf_path,
app=self.app_name
)
class PulsarManagerConfigBuilder(PulsarConfigBuilder):
def __init__(self, args=None, **kwds):
super(PulsarManagerConfigBuilder, self).__init__(args=args, **kwds)
self.manager = kwds.get("manager", None) or (args and args.manager) or DEFAULT_MANAGER
def to_dict(self):
as_dict = super(PulsarManagerConfigBuilder, self).to_dict()
as_dict["manager"] = self.manager
return as_dict
@classmethod
def populate_options(cls, arg_parser):
PulsarConfigBuilder.populate_options(arg_parser)
arg_parser.add_argument("--manager", default=DEFAULT_MANAGER)
def main(argv=None, config_env=False):
mod_docstring = sys.modules[__name__].__doc__
arg_parser = ArgumentParser(
description=mod_docstring,
formatter_class=RawDescriptionHelpFormatter,
)
PulsarConfigBuilder.populate_options(arg_parser)
args = arg_parser.parse_args(argv)
pid_file = args.pid_file
log.setLevel(logging.DEBUG)
log.propagate = False
if args.daemonize:
if Daemonize is None:
raise ImportError(REQUIRES_DAEMONIZE_MESSAGE)
keep_fds = []
if args.daemon_log_file:
fh = logging.FileHandler(args.daemon_log_file, "w")
fh.setLevel(logging.DEBUG)
log.addHandler(fh)
keep_fds.append(fh.stream.fileno())
else:
fh = logging.StreamHandler(sys.stderr)
fh.setLevel(logging.DEBUG)
log.addHandler(fh)
daemon = Daemonize(
app="pulsar",
pid=pid_file,
action=functools.partial(app_loop, args, log, config_env),
verbose=DEFAULT_VERBOSE,
logger=log,
keep_fds=keep_fds,
)
daemon.start()
else:
app_loop(args, log, config_env)
if __name__ == "__main__":
main(config_env=True)
|
natefoo/pulsar
|
pulsar/main.py
|
Python
|
apache-2.0
| 12,733
|
[
"Galaxy"
] |
c4fd4635cef64f148ec97afb8ce5cbf0f310eb8c0135e68e8bc503748d60bc73
|
#!/usr/bin/env python
import os, pickle, random, time
try:
os.remove('my_gp_module.pyc')
except OSError:
pass
import scipy as sp
from scipy.linalg import eigh
from my_gp_module import GaussianProcess
import matplotlib.pyplot as plt
# --------------------------------------------
# WHAT IT DOES:
# Given Ntest configurations to keep track of,
# For increasing number Ntot of configurations:
# - Teach all of them and save their regression coefficients alpha
# - Do a second teaching not including the test configurations
# - Predict energy of test configurations using 2nd teaching and evaluate error
# Plot alpha vs. error // alpha STD vs. MAE error
# --------------------------------------------
# --------------------------------------------
# Parameters for the run
# --------------------------------------------
split = 1
N_models = 1
theta0 = 10.0
Ntest = 100
# --------------------------------------------
# Load all database
# --------------------------------------------
ttt = time.clock()
if not os.path.exists('qm7.pkl'): os.system('wget http://www.quantum-machine.org/data/qm7.pkl')
dataset = pickle.load(open('qm7.pkl','rb'))
# --------------------------------------------
# Extract training data and test set
# --------------------------------------------
allP = dataset['P'][range(0,split)+range(split+1,5)].flatten()
print "TIMER load_data", time.clock() - ttt
nteach = sp.int32(sp.exp(sp.linspace(sp.log(2*Ntest), sp.log(allP.size), 25)))
# --------------------------------------------
# Loop over different training set sizes
# --------------------------------------------
alpha = []
alpha_std = []
mae_error = []
errors = []
for Nteach in nteach:
# --------------------------------------------
# First time include the test set to calculate their alpha
# --------------------------------------------
print "\n", "-"*60, "\n"
print "N teach = %d" % Nteach
# Select training data
P = allP[:Nteach]
X = dataset['X'][P]
T = dataset['T'][P]
# --------------------------------------------
# Extract feature(s) from training data and test set
# --------------------------------------------
# in this case, only sorted eigenvalues of Coulomb matrix
ttt = time.clock()
eigX = [(eigh(M, eigvals_only=True))[::-1] for M in X]
print "TIMER eval_features", time.clock() - ttt
# Observations
y = T.ravel()
# Setup a Gaussian Process model
ttt = time.clock()
gp = GaussianProcess(corr='absolute_exponential', theta0=sp.asarray([theta0]),
nugget=1e-3, verbose=True, normalise=True, do_features_projection=False, low_memory=False)
# Fit to data
gp.fit(eigX, y)
print "TIMER teach", time.clock() - ttt
local_alpha = gp.alpha[:Ntest]
print "alpha STD: %f" % sp.std(local_alpha)
print "alpha MAV: %f" % sp.mean(sp.absolute(local_alpha))
alpha.append(local_alpha.flatten())
alpha_std.append(sp.std(local_alpha))
# --------------------------------------------
# Second time don't include the test set and predict
# --------------------------------------------
# Extract feature(s) from training data and test set
# --------------------------------------------
eigt = eigX[:Ntest]
eigX = eigX[Ntest:]
# Observations
y = T.ravel()[Ntest:]
y_test = T.ravel()[:Ntest]
# Setup a Gaussian Process model
ttt = time.clock()
gp = GaussianProcess(corr='absolute_exponential', theta0=sp.asarray([theta0]),
nugget=1e-3, verbose=True, normalise=True, do_features_projection=False, low_memory=False)
# Fit to data
gp.fit(eigX, y)
print "TIMER teach", time.clock() - ttt
ttt = time.clock()
# Make the prediction on test set
y_pred, MSE = gp.predict(eigt, eval_MSE=True)
sigma = sp.sqrt(MSE)
mae_error.append(sp.absolute(y_pred-y_test).mean(axis=0))
errors.append(sp.absolute(y_pred-y_test))
print('\n test set:')
print('MAE: %5.2f kcal/mol' % sp.absolute(y_pred-y_test).mean(axis=0))
print('RMSE: %5.2f kcal/mol' % sp.square(y_pred-y_test).mean(axis=0)**.5)
print "TIMER predict", time.clock() - ttt
# Plot alpha STD vs. MAE error scatter (1 plot, dots, ~ 1 line)
# plt.plot(alpha_std, mae_error, 'o')
# plt.xlabel("regression coefficients STD")
# plt.ylabel("mean absolute error")
# plt.savefig('alphastd_vs_maeerror.png')
# Plot alpha vs. error scatter for selected test confs (1 plot, nplots <= Ntest lines)
nplots = 8
alpha = sp.array(alpha).T
errors = sp.array(errors).T
for a, err in zip(alpha[:nplots], errors[:nplots]):
plt.plot(a, err, 'o')
plt.xlabel("regression coefficient")
plt.ylabel("absolute error [kcal/mol]")
plt.savefig('alpha_vs_error.png')
for a, err in zip(alpha, errors):
plt.plot(a, err, 'o')
plt.xlabel("regression coefficient")
plt.ylabel("absolute error [kcal/mol]")
plt.savefig('alpha_vs_error_all.png')
# # sp.save("alphas.npy", sp.array(alpha))
|
marcocaccin/MarcoGP
|
alpha_trends.py
|
Python
|
apache-2.0
| 4,979
|
[
"Gaussian"
] |
3d5c89e41624a30400499eec0b60dbe1da0583adb0d6d65556fc7f75cb71c73a
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Notify students of items new or changed since their last visit.
News consists of course-level news and per-student news. Course-level news
consists of things such as a unit becoming available or a new announcement.
Student-level news is things like earning a course certificate based on
assessment scores.
We keep one per-course singleton to keep track of course news. This is only
ever appended to. We also keep a per-student record, which tracks both news
items and what items (both course and student level) a student has seen.
When a student views a course page, the news items that are still actually
new to that student are calculated and used to populate the News tab in the
title bar. Note that merely having visited a new news item once is not sufficient
to exclude the news item; we only consider news items to be "old news" after a
few hours. This permits students to re-find the same item using the same UI
affordance for a little while.
"""
__author__ = [
'mgainer@google.com (Mike Gainer)',
]
import collections
import os
import jinja2
import appengine_config
from common import resource
from common import schema_fields
from common import users
from common import utc
from common import utils as common_utils
from controllers import sites
from controllers import utils
from models import courses
from models import custom_modules
from models import data_removal
from models import models
from models import services
from models import transforms
from modules.i18n_dashboard import i18n_dashboard
from modules.news import messages
from google.appengine.ext import db
MODULE_NAME = 'news'
NEWS_SETTINGS_SECTION = 'news'
TEMPLATES_DIR = os.path.join(
appengine_config.BUNDLE_ROOT, 'modules', 'news', 'templates')
# Course-level setting field name for enabling News module functionality.
IS_NEWS_ENABLED_SETTING = 'is_news_enabled'
# News items that have been seen more recently than this are still newsworthy.
# Older items may be excluded from the UI and/or removed from the per-Student
# news record for space savings.
NEWSWORTHINESS_SECONDS = 6 * 60 * 60
# Try to show at least this many news items in the News tab, even if that
# means pulling in news that was seen more than NEWSWORTHINESS_SECONDS ago.
MIN_NEWS_ITEMS_TO_DISPLAY = 5
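# Example of how these two knobs interact (a sketch): an item seen two hours
# ago is still within NEWSWORTHINESS_SECONDS and is displayed (dimmed), while
# an item seen yesterday is pulled back into the tab only when fewer than
# MIN_NEWS_ITEMS_TO_DISPLAY newer items are available.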
custom_module = None
def is_enabled():
# TODO(mgainer): Add tests to verify that this does the right thing
# when this module is re-enabled in manifest.yaml.
# Enabled/disabled in manifest.yaml
if not custom_module.enabled:
return False
# If we don't have a course, we can't reasonably expect to have course news.
app_context = sites.get_app_context_for_current_request()
if not app_context:
return False
# Enabled at course level?
settings = app_context.get_environ()
news_settings = settings.get(NEWS_SETTINGS_SECTION, {})
return news_settings.get(IS_NEWS_ENABLED_SETTING, True) # True if unset.
class SerializableList(object):
"""Convenience functions to marshal/unmarshal objects from JSON."""
@classmethod
def json_to_list(cls, json_str):
if not json_str:
return []
json_dicts = transforms.loads(json_str)
parsed_dicts = [
transforms.json_to_dict(d, cls.SCHEMA.get_json_schema_dict())
for d in json_dicts]
return [cls(**kwargs) for kwargs in parsed_dicts]
@classmethod
def list_to_json(cls, items):
json_dicts = [
transforms.dict_to_json(transforms.instance_to_dict(item))
for item in items]
return transforms.dumps(json_dicts)
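# The two classmethods above are intended to round-trip: for a subclass such
# as NewsItem below, NewsItem.json_to_list(NewsItem.list_to_json(items))
# yields a list of equivalent NewsItem instances (a sketch of the intent,
# not a guarantee of object identity).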
class NewsItem(SerializableList):
"""Behaviorless struct, plus marshal/unmarshal convenience functions."""
FIELD_KEY = 'resource_key'
FIELD_WHEN = 'when'
FIELD_URL = 'url'
FIELD_LABELS = 'labels'
SCHEMA = schema_fields.FieldRegistry('NewsItem')
SCHEMA.add_property(schema_fields.SchemaField(
FIELD_KEY, 'Key', 'string'))
SCHEMA.add_property(schema_fields.SchemaField(
FIELD_WHEN, 'When', 'datetime'))
SCHEMA.add_property(schema_fields.SchemaField(
FIELD_URL, 'URL', 'string'))
SCHEMA.add_property(schema_fields.SchemaField(
FIELD_LABELS, 'Labels', 'string'))
def __init__(self, resource_key, url, when=None, labels=None):
# String version of common.resource.Key
self.resource_key = resource_key
# The time when this item became news.
self.when = when or utc.now_as_datetime()
# URL to the page showing the item.
self.url = url
# Single string giving IDs of labels, whitespace separated. Same as
# labels field on Student, Announcement, Unit and so on. Used to
# restrict news on items that are labelled to only students with
# matching labels. Follows usual label-match rules: if either Student
# or NewsItem does not have labels in a category, category does not
# filter. If both have labels, at least one label must exist in
# common for match.
self.labels = labels or ''
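        # Ignoring the per-category grouping, the matching rule above reduces
        # to roughly this predicate (a sketch, not the helper the models code
        # actually uses):
        #
        #   def labels_match(student_labels, item_labels):
        #       # Either side having no labels means no filtering applies.
        #       return (not student_labels or not item_labels
        #               or bool(student_labels & item_labels))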
# --------------------------------------------------------------------
# Below here is transient data - not persisted. Overwritten only for
# UX display. Note that since the serialization library ignores
# transient items based on leading-underscore, we also provide
# getter/setter properties to avoid warnings about touching private
# members.
# Distinguish news items that are likely interesting versus items that
# are likely old news for the student.
self._is_new_news = None
# Title, suitably i18n'd for the current display locale.
self._i18n_title = None
@property
def is_new_news(self):
return self._is_new_news
@is_new_news.setter
def is_new_news(self, value):
self._is_new_news = value
@property
def i18n_title(self):
return self._i18n_title
@i18n_title.setter
def i18n_title(self, value):
self._i18n_title = value
class SeenItem(SerializableList):
"""Behaviorless struct, plus marshal/unmarshal convenience functions."""
FIELD_KEY = 'resource_key'
FIELD_WHEN = 'when'
SCHEMA = schema_fields.FieldRegistry('SeenItem')
SCHEMA.add_property(schema_fields.SchemaField(
FIELD_KEY, 'Key', 'string'))
SCHEMA.add_property(schema_fields.SchemaField(
FIELD_WHEN, 'When', 'datetime'))
def __init__(self, resource_key, when):
# String version of common.resource.Key
self.resource_key = resource_key
# The time when this item became news.
self.when = when
class BaseNewsDto(object):
"""Common base for CourseNewsDao, StudentNewsDao."""
NEWS_ITEMS = 'news_items' # JSON array of NewsItem contents
def __init__(self, the_id, the_dict):
self.id = the_id
self.dict = the_dict
def get_news_items(self):
return NewsItem.json_to_list(self.dict.get(self.NEWS_ITEMS))
def _set_news_items(self, news_items):
self.dict[self.NEWS_ITEMS] = NewsItem.list_to_json(news_items)
def add_news_item(self, news_item, overwrite_existing):
news_items = self.get_news_items()
        # Only one news item is kept per resource. If the user has not seen
        # the older alert, there is no point retaining it.
old_item = common_utils.find(
lambda i: i.resource_key == news_item.resource_key, news_items)
if old_item:
if overwrite_existing and old_item.when < news_item.when:
news_items.remove(old_item)
news_items.append(news_item)
else:
news_items.append(news_item)
self._set_news_items(news_items)
def remove_news_item(self, resource_key):
news_items = self.get_news_items()
item = common_utils.find(
lambda i: i.resource_key == resource_key, news_items)
if not item:
return False
news_items.remove(item)
self._set_news_items(news_items)
return True
class BaseNewsDao(models.BaseJsonDao):
@classmethod
def add_news_item(cls, news_item, overwrite_existing=True):
"""Convenience method when only one operation is needed on DTO."""
if not is_enabled():
return
dto = cls.load_or_default()
dto.add_news_item(news_item, overwrite_existing)
cls.save(dto)
@classmethod
def remove_news_item(cls, resource_key):
if not is_enabled():
return
dto = cls.load_or_default()
if dto.remove_news_item(resource_key):
cls.save(dto)
@classmethod
def get_news_items(cls):
"""Convenience method when only one operation is needed on DTO."""
if not is_enabled():
return []
dto = cls.load_or_default()
return dto.get_news_items()
class CourseNewsEntity(models.BaseEntity):
"""Singleton: coursewide news. E.g., new announcements, units, lessons."""
SINGLETON_KEY_NAME = 'singleton'
data = db.TextProperty(indexed=False)
class CourseNewsDto(BaseNewsDto):
"""No extra behavior, just here for naming convenience/commonality."""
pass
class CourseNewsDao(BaseNewsDao):
DTO = CourseNewsDto
ENTITY = CourseNewsEntity
ENTITY_KEY_TYPE = models.BaseJsonDao.EntityKeyTypeName
@classmethod
def load_or_default(cls):
dto = cls.load(CourseNewsEntity.SINGLETON_KEY_NAME)
if not dto:
dto = CourseNewsDto(CourseNewsEntity.SINGLETON_KEY_NAME, {})
return dto
class StudentNewsEntity(models.BaseEntity):
"""Per-Student: Global news items already seen, plus per-student News.
Keyed by student obfuscated user ID.
"""
data = db.TextProperty(indexed=False)
class StudentNewsDto(BaseNewsDto):
SEEN_ITEMS = 'seen'
def get_seen_items(self):
return SeenItem.json_to_list(self.dict.get(self.SEEN_ITEMS))
def _set_seen_items(self, seen_items):
self.dict[self.SEEN_ITEMS] = SeenItem.list_to_json(seen_items)
def mark_item_seen(self, resource_key):
now = utc.now_as_datetime()
# First, add/update a record to indicate that the student has just now
# seen the newsworthy thing.
# Note: Using OrderedDict's here because they permit deletion during
# iteration.
seen_items = collections.OrderedDict(
{i.resource_key: i for i in self.get_seen_items()})
seen_items[resource_key] = SeenItem(resource_key, now)
# As long as we're here, also take this opportunity to clean up:
# Remove pairs of items where we have a 'seen' record and a 'news'
# record for the same key and where the item was seen more than
# NEWSWORTHINESS_SECONDS ago. We retain things that are only
# slightly-old so that students can still use the News feature to
# re-find stuff they've already seen but may still want to re-visit.
news_items = collections.OrderedDict(
{n.resource_key: n for n in self.get_news_items()})
for resource_key, seen_item in seen_items.iteritems():
if (now - seen_item.when).total_seconds() > NEWSWORTHINESS_SECONDS:
if resource_key in news_items:
del news_items[resource_key]
del seen_items[resource_key]
break
self._set_seen_items(seen_items.values())
self._set_news_items(news_items.values())
class StudentNewsDao(BaseNewsDao):
DTO = StudentNewsDto
ENTITY = StudentNewsEntity
ENTITY_KEY_TYPE = models.BaseJsonDao.EntityKeyTypeName
@classmethod
def load_or_default(cls):
# Sanity check: Re-verify that we have a Student. Calling handlers
# should be either checking first or watching for these exceptions and
# converting to reasonable HTML responses.
user = users.get_current_user()
if not user:
raise ValueError('No current user.')
student = models.Student.get_enrolled_student_by_user(user)
if not student:
raise ValueError('No Student found for current user.')
dto = cls.load(user.user_id())
if not dto:
dto = StudentNewsDto(user.user_id(), {})
return dto
@classmethod
def mark_item_seen(cls, resource_key):
"""Convenience method when only one operation is needed on DTO."""
dto = cls.load_or_default()
dto.mark_item_seen(resource_key)
cls.save(dto)
@classmethod
def get_seen_items(cls):
"""Convenience method when only one operation is needed on DTO."""
dto = cls.load_or_default()
return dto.get_seen_items()
def course_page_navbar_callback(app_context):
"""Generate HTML for inclusion on tabs bar.
Thankfully, this function gets called pretty late during page generation,
so StudentNewsDao should already have been notified when we're on a page
that was newsworthy, but now is not because the student has seen it.
"""
# If we don't have a registered student in session, no news for you!
user = users.get_current_user()
if not user:
return []
student = models.Student.get_enrolled_student_by_user(user)
if not student or student.is_transient:
return []
student_dao = StudentNewsDao.load_or_default()
# Combine all news items for consideration.
news = student_dao.get_news_items() + CourseNewsDao.get_news_items()
seen_times = {s.resource_key: s.when
for s in student_dao.get_seen_items()}
# Filter out items that student can't see due to label matching. Do
# this before reducing number of items displayed to a fixed maximum.
course = courses.Course.get(app_context)
models.LabelDAO.apply_course_track_labels_to_student_labels(
course, student, news)
    # Run through news items, categorizing them for display: 'new' news is
    # anything unseen or seen only recently; 'old' news is everything else.
new_news = []
old_news = []
now = utc.now_as_datetime()
enrolled_on = student.enrolled_on.replace(microsecond=0)
for item in news:
seen_when = seen_times.get(item.resource_key)
if seen_when is None:
# Items not yet seen at all get marked for CSS highlighting.
# Items prior to student enrollment are not incremental new stuff;
# we assume that on enroll, the student is on notice that all
# course content is "new", and we don't need to redundantly bring
# it to their attention.
if item.when >= enrolled_on:
item.is_new_news = True
new_news.append(item)
elif (now - seen_when).total_seconds() < NEWSWORTHINESS_SECONDS:
# Items seen recently are always shown, but with CSS dimming.
item.is_new_news = False
new_news.append(item)
else:
            # Items seen, but not recently, are put on a separate list for
            # inclusion only if there are few new items.
item.is_new_news = False
old_news.append(item)
# Display setup: Order by time within new, old set. Show all new
# news, and if there are few of those, some old news as well.
new_news.sort(key=lambda n: (n.is_new_news, n.when), reverse=True)
old_news.sort(key=lambda n: n.when, reverse=True)
news = new_news + old_news[
0:max(0, MIN_NEWS_ITEMS_TO_DISPLAY - len(new_news))]
for item in news:
try:
key = resource.Key.fromstring(item.resource_key)
resource_handler = (
i18n_dashboard.TranslatableResourceRegistry.get_by_type(
key.type))
item.i18n_title = resource_handler.get_i18n_title(key)
except AssertionError:
# Not all news things are backed by AbstractResourceHandler types.
# Fall back to news-specific registry for these.
resource_handler = I18nTitleRegistry
key_type, _ = item.resource_key.split(resource.Key.SEPARATOR, 1)
item.i18n_title = resource_handler.get_i18n_title(
key_type, item.resource_key)
# Fill template
template_environ = app_context.get_template_environ(
app_context.get_current_locale(), [TEMPLATES_DIR])
template = template_environ.get_template('news.html', [TEMPLATES_DIR])
return [
jinja2.utils.Markup(template.render({'news': news}, autoescape=True))]
class I18nTitleRegistry(object):
_REGISTRY = {}
@classmethod
def register(cls, type_str, i18n_title_provider):
"""Register a resource handler for news items.
If your newsworthy thing has already implemented a class inheriting
from common.resource.AbstractResourceHandler, you need not register
here; that class will be detected from its registration with
common.resource.Registry and used directly. This registry is only for
things that are newsworthy but do not represent actual resource
entities. This primarily includes less tangible notions, such as
course completion indications.
Args:
i18n_title_provider: A callback that can provide i18n'd title string
for a news item. The callback is provided with one argument:
key: Whatever was set as the news item's key string when it
was added. If the current course or locale are required, use
the various get_current_X functions in controllers.sites.
"""
if type_str in cls._REGISTRY:
raise ValueError('Resource type %s is already registered.' %
type_str)
cls._REGISTRY[type_str] = i18n_title_provider
@classmethod
def unregister(cls, type_str):
if type_str in cls._REGISTRY:
del cls._REGISTRY[type_str]
@classmethod
def get_i18n_title(cls, key_type, key):
return cls._REGISTRY[key_type](key)
def register_module():
name = NEWS_SETTINGS_SECTION + ':' + IS_NEWS_ENABLED_SETTING
news_enabled = schema_fields.SchemaField(
name, 'News', 'boolean',
optional=True, i18n=False, default_value=True,
description=services.help_urls.make_learn_more_message(
messages.IS_NEWS_ENABLED_MESSAGE, name))
course_settings_fields = (
lambda c: news_enabled,
)
def on_module_enabled():
courses.Course.OPTIONS_SCHEMA_PROVIDERS[
courses.Course.SCHEMA_SECTION_COURSE].extend(course_settings_fields)
# Register "News" element on navbar.
utils.CourseHandler.LEFT_LINKS.append(course_page_navbar_callback)
# Register StudentNewsEntity for removal when student requests their
# data be purged.
data_removal.Registry.register_indexed_by_user_id_remover(
StudentNewsEntity.delete_by_key)
# pylint: disable=global-statement
global custom_module
custom_module = custom_modules.Module(
'News', messages.MODULE_DESCRIPTION,
global_routes=[],
namespaced_routes=[],
notify_module_enabled=on_module_enabled)
return custom_module
|
GirlsCodePy/girlscode-coursebuilder
|
modules/news/news.py
|
Python
|
gpl-3.0
| 19,825
|
[
"VisIt"
] |
a00e09eb550cc3a7c7c34b9754ec06679ccd663cb18f42a4425bc2eb066a5298
|
from ase import Atoms
from ase.calculators.emt import EMT
from ase.optimize import QuasiNewton
n2 = Atoms('N2', positions=[(0, 0, 0), (0, 0, 1.1)],
calculator=EMT())
QuasiNewton(n2).run(0.01)
print(n2.get_distance(0, 1), n2.get_potential_energy())
|
suttond/MODOI
|
ase/test/n2.py
|
Python
|
lgpl-3.0
| 260
|
[
"ASE"
] |
a869e842e938fac3886dba4e690dee3c631bc07d2012cc1ec16e49e406872eb8
|
import os
import platform
import _thread as thread
import time
from subprocess import Popen
from .util import kill_pid
from pulsar.managers.base.directory import DirectoryBaseManager
from pulsar.managers import status
from logging import getLogger
log = getLogger(__name__)
JOB_FILE_SUBMITTED = "submitted"
JOB_FILE_PID = "pid"
class BaseUnqueuedManager(DirectoryBaseManager):
def _record_submission(self, job_id):
self._job_directory(job_id).store_metadata(JOB_FILE_SUBMITTED, 'true')
def _get_status(self, job_id):
job_directory = self._job_directory(job_id)
if self._was_cancelled(job_id):
job_status = status.CANCELLED
elif job_directory.has_metadata(JOB_FILE_PID):
job_status = status.RUNNING
elif job_directory.has_metadata(JOB_FILE_SUBMITTED):
job_status = status.QUEUED
else:
job_status = status.COMPLETE
return job_status
def _finish_execution(self, job_id):
self._job_directory(job_id).remove_metadata(JOB_FILE_SUBMITTED)
def _prepare_run(self, job_id, command_line, dependencies_description, env, setup_params=None):
self._check_execution_with_tool_file(job_id, command_line)
self._record_submission(job_id)
if platform.system().lower() == "windows":
# TODO: Don't ignore requirements and env without warning. Ideally
# process them or at least warn about them being ignored.
command_line = self._expand_command_line(command_line, dependencies_description, job_directory=self.job_directory(job_id).job_directory)
else:
command_line = self._setup_job_file(
job_id,
command_line,
dependencies_description=dependencies_description,
env=env,
setup_params=setup_params
)
return command_line
def _start_monitor(self, *args, **kwd):
if kwd.get("background", True):
thread.start_new_thread(self._monitor_execution, args)
else:
self._monitor_execution(*args)
# Job Locks (for status updates). Following methods are locked.
# _finish_execution(self, job_id)
# _get_status(self, job_id)
# _is_cancelled(self, job_id)
# _record_pid(self, job_id, pid)
# _get_pid_for_killing_or_cancel(self, job_id)
#
class Manager(BaseUnqueuedManager):
"""
A simple job manager that just directly runs jobs as given (no
    queueing). Preserved for compatibility with older versions of Pulsar
client code where Galaxy is used to maintain queue (like Galaxy's
local job runner).
"""
manager_type = "unqueued"
def __init__(self, name, app, **kwds):
super(Manager, self).__init__(name, app, **kwds)
def __get_pid(self, job_id):
pid = None
try:
pid = self._job_directory(job_id).load_metadata(JOB_FILE_PID)
if pid is not None:
pid = int(pid)
except Exception:
pass
return pid
def _get_job_lock(self, job_id):
return self._job_directory(job_id).lock()
def get_status(self, job_id):
with self._get_job_lock(job_id):
return self._get_status(job_id)
def kill(self, job_id):
log.info("Attempting to kill job with job_id %s" % job_id)
job_lock = self._get_job_lock(job_id)
with job_lock:
pid = self._get_pid_for_killing_or_cancel(job_id)
if pid:
log.info("Attempting to kill pid %s" % pid)
kill_pid(pid)
def _monitor_execution(self, job_id, proc, stdout, stderr):
try:
proc.wait()
stdout.close()
stderr.close()
return_code = proc.returncode
            # job_script might have set a return code, so use that if set;
            # otherwise use this one. Should there be some way to signal
            # failure if this is non-0 in that case?
self._write_return_code_if_unset(job_id, str(return_code))
finally:
with self._get_job_lock(job_id):
self._finish_execution(job_id)
# with job lock
def _finish_execution(self, job_id):
super(Manager, self)._finish_execution(job_id)
self._job_directory(job_id).remove_metadata(JOB_FILE_PID)
# with job lock
def _get_status(self, job_id):
return super(Manager, self)._get_status(job_id)
# with job lock
def _was_cancelled(self, job_id):
return super(Manager, self)._was_cancelled(job_id)
# with job lock
def _record_pid(self, job_id, pid):
self._job_directory(job_id).store_metadata(JOB_FILE_PID, str(pid))
# with job lock
def _get_pid_for_killing_or_cancel(self, job_id):
job_status = self._get_status(job_id)
if job_status not in [status.RUNNING, status.QUEUED]:
return
pid = self.__get_pid(job_id)
self._record_cancel(job_id)
if pid is None:
self._job_directory(job_id).remove_metadata(JOB_FILE_SUBMITTED)
return pid
def _run(self, job_id, command_line, background=True):
with self._get_job_lock(job_id):
if self._was_cancelled(job_id):
return
proc, stdout, stderr = self._proc_for_job_id(job_id, command_line)
with self._get_job_lock(job_id):
self._record_pid(job_id, proc.pid)
self._start_monitor(job_id, proc, stdout, stderr, background=background)
def _proc_for_job_id(self, job_id, command_line):
job_directory = self.job_directory(job_id)
working_directory = job_directory.working_directory()
stdout = self._open_standard_output(job_id)
stderr = self._open_standard_error(job_id)
proc = execute(command_line=command_line,
working_directory=working_directory,
stdout=stdout,
stderr=stderr)
return proc, stdout, stderr
def launch(self, job_id, command_line, submit_params={}, dependencies_description=None, env=[], setup_params=None):
command_line = self._prepare_run(job_id, command_line, dependencies_description=dependencies_description, env=env, setup_params=setup_params)
self._run(job_id, command_line)
class CoexecutionManager(BaseUnqueuedManager):
"""Manager that managers one job in a pod-like environment.
Assume some process in another container will execute the command.
"""
manager_type = "coexecution"
def __init__(self, name, app, **kwds):
super(CoexecutionManager, self).__init__(name, app, **kwds)
def get_status(self, job_id):
return self._get_status(job_id)
def kill(self, job_id):
log.info("Attempting to kill job with job_id %s - unimplemented in CoexecutionManager..." % job_id)
def _monitor_execution(self, job_id):
return_code_path = self._return_code_path(job_id)
# Write dummy JOB_FILE_PID so get_status thinks this job is running.
self._job_directory(job_id).store_metadata(JOB_FILE_PID, "1")
try:
while not os.path.exists(return_code_path):
time.sleep(0.1)
print("monitoring for %s" % return_code_path)
continue
print("found return code path...")
self._job_directory(job_id).remove_metadata(JOB_FILE_PID)
time.sleep(1)
finally:
self._finish_execution(job_id)
def launch(self, job_id, command_line, submit_params={}, dependencies_description=None, env=[], setup_params=None):
command_line = self._prepare_run(job_id, command_line, dependencies_description=dependencies_description, env=env, setup_params=setup_params)
job_directory = self.job_directory(job_id)
working_directory = job_directory.working_directory()
command_line += " > '%s' 2> '%s'" % (
self._stdout_path(job_id),
self._stderr_path(job_id),
)
command_line = "cd '%s'; sh %s" % (working_directory, command_line)
self._write_command_line(job_id, command_line)
self._start_monitor(job_id)
def execute(command_line, working_directory, stdout, stderr):
preexec_fn = None
if not (platform.system() == 'Windows'):
preexec_fn = os.setpgrp
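    # (os.setpgrp makes the child the leader of its own process group on
    # POSIX, so the job can later be signalled as a group without the signal
    # reaching the Pulsar process itself - a reasonable reading; the actual
    # kill behavior lives in util.kill_pid.)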
proc = Popen(args=command_line,
shell=True,
cwd=working_directory,
stdout=stdout,
stderr=stderr,
preexec_fn=preexec_fn)
return proc
__all__ = ['Manager']
|
natefoo/pulsar
|
pulsar/managers/unqueued.py
|
Python
|
apache-2.0
| 8,600
|
[
"Galaxy"
] |
87f6353e286a542b2421a6bd22ddce2a40b199a7c713e3abc818e79fd5cb2f64
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module defines standard transformations which transform a structure into
another structure. Standard transformations operate in a structure-wide manner,
rather than in a site-specific manner.
All transformations should inherit the AbstractTransformation ABC.
"""
import logging
from fractions import Fraction
from typing import Optional, Union
from numpy import around
from pymatgen.analysis.bond_valence import BVAnalyzer
from pymatgen.analysis.elasticity.strain import Deformation
from pymatgen.analysis.ewald import EwaldMinimizer, EwaldSummation
from pymatgen.analysis.structure_matcher import StructureMatcher
from pymatgen.core.composition import Composition
from pymatgen.core.operations import SymmOp
from pymatgen.core.periodic_table import get_el_sp
from pymatgen.core.structure import Lattice, Structure
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.transformations.site_transformations import (
PartialRemoveSitesTransformation,
)
from pymatgen.transformations.transformation_abc import AbstractTransformation
logger = logging.getLogger(__name__)
class RotationTransformation(AbstractTransformation):
"""
The RotationTransformation applies a rotation to a structure.
"""
def __init__(self, axis, angle, angle_in_radians=False):
"""
Args:
axis (3x1 array): Axis of rotation, e.g., [1, 0, 0]
angle (float): Angle to rotate
angle_in_radians (bool): Set to True if angle is supplied in radians.
Else degrees are assumed.
"""
self.axis = axis
self.angle = angle
self.angle_in_radians = angle_in_radians
self._symmop = SymmOp.from_axis_angle_and_translation(self.axis, self.angle, self.angle_in_radians)
def apply_transformation(self, structure):
"""
Apply the transformation.
Args:
structure (Structure): Input Structure
Returns:
Rotated Structure.
"""
s = structure.copy()
s.apply_operation(self._symmop)
return s
def __str__(self):
return "Rotation Transformation about axis " + "{} with angle = {:.4f} {}".format(
self.axis, self.angle, "radians" if self.angle_in_radians else "degrees"
)
def __repr__(self):
return self.__str__()
@property
def inverse(self):
"""
Returns:
Inverse Transformation.
"""
return RotationTransformation(self.axis, -self.angle, self.angle_in_radians)
@property
def is_one_to_many(self):
"""Returns: False"""
return False
class OxidationStateDecorationTransformation(AbstractTransformation):
"""
This transformation decorates a structure with oxidation states.
"""
def __init__(self, oxidation_states):
"""
Args:
oxidation_states (dict): Oxidation states supplied as a dict,
e.g., {"Li":1, "O":-2}
"""
self.oxidation_states = oxidation_states
def apply_transformation(self, structure):
"""
Apply the transformation.
Args:
structure (Structure): Input Structure
Returns:
Oxidation state decorated Structure.
"""
s = structure.copy()
s.add_oxidation_state_by_element(self.oxidation_states)
return s
@property
def inverse(self):
"""
Returns: None
"""
return None
@property
def is_one_to_many(self):
"""
Returns: False
"""
return False
class AutoOxiStateDecorationTransformation(AbstractTransformation):
"""
This transformation automatically decorates a structure with oxidation
states using a bond valence approach.
"""
def __init__(
self,
symm_tol=0.1,
max_radius=4,
max_permutations=100000,
distance_scale_factor=1.015,
):
"""
Args:
symm_tol (float): Symmetry tolerance used to determine which sites are
symmetrically equivalent. Set to 0 to turn off symmetry.
max_radius (float): Maximum radius in Angstrom used to find nearest
neighbors.
max_permutations (int): Maximum number of permutations of oxidation
states to test.
distance_scale_factor (float): A scale factor to be applied. This is
useful for scaling distances, esp in the case of
calculation-relaxed structures, which may tend to under (GGA) or
over bind (LDA). The default of 1.015 works for GGA. For
experimental structure, set this to 1.
"""
self.symm_tol = symm_tol
self.max_radius = max_radius
self.max_permutations = max_permutations
self.distance_scale_factor = distance_scale_factor
self.analyzer = BVAnalyzer(symm_tol, max_radius, max_permutations, distance_scale_factor)
def apply_transformation(self, structure):
"""
Apply the transformation.
Args:
structure (Structure): Input Structure
Returns:
Oxidation state decorated Structure.
"""
return self.analyzer.get_oxi_state_decorated_structure(structure)
@property
def inverse(self):
"""
Returns: None
"""
return None
@property
def is_one_to_many(self):
"""
Returns: False
"""
return False
class OxidationStateRemovalTransformation(AbstractTransformation):
"""
This transformation removes oxidation states from a structure.
"""
def __init__(self):
"""
No arg needed.
"""
pass
def apply_transformation(self, structure): # pylint: disable=R0201
"""
Apply the transformation.
Args:
structure (Structure): Input Structure
Returns:
Non-oxidation state decorated Structure.
"""
s = structure.copy()
s.remove_oxidation_states()
return s
@property
def inverse(self):
"""
Returns: None
"""
return None
@property
def is_one_to_many(self):
"""
Returns: False
"""
return False
class SupercellTransformation(AbstractTransformation):
"""
    The SupercellTransformation replicates a structure to create a supercell.
"""
def __init__(self, scaling_matrix=((1, 0, 0), (0, 1, 0), (0, 0, 1))):
"""
Args:
scaling_matrix: A matrix of transforming the lattice vectors.
Defaults to the identity matrix. Has to be all integers. e.g.,
                [[2,1,0],[0,3,0],[0,0,1]] generates a new structure with
                lattice vectors a' = 2a + b, b' = 3b, c' = c where a, b, and c
                are the lattice vectors of the original structure.
"""
self.scaling_matrix = scaling_matrix
@staticmethod
def from_scaling_factors(scale_a=1, scale_b=1, scale_c=1):
"""
Convenience method to get a SupercellTransformation from a simple
series of three numbers for scaling each lattice vector. Equivalent to
calling the normal with [[scale_a, 0, 0], [0, scale_b, 0],
[0, 0, scale_c]]
Args:
scale_a: Scaling factor for lattice direction a. Defaults to 1.
scale_b: Scaling factor for lattice direction b. Defaults to 1.
scale_c: Scaling factor for lattice direction c. Defaults to 1.
Returns:
SupercellTransformation.
"""
return SupercellTransformation([[scale_a, 0, 0], [0, scale_b, 0], [0, 0, scale_c]])
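    # Example (a sketch; assumes `structure` is an existing pymatgen
    # Structure):
    #
    #   t = SupercellTransformation.from_scaling_factors(2, 2, 1)
    #   supercell = t.apply_transformation(structure)  # 4x as many sites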
def apply_transformation(self, structure):
"""
Apply the transformation.
Args:
structure (Structure): Input Structure
Returns:
Supercell Structure.
"""
return structure * self.scaling_matrix
def __str__(self):
return "Supercell Transformation with scaling matrix " + f"{self.scaling_matrix}"
def __repr__(self):
return self.__str__()
@property
def inverse(self):
"""
Raises: NotImplementedError
"""
raise NotImplementedError()
@property
def is_one_to_many(self):
"""
Returns: False
"""
return False
class SubstitutionTransformation(AbstractTransformation):
"""
This transformation substitutes species for one another.
"""
def __init__(self, species_map):
"""
Args:
species_map: A dict or list of tuples containing the species mapping in
string-string pairs. E.g., {"Li":"Na"} or [("Fe2+","Mn2+")].
Multiple substitutions can be done. Overloaded to accept
                sp_and_occu dictionary. E.g., {"Si": {"Ge":0.75, "C":0.25}},
which substitutes a single species with multiple species to
generate a disordered structure.
"""
self.species_map = species_map
self._species_map = dict(species_map)
for k, v in self._species_map.items():
if isinstance(v, (tuple, list)):
self._species_map[k] = dict(v)
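    # Example (a sketch): SubstitutionTransformation({"Li": "Na"}) renames
    # every Li site to Na, while SubstitutionTransformation(
    # {"Si": {"Ge": 0.75, "C": 0.25}}) turns each Si site into a disordered
    # 75% Ge / 25% C site.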
def apply_transformation(self, structure):
"""
Apply the transformation.
Args:
structure (Structure): Input Structure
Returns:
Substituted Structure.
"""
species_map = {}
for k, v in self._species_map.items():
if isinstance(v, dict):
value = {get_el_sp(x): y for x, y in v.items()}
else:
value = get_el_sp(v)
species_map[get_el_sp(k)] = value
s = structure.copy()
s.replace_species(species_map)
return s
def __str__(self):
return "Substitution Transformation :" + ", ".join(
[str(k) + "->" + str(v) for k, v in self._species_map.items()]
)
def __repr__(self):
return self.__str__()
@property
def inverse(self):
"""
Returns:
Inverse Transformation.
"""
inverse_map = {v: k for k, v in self._species_map.items()}
return SubstitutionTransformation(inverse_map)
@property
def is_one_to_many(self):
"""
Returns: False
"""
return False
class RemoveSpeciesTransformation(AbstractTransformation):
"""
Remove all occurrences of some species from a structure.
"""
def __init__(self, species_to_remove):
"""
Args:
species_to_remove: List of species to remove. E.g., ["Li", "Mn"]
"""
self.species_to_remove = species_to_remove
def apply_transformation(self, structure):
"""
Apply the transformation.
Args:
structure (Structure): Input Structure
Returns:
Structure with species removed.
"""
s = structure.copy()
for sp in self.species_to_remove:
s.remove_species([get_el_sp(sp)])
return s
def __str__(self):
return "Remove Species Transformation :" + ", ".join(self.species_to_remove)
def __repr__(self):
return self.__str__()
@property
def inverse(self):
"""
Returns: None
"""
return None
@property
def is_one_to_many(self):
"""
Returns: False
"""
return False
class PartialRemoveSpecieTransformation(AbstractTransformation):
"""
Remove fraction of specie from a structure.
Requires an oxidation state decorated structure for ewald sum to be
computed.
Given that the solution to selecting the right removals is NP-hard, there
are several algorithms provided with varying degrees of accuracy and speed.
Please see
:class:`pymatgen.transformations.site_transformations.PartialRemoveSitesTransformation`.
"""
ALGO_FAST = 0
ALGO_COMPLETE = 1
ALGO_BEST_FIRST = 2
ALGO_ENUMERATE = 3
def __init__(self, specie_to_remove, fraction_to_remove, algo=ALGO_FAST):
"""
Args:
specie_to_remove: Species to remove. Must have oxidation state E.g.,
"Li+"
fraction_to_remove: Fraction of specie to remove. E.g., 0.5
algo: This parameter allows you to choose the algorithm to perform
ordering. Use one of PartialRemoveSpecieTransformation.ALGO_*
variables to set the algo.
"""
self.specie_to_remove = specie_to_remove
self.fraction_to_remove = fraction_to_remove
self.algo = algo
def apply_transformation(self, structure, return_ranked_list=False):
"""
Apply the transformation.
Args:
structure: input structure
return_ranked_list (bool/int): Boolean stating whether or not
multiple structures are returned. If return_ranked_list is
an int, that number of structures is returned.
Returns:
Depending on returned_ranked list, either a transformed structure
or a list of dictionaries, where each dictionary is of the form
{"structure" = .... , "other_arguments"}
the key "transformation" is reserved for the transformation that
was actually applied to the structure.
This transformation is parsed by the alchemy classes for generating
a more specific transformation history. Any other information will
be stored in the transformation_parameters dictionary in the
transmuted structure class.
"""
sp = get_el_sp(self.specie_to_remove)
specie_indices = [i for i in range(len(structure)) if structure[i].species == Composition({sp: 1})]
trans = PartialRemoveSitesTransformation([specie_indices], [self.fraction_to_remove], algo=self.algo)
return trans.apply_transformation(structure, return_ranked_list)
@property
def is_one_to_many(self):
"""
Returns: True
"""
return True
def __str__(self):
spec_str = [
f"Species = {self.specie_to_remove}",
f"Fraction to remove = {self.fraction_to_remove}",
f"ALGO = {self.algo}",
]
return "PartialRemoveSpecieTransformation : " + ", ".join(spec_str)
def __repr__(self):
return self.__str__()
@property
def inverse(self):
"""
Returns: None
"""
return None
class OrderDisorderedStructureTransformation(AbstractTransformation):
"""
Order a disordered structure. The disordered structure must be oxidation
state decorated for ewald sum to be computed. No attempt is made to perform
symmetry determination to reduce the number of combinations.
    Hence, attempting to perform ordering on a large number of disordered
sites may be extremely expensive. The time scales approximately with the
number of possible combinations. The algorithm can currently compute
approximately 5,000,000 permutations per minute.
    Also, simple rounding of the occupancies is performed, with no attempt
made to achieve a target composition. This is usually not a problem for
most ordering problems, but there can be times where rounding errors may
result in structures that do not have the desired composition.
This second step will be implemented in the next iteration of the code.
If multiple fractions for a single species are found for different sites,
these will be treated separately if the difference is above a threshold
    tolerance; currently this is 0.1.
For example, if a fraction of .25 Li is on sites 0,1,2,3 and .5 on sites
4, 5, 6, 7 then 1 site from [0,1,2,3] will be filled and 2 sites from [4,5,6,7]
will be filled, even though a lower energy combination might be found by
putting all lithium in sites [4,5,6,7].
USE WITH CARE.
"""
ALGO_FAST = 0
ALGO_COMPLETE = 1
ALGO_BEST_FIRST = 2
def __init__(self, algo=ALGO_FAST, symmetrized_structures=False, no_oxi_states=False):
"""
Args:
algo (int): Algorithm to use.
symmetrized_structures (bool): Whether the input structures are
instances of SymmetrizedStructure, and that their symmetry
should be used for the grouping of sites.
no_oxi_states (bool): Whether to remove oxidation states prior to
ordering.
"""
self.algo = algo
self._all_structures = []
self.no_oxi_states = no_oxi_states
self.symmetrized_structures = symmetrized_structures
def apply_transformation(self, structure, return_ranked_list=False):
"""
For this transformation, the apply_transformation method will return
only the ordered structure with the lowest Ewald energy, to be
consistent with the method signature of the other transformations.
However, all structures are stored in the all_structures attribute in
the transformation object for easy access.
Args:
structure: Oxidation state decorated disordered structure to order
return_ranked_list (bool): Whether or not multiple structures are
returned. If return_ranked_list is a number, that number of
structures is returned.
Returns:
Depending on returned_ranked list, either a transformed structure
or a list of dictionaries, where each dictionary is of the form
{"structure" = .... , "other_arguments"}
the key "transformation" is reserved for the transformation that
was actually applied to the structure.
This transformation is parsed by the alchemy classes for generating
a more specific transformation history. Any other information will
be stored in the transformation_parameters dictionary in the
transmuted structure class.
"""
try:
num_to_return = int(return_ranked_list)
except ValueError:
num_to_return = 1
num_to_return = max(1, num_to_return)
if self.no_oxi_states:
structure = Structure.from_sites(structure)
for i, site in enumerate(structure):
structure[i] = {"%s0+" % k.symbol: v for k, v in site.species.items()}
equivalent_sites = []
exemplars = []
# generate list of equivalent sites to order
# equivalency is determined by sp_and_occu and symmetry
# if symmetrized structure is true
for i, site in enumerate(structure):
if site.is_ordered:
continue
for j, ex in enumerate(exemplars):
sp = ex.species
if not site.species.almost_equals(sp):
continue
if self.symmetrized_structures:
sym_equiv = structure.find_equivalent_sites(ex)
sym_test = site in sym_equiv
else:
sym_test = True
if sym_test:
equivalent_sites[j].append(i)
break
else:
equivalent_sites.append([i])
exemplars.append(site)
# generate the list of manipulations and input structure
s = Structure.from_sites(structure)
m_list = []
for g in equivalent_sites:
total_occupancy = sum((structure[i].species for i in g), Composition())
total_occupancy = dict(total_occupancy.items())
# round total occupancy to possible values
for k, v in total_occupancy.items():
if abs(v - round(v)) > 0.25:
raise ValueError("Occupancy fractions not consistent with size of unit cell")
total_occupancy[k] = int(round(v))
# start with an ordered structure
initial_sp = max(total_occupancy.keys(), key=lambda x: abs(x.oxi_state))
for i in g:
s[i] = initial_sp
# determine the manipulations
for k, v in total_occupancy.items():
if k == initial_sp:
continue
m = [
k.oxi_state / initial_sp.oxi_state if initial_sp.oxi_state else 0,
v,
list(g),
k,
]
m_list.append(m)
# determine the number of empty sites
empty = len(g) - sum(total_occupancy.values())
if empty > 0.5:
m_list.append([0, empty, list(g), None])
matrix = EwaldSummation(s).total_energy_matrix
ewald_m = EwaldMinimizer(matrix, m_list, num_to_return, self.algo)
self._all_structures = []
lowest_energy = ewald_m.output_lists[0][0]
num_atoms = sum(structure.composition.values())
for output in ewald_m.output_lists:
s_copy = s.copy()
# do deletions afterwards because they screw up the indices of the
# structure
del_indices = []
for manipulation in output[1]:
if manipulation[1] is None:
del_indices.append(manipulation[0])
else:
s_copy[manipulation[0]] = manipulation[1]
s_copy.remove_sites(del_indices)
if self.no_oxi_states:
s_copy.remove_oxidation_states()
self._all_structures.append(
{
"energy": output[0],
"energy_above_minimum": (output[0] - lowest_energy) / num_atoms,
"structure": s_copy.get_sorted_structure(),
}
)
if return_ranked_list:
return self._all_structures[:num_to_return]
return self._all_structures[0]["structure"]
def __str__(self):
return "Order disordered structure transformation"
def __repr__(self):
return self.__str__()
@property
def inverse(self):
"""
Returns: None
"""
return None
@property
def is_one_to_many(self):
"""
Returns: True
"""
return True
@property
def lowest_energy_structure(self):
"""
:return: Lowest energy structure found.
"""
return self._all_structures[0]["structure"]
class PrimitiveCellTransformation(AbstractTransformation):
"""
This class finds the primitive cell of the input structure.
It returns a structure that is not necessarily orthogonalized
Author: Will Richards
"""
def __init__(self, tolerance=0.5):
"""
Args:
tolerance (float): Tolerance for each coordinate of a particular
                site. For example, [0.5, 0, 0.5] in Cartesian coordinates will be
considered to be on the same coordinates as [0, 0, 0] for a
tolerance of 0.5. Defaults to 0.5.
"""
self.tolerance = tolerance
def apply_transformation(self, structure):
"""
Returns most primitive cell for structure.
Args:
structure: A structure
Returns:
The most primitive structure found. The returned structure is
guaranteed to have len(new structure) <= len(structure).
"""
return structure.get_primitive_structure(tolerance=self.tolerance)
def __str__(self):
return "Primitive cell transformation"
def __repr__(self):
return self.__str__()
@property
def inverse(self):
"""
Returns: None
"""
return None
@property
def is_one_to_many(self):
"""
Returns: False
"""
return False
class ConventionalCellTransformation(AbstractTransformation):
"""
This class finds the conventional cell of the input structure.
"""
def __init__(self, symprec=0.01, angle_tolerance=5, international_monoclinic=True):
"""
Args:
symprec (float): tolerance as in SpacegroupAnalyzer
angle_tolerance (float): angle tolerance as in SpacegroupAnalyzer
international_monoclinic (bool): whether to use beta (True) or alpha (False)
as the non-right-angle in the unit cell
"""
self.symprec = symprec
self.angle_tolerance = angle_tolerance
self.international_monoclinic = international_monoclinic
def apply_transformation(self, structure):
"""
Returns most primitive cell for structure.
Args:
structure: A structure
Returns:
The same structure in a conventional standard setting
"""
sga = SpacegroupAnalyzer(structure, symprec=self.symprec, angle_tolerance=self.angle_tolerance)
return sga.get_conventional_standard_structure(international_monoclinic=self.international_monoclinic)
def __str__(self):
return "Conventional cell transformation"
def __repr__(self):
return self.__str__()
@property
def inverse(self):
"""
Returns: None
"""
return None
@property
def is_one_to_many(self):
"""
Returns: False
"""
return False
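# Editor's sketch (hypothetical; not part of the original module). The
# conventional-cell counterpart to the example above:
#
#     conv = ConventionalCellTransformation(symprec=0.01,
#                                           angle_tolerance=5).apply_transformation(struct)
#     # `conv` is the same crystal re-expressed in the conventional
#     # standard setting found by SpacegroupAnalyzer.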
class PerturbStructureTransformation(AbstractTransformation):
"""
This transformation perturbs a structure by a specified distance in random
directions. Used for breaking symmetries.
"""
def __init__(
self,
distance: float = 0.01,
min_distance: Optional[Union[int, float]] = None,
):
"""
Args:
distance: Distance of perturbation in angstroms. All sites
will be perturbed by exactly that distance in a random
direction.
min_distance: if None, all displacements will be equidistant. If int
or float, perturb each site a distance drawn from the uniform
distribution between 'min_distance' and 'distance'.
"""
self.distance = distance
self.min_distance = min_distance
def apply_transformation(self, structure: Structure) -> Structure:
"""
Apply the transformation.
Args:
structure: Input Structure
Returns:
Structure with sites perturbed.
"""
s = structure.copy()
s.perturb(self.distance, min_distance=self.min_distance)
return s
def __str__(self):
return "PerturbStructureTransformation : " + f"Min_distance = {self.min_distance}"
def __repr__(self):
return self.__str__()
@property
def inverse(self):
"""
Returns: None
"""
return None
@property
def is_one_to_many(self):
"""
Returns: False
"""
return False
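# Editor's sketch (hypothetical; not part of the original module). With
# min_distance set, each site moves a random distance drawn from
# [min_distance, distance]; without it, every site moves exactly `distance`:
#
#     pst = PerturbStructureTransformation(distance=0.05, min_distance=0.01)
#     rattled = pst.apply_transformation(struct)  # `struct` as above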
class DeformStructureTransformation(AbstractTransformation):
"""
This transformation deforms a structure by a deformation gradient matrix
"""
def __init__(self, deformation=((1, 0, 0), (0, 1, 0), (0, 0, 1))):
"""
Args:
deformation (array): deformation gradient for the transformation
"""
self._deform = Deformation(deformation)
self.deformation = self._deform.tolist()
def apply_transformation(self, structure):
"""
Apply the transformation.
Args:
structure (Structure): Input Structure
Returns:
Deformed Structure.
"""
return self._deform.apply_to_structure(structure)
def __str__(self):
return "DeformStructureTransformation : " + f"Deformation = {str(self.deformation)}"
def __repr__(self):
return self.__str__()
@property
def inverse(self):
"""
Returns:
Inverse Transformation.
"""
return DeformStructureTransformation(self._deform.inv)
@property
def is_one_to_many(self):
"""
Returns: False
"""
return False
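# Editor's sketch (hypothetical; not part of the original module). Because
# the inverse property builds a transformation from the inverse deformation
# gradient, a round trip recovers the input:
#
#     shear = DeformStructureTransformation([[1, 0.02, 0],
#                                            [0, 1, 0],
#                                            [0, 0, 1]])
#     deformed = shear.apply_transformation(struct)
#     restored = shear.inverse.apply_transformation(deformed)  # ~ struct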
class DiscretizeOccupanciesTransformation(AbstractTransformation):
"""
Discretizes the site occupancies in a disordered structure; useful for
grouping similar structures or as a pre-processing step for order-disorder
transformations.
"""
def __init__(self, max_denominator=5, tol=None, fix_denominator=False):
"""
Args:
max_denominator:
An integer maximum denominator for discretization. A higher
denominator allows for finer resolution in the site occupancies.
tol:
A float that sets the maximum difference between the original and
discretized occupancies before throwing an error. If None, it is
set to 1 / (4 * max_denominator).
fix_denominator(bool):
If True, will enforce a common denominator for all species.
This prevents a mix of denominators (for example, 1/3, 1/4)
that might require large cell sizes to perform an enumeration.
'tol' needs to be > 1.0 in some cases.
"""
self.max_denominator = max_denominator
self.tol = tol if tol is not None else 1 / (4 * max_denominator)
self.fix_denominator = fix_denominator
def apply_transformation(self, structure):
"""
Discretizes the site occupancies in the structure.
Args:
structure: disordered Structure to discretize occupancies
Returns:
A new disordered Structure with occupancies discretized
"""
if structure.is_ordered:
return structure
species = [dict(sp) for sp in structure.species_and_occu]
for sp in species:
for k, v in sp.items():
old_occ = sp[k]
new_occ = float(Fraction(old_occ).limit_denominator(self.max_denominator))
if self.fix_denominator:
new_occ = around(old_occ * self.max_denominator) / self.max_denominator
if round(abs(old_occ - new_occ), 6) > self.tol:
raise RuntimeError("Cannot discretize structure within tolerance!")
sp[k] = new_occ
return Structure(structure.lattice, species, structure.frac_coords)
def __str__(self):
return "DiscretizeOccupanciesTransformation"
def __repr__(self):
return self.__str__()
@property
def inverse(self):
"""
Returns: None
"""
return None
@property
def is_one_to_many(self):
"""
Returns: False
"""
return False
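# Editor's sketch (not part of the original module), showing the two
# discretization modes on an occupancy of 0.36 with max_denominator=5:
#
#     from fractions import Fraction
#     float(Fraction(0.36).limit_denominator(5))  # -> 0.333... (1/3, free denominator)
#     round(0.36 * 5) / 5                         # -> 0.4 (2/5, fix_denominator mode)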
class ChargedCellTransformation(AbstractTransformation):
"""
The ChargedCellTransformation applies a charge to a structure (or defect
object).
"""
def __init__(self, charge=0):
"""
Args:
charge: An integer charge to apply to the structure.
Defaults to zero. Must be a single integer, e.g. 2.
"""
self.charge = charge
def apply_transformation(self, structure):
"""
Apply the transformation.
Args:
structure (Structure): Input Structure
Returns:
Charged Structure.
"""
s = structure.copy()
s.set_charge(self.charge)
return s
def __str__(self):
return "Structure with charge " + f"{self.charge}"
def __repr__(self):
return self.__str__()
@property
def inverse(self):
"""
Raises: NotImplementedError
"""
raise NotImplementedError()
@property
def is_one_to_many(self):
"""
Returns: False
"""
return False
class ScaleToRelaxedTransformation(AbstractTransformation):
"""
Takes an unrelaxed and a relaxed structure and applies the site and volume
relaxation to a structurally similar structure (e.g. bulk: NaCl and PbTe
(rock-salt), slab: Sc(10-10) and Mg(10-10) (hcp), GB: Mo(001) sigma 5 GB,
Fe(001) sigma 5). Useful for finding an initial guess that brings a set of
similar structures closer to their most relaxed state.
"""
def __init__(self, unrelaxed_structure, relaxed_structure, species_map=None):
"""
Args:
unrelaxed_structure (Structure): Initial, unrelaxed structure
relaxed_structure (Structure): Relaxed structure
species_map (dict): A dict or list of tuples containing the species mapping in
string-string pairs. The first species corresponds to the relaxed
structure while the second corresponds to the species in the
structure to be scaled. E.g., {"Li":"Na"} or [("Fe2+","Mn2+")].
Multiple substitutions can be done. Overloaded to accept
sp_and_occu dictionary, e.g. {"Si": {"Ge": 0.75, "C": 0.25}},
which substitutes a single species with multiple species to
generate a disordered structure.
"""
# Get the ratio matrix for lattice relaxation which can be
# applied to any similar structure to simulate volumetric relaxation
relax_params = list(relaxed_structure.lattice.abc)
relax_params.extend(relaxed_structure.lattice.angles)
unrelax_params = list(unrelaxed_structure.lattice.abc)
unrelax_params.extend(unrelaxed_structure.lattice.angles)
self.params_percent_change = []
for relax_p, unrelax_p in zip(relax_params, unrelax_params):
self.params_percent_change.append(relax_p / unrelax_p)
self.unrelaxed_structure = unrelaxed_structure
self.relaxed_structure = relaxed_structure
self.species_map = species_map
def apply_transformation(self, structure):
"""
Returns a copy of structure with lattice parameters
and sites scaled to the same degree as the relaxed_structure.
Args:
structure (Structure): A structurally similar structure in
regards to crystal and site positions.
"""
if self.species_map is None:
match = StructureMatcher()
s_map = match.get_best_electronegativity_anonymous_mapping(self.unrelaxed_structure, structure)
else:
s_map = self.species_map
params = list(structure.lattice.abc)
params.extend(structure.lattice.angles)
new_lattice = Lattice.from_parameters(*[p * self.params_percent_change[i] for i, p in enumerate(params)])
species, frac_coords = [], []
for site in self.relaxed_structure:
species.append(s_map[site.specie])
frac_coords.append(site.frac_coords)
return Structure(new_lattice, species, frac_coords)
def __str__(self):
return "ScaleToRelaxedTransformation"
def __repr__(self):
return self.__str__()
@property
def inverse(self):
"""
Returns: None
"""
return None
@property
def is_one_to_many(self):
"""
Returns: False
"""
return False
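# Editor's sketch (hypothetical; not part of the original module). Scaling a
# rock-salt analogue using a relaxed/unrelaxed pair of a similar compound:
#
#     trans = ScaleToRelaxedTransformation(unrelaxed_nacl, relaxed_nacl)
#     guess = trans.apply_transformation(pbte_rocksalt)
#     # `guess` carries NaCl's lattice and site relaxation over to PbTe as
#     # an initial guess; `unrelaxed_nacl`, `relaxed_nacl` and
#     # `pbte_rocksalt` are assumed Structure objects.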
|
vorwerkc/pymatgen
|
pymatgen/transformations/standard_transformations.py
|
Python
|
mit
| 35,536
|
[
"CRYSTAL",
"pymatgen"
] |
419bffca2f3eed2cf2f56ff44b67ce592d90169e5e543f0fffccfbf87a019b74
|
from __future__ import division
import warnings
import numpy as np
def monkhorst_pack(size):
"""Construct a uniform sampling of k-space of given size."""
if np.less_equal(size, 0).any():
raise ValueError('Illegal size: %s' % list(size))
kpts = np.indices(size).transpose((1, 2, 3, 0)).reshape((-1, 3))
return (kpts + 0.5) / size - 0.5
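# Editor's sketch (not part of the original file): for size (2, 2, 2) the
# formula (kpts + 0.5) / size - 0.5 yields the eight points with components
# +/- 0.25, i.e. a Gamma-free grid centred on the origin:
#
#     >>> monkhorst_pack((2, 2, 2))[0]
#     array([-0.25, -0.25, -0.25])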
def get_monkhorst_pack_size_and_offset(kpts):
"""Find Monkhorst-Pack size and offset.
Returns (size, offset), where::
kpts = monkhorst_pack(size) + offset.
The set of k-points must not have been symmetry reduced."""
if len(kpts) == 1:
return np.ones(3, int), np.array(kpts[0], dtype=float)
size = np.zeros(3, int)
for c in range(3):
# Determine increment between k-points along current axis
delta = max(np.diff(np.sort(kpts[:, c])))
# Determine number of k-points as inverse of distance between kpoints
if delta > 1e-8:
size[c] = int(round(1.0 / delta))
else:
size[c] = 1
kpts0 = monkhorst_pack(size)
offsets = kpts - kpts0
# All offsets must be identical:
if (offsets.ptp(axis=0) > 1e-9).any():
raise ValueError('Not an ASE-style Monkhorst-Pack grid!')
return size, offsets[0].copy()
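# Editor's sketch (not part of the original file): round-tripping a grid
# recovers its size and a zero offset:
#
#     >>> size, offset = get_monkhorst_pack_size_and_offset(
#     ...     monkhorst_pack((4, 4, 4)))
#     >>> size
#     array([4, 4, 4])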
def get_monkhorst_shape(kpts):
warnings.warn('Use get_monkhorst_pack_size_and_offset()[0] instead.')
return get_monkhorst_pack_size_and_offset(kpts)[0]
def kpoint_convert(cell_cv, skpts_kc=None, ckpts_kv=None):
"""Convert k-points between scaled and cartesian coordinates.
Given the atomic unit cell, and either the scaled or cartesian k-point
coordinates, the other is determined.
The k-point arrays can be either a single point, or a list of points,
i.e. the dimension k can be empty or multidimensional.
"""
if ckpts_kv is None:
icell_cv = 2 * np.pi * np.linalg.inv(cell_cv).T
return np.dot(skpts_kc, icell_cv)
elif skpts_kc is None:
return np.dot(ckpts_kv, cell_cv.T) / (2 * np.pi)
else:
raise KeyError('Either scaled or cartesian coordinates must be given.')
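# Editor's sketch (not part of the original file): for a simple cubic cell
# with lattice constant a, the reciprocal cell is (2*pi/a) * identity, so
# the scaled k-point [1/2, 0, 0] maps to [pi/a, 0, 0]:
#
#     a = 4.0
#     cell = a * np.eye(3)
#     kpoint_convert(cell, skpts_kc=np.array([0.5, 0.0, 0.0]))
#     # -> array([0.7854, 0., 0.]), i.e. pi/a for a = 4.0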
def get_bandpath(points, cell, npoints=50):
"""Make a list of kpoints defining the path between the given points.
points: list
List of special IBZ point pairs, e.g. ``points =
[W, L, Gamma, X, W, K]``. These should be given in
scaled coordinates.
cell: 3x3 ndarray
Unit cell of the atoms.
npoints: int
Length of the output kpts list.
Return list of k-points, list of x-coordinates and list of
x-coordinates of special points."""
points = np.asarray(points)
dists = points[1:] - points[:-1]
lengths = [np.linalg.norm(d) for d in kpoint_convert(cell, skpts_kc=dists)]
length = sum(lengths)
kpts = []
x0 = 0
x = []
X = [0]
for P, d, L in zip(points[:-1], dists, lengths):
n = int(round(L * (npoints - 1 - len(x)) / (length - x0)))
for t in np.linspace(0, 1, n, endpoint=False):
kpts.append(P + t * d)
x.append(x0 + t * L)
x0 += L
X.append(x0)
kpts.append(points[-1])
x.append(x0)
return np.array(kpts), np.array(x), np.array(X)
# The following is a list of the critical points in the 1. Brillouin zone
# for some typical crystal structures.
# (In units of the reciprocal basis vectors)
# See http://en.wikipedia.org/wiki/Brillouin_zone
ibz_points = {'cubic': {'Gamma': [0, 0, 0 ],
'X': [0, 0 / 2, 1 / 2],
'R': [1 / 2, 1 / 2, 1 / 2],
'M': [0 / 2, 1 / 2, 1 / 2]},
'fcc': {'Gamma': [0, 0, 0 ],
'X': [1 / 2, 0, 1 / 2],
'W': [1 / 2, 1 / 4, 3 / 4],
'K': [3 / 8, 3 / 8, 3 / 4],
'U': [5 / 8, 1 / 4, 5 / 8],
'L': [1 / 2, 1 / 2, 1 / 2]},
'bcc': {'Gamma': [0, 0, 0 ],
'H': [1 / 2, -1 / 2, 1 / 2],
'N': [0, 0, 1 / 2],
'P': [1 / 4, 1 / 4, 1 / 4]},
'hexagonal':
{'Gamma': [0, 0, 0 ],
'M': [0, 1 / 2, 0 ],
'K': [-1 / 3, 1 / 3, 0 ],
'A': [0, 0, 1 / 2 ],
'L': [0, 1 / 2, 1 / 2 ],
'H': [-1 / 3, 1 / 3, 1 / 2 ]},
'tetragonal':
{'Gamma': [0, 0, 0 ],
'X': [1 / 2, 0, 0 ],
'M': [1 / 2, 1 / 2, 0 ],
'Z': [0, 0, 1 / 2 ],
'R': [1 / 2, 0, 1 / 2 ],
'A': [1 / 2, 1 / 2, 1 / 2 ]},
'orthorhombic':
{'Gamma': [0, 0, 0 ],
'R': [1 / 2, 1 / 2, 1 / 2 ],
'S': [1 / 2, 1 / 2, 0 ],
'T': [0, 1 / 2, 1 / 2 ],
'U': [1 / 2, 0, 1 / 2 ],
'X': [1 / 2, 0, 0 ],
'Y': [0, 1 / 2, 0 ],
'Z': [0, 0, 1 / 2 ]},
}
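# Editor's sketch (not part of the original file): combining ibz_points with
# get_bandpath to sample a Gamma-X-W-L-Gamma path for an fcc lattice (the
# 3x3 array `cell` is an assumed input):
#
#     fcc = ibz_points['fcc']
#     path = [fcc['Gamma'], fcc['X'], fcc['W'], fcc['L'], fcc['Gamma']]
#     kpts, x, X = get_bandpath(path, cell, npoints=100)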
# Chadi-Cohen k-point grids. The k-point grids are given in units of the
# reciprocal unit cell. The variables are named according to the
# convention cc+'<Nkpoints>'+_+'shape'. For example, an 18 k-point
# sq(3)xsq(3) grid is named 'cc18_sq3xsq3'.
cc6_1x1 = np.array([1, 1, 0, 1, 0, 0, 0, -1, 0, -1, -1, 0, -1, 0, 0,
0, 1, 0]).reshape((6, 3)) / 3.0
cc12_2x3 = np.array([3, 4, 0, 3, 10, 0, 6, 8, 0, 3, -2, 0, 6, -4, 0,
6, 2, 0, -3, 8, 0, -3, 2, 0, -3, -4, 0, -6, 4, 0, -6, -2, 0, -6,
-8, 0]).reshape((12, 3)) / 18.0
cc18_sq3xsq3 = np.array([2, 2, 0, 4, 4, 0, 8, 2, 0, 4, -2, 0, 8, -4,
0, 10, -2, 0, 10, -8, 0, 8, -10, 0, 2, -10, 0, 4, -8, 0, -2, -8,
0, 2, -4, 0, -4, -4, 0, -2, -2, 0, -4, 2, 0, -2, 4, 0, -8, 4, 0,
-4, 8, 0]).reshape((18, 3)) / 18.0
cc18_1x1 = np.array([2, 4, 0, 2, 10, 0, 4, 8, 0, 8, 4, 0, 8, 10, 0,
10, 8, 0, 2, -2, 0, 4, -4, 0, 4, 2, 0, -2, 8, 0, -2, 2, 0, -2, -4,
0, -4, 4, 0, -4, -2, 0, -4, -8, 0, -8, 2, 0, -8, -4, 0, -10, -2,
0]).reshape((18, 3)) / 18.0
cc54_sq3xsq3 = np.array([4, -10, 0, 6, -10, 0, 0, -8, 0, 2, -8, 0, 6,
-8, 0, 8, -8, 0, -4, -6, 0, -2, -6, 0, 2, -6, 0, 4, -6, 0, 8, -6,
0, 10, -6, 0, -6, -4, 0, -2, -4, 0, 0, -4, 0, 4, -4, 0, 6, -4, 0,
10, -4, 0, -6, -2, 0, -4, -2, 0, 0, -2, 0, 2, -2, 0, 6, -2, 0, 8,
-2, 0, -8, 0, 0, -4, 0, 0, -2, 0, 0, 2, 0, 0, 4, 0, 0, 8, 0, 0,
-8, 2, 0, -6, 2, 0, -2, 2, 0, 0, 2, 0, 4, 2, 0, 6, 2, 0, -10, 4,
0, -6, 4, 0, -4, 4, 0, 0, 4, 0, 2, 4, 0, 6, 4, 0, -10, 6, 0, -8,
6, 0, -4, 6, 0, -2, 6, 0, 2, 6, 0, 4, 6, 0, -8, 8, 0, -6, 8, 0,
-2, 8, 0, 0, 8, 0, -6, 10, 0, -4, 10, 0]).reshape((54, 3)) / 18.0
cc54_1x1 = np.array([2, 2, 0, 4, 4, 0, 8, 8, 0, 6, 8, 0, 4, 6, 0, 6,
10, 0, 4, 10, 0, 2, 6, 0, 2, 8, 0, 0, 2, 0, 0, 4, 0, 0, 8, 0, -2,
6, 0, -2, 4, 0, -4, 6, 0, -6, 4, 0, -4, 2, 0, -6, 2, 0, -2, 0, 0,
-4, 0, 0, -8, 0, 0, -8, -2, 0, -6, -2, 0, -10, -4, 0, -10, -6, 0,
-6, -4, 0, -8, -6, 0, -2, -2, 0, -4, -4, 0, -8, -8, 0, 4, -2, 0,
6, -2, 0, 6, -4, 0, 2, 0, 0, 4, 0, 0, 6, 2, 0, 6, 4, 0, 8, 6, 0,
8, 0, 0, 8, 2, 0, 10, 4, 0, 10, 6, 0, 2, -4, 0, 2, -6, 0, 4, -6,
0, 0, -2, 0, 0, -4, 0, -2, -6, 0, -4, -6, 0, -6, -8, 0, 0, -8, 0,
-2, -8, 0, -4, -10, 0, -6, -10, 0]).reshape((54, 3)) / 18.0
cc162_sq3xsq3 = np.array([-8, 16, 0, -10, 14, 0, -7, 14, 0, -4, 14,
0, -11, 13, 0, -8, 13, 0, -5, 13, 0, -2, 13, 0, -13, 11, 0, -10,
11, 0, -7, 11, 0, -4, 11, 0, -1, 11, 0, 2, 11, 0, -14, 10, 0, -11,
10, 0, -8, 10, 0, -5, 10, 0, -2, 10, 0, 1, 10, 0, 4, 10, 0, -16,
8, 0, -13, 8, 0, -10, 8, 0, -7, 8, 0, -4, 8, 0, -1, 8, 0, 2, 8, 0,
5, 8, 0, 8, 8, 0, -14, 7, 0, -11, 7, 0, -8, 7, 0, -5, 7, 0, -2, 7,
0, 1, 7, 0, 4, 7, 0, 7, 7, 0, 10, 7, 0, -13, 5, 0, -10, 5, 0, -7,
5, 0, -4, 5, 0, -1, 5, 0, 2, 5, 0, 5, 5, 0, 8, 5, 0, 11, 5, 0,
-14, 4, 0, -11, 4, 0, -8, 4, 0, -5, 4, 0, -2, 4, 0, 1, 4, 0, 4, 4,
0, 7, 4, 0, 10, 4, 0, -13, 2, 0, -10, 2, 0, -7, 2, 0, -4, 2, 0,
-1, 2, 0, 2, 2, 0, 5, 2, 0, 8, 2, 0, 11, 2, 0, -11, 1, 0, -8, 1,
0, -5, 1, 0, -2, 1, 0, 1, 1, 0, 4, 1, 0, 7, 1, 0, 10, 1, 0, 13, 1,
0, -10, -1, 0, -7, -1, 0, -4, -1, 0, -1, -1, 0, 2, -1, 0, 5, -1,
0, 8, -1, 0, 11, -1, 0, 14, -1, 0, -11, -2, 0, -8, -2, 0, -5, -2,
0, -2, -2, 0, 1, -2, 0, 4, -2, 0, 7, -2, 0, 10, -2, 0, 13, -2, 0,
-10, -4, 0, -7, -4, 0, -4, -4, 0, -1, -4, 0, 2, -4, 0, 5, -4, 0,
8, -4, 0, 11, -4, 0, 14, -4, 0, -8, -5, 0, -5, -5, 0, -2, -5, 0,
1, -5, 0, 4, -5, 0, 7, -5, 0, 10, -5, 0, 13, -5, 0, 16, -5, 0, -7,
-7, 0, -4, -7, 0, -1, -7, 0, 2, -7, 0, 5, -7, 0, 8, -7, 0, 11, -7,
0, 14, -7, 0, 17, -7, 0, -8, -8, 0, -5, -8, 0, -2, -8, 0, 1, -8,
0, 4, -8, 0, 7, -8, 0, 10, -8, 0, 13, -8, 0, 16, -8, 0, -7, -10,
0, -4, -10, 0, -1, -10, 0, 2, -10, 0, 5, -10, 0, 8, -10, 0, 11,
-10, 0, 14, -10, 0, 17, -10, 0, -5, -11, 0, -2, -11, 0, 1, -11, 0,
4, -11, 0, 7, -11, 0, 10, -11, 0, 13, -11, 0, 16, -11, 0, -1, -13,
0, 2, -13, 0, 5, -13, 0, 8, -13, 0, 11, -13, 0, 14, -13, 0, 1,
-14, 0, 4, -14, 0, 7, -14, 0, 10, -14, 0, 13, -14, 0, 5, -16, 0,
8, -16, 0, 11, -16, 0, 7, -17, 0, 10, -17, 0]).reshape((162, 3)) / 27.0
cc162_1x1 = np.array([-8, -16, 0, -10, -14, 0, -7, -14, 0, -4, -14,
0, -11, -13, 0, -8, -13, 0, -5, -13, 0, -2, -13, 0, -13, -11, 0,
-10, -11, 0, -7, -11, 0, -4, -11, 0, -1, -11, 0, 2, -11, 0, -14,
-10, 0, -11, -10, 0, -8, -10, 0, -5, -10, 0, -2, -10, 0, 1, -10,
0, 4, -10, 0, -16, -8, 0, -13, -8, 0, -10, -8, 0, -7, -8, 0, -4,
-8, 0, -1, -8, 0, 2, -8, 0, 5, -8, 0, 8, -8, 0, -14, -7, 0, -11,
-7, 0, -8, -7, 0, -5, -7, 0, -2, -7, 0, 1, -7, 0, 4, -7, 0, 7, -7,
0, 10, -7, 0, -13, -5, 0, -10, -5, 0, -7, -5, 0, -4, -5, 0, -1,
-5, 0, 2, -5, 0, 5, -5, 0, 8, -5, 0, 11, -5, 0, -14, -4, 0, -11,
-4, 0, -8, -4, 0, -5, -4, 0, -2, -4, 0, 1, -4, 0, 4, -4, 0, 7, -4,
0, 10, -4, 0, -13, -2, 0, -10, -2, 0, -7, -2, 0, -4, -2, 0, -1,
-2, 0, 2, -2, 0, 5, -2, 0, 8, -2, 0, 11, -2, 0, -11, -1, 0, -8,
-1, 0, -5, -1, 0, -2, -1, 0, 1, -1, 0, 4, -1, 0, 7, -1, 0, 10, -1,
0, 13, -1, 0, -10, 1, 0, -7, 1, 0, -4, 1, 0, -1, 1, 0, 2, 1, 0, 5,
1, 0, 8, 1, 0, 11, 1, 0, 14, 1, 0, -11, 2, 0, -8, 2, 0, -5, 2, 0,
-2, 2, 0, 1, 2, 0, 4, 2, 0, 7, 2, 0, 10, 2, 0, 13, 2, 0, -10, 4,
0, -7, 4, 0, -4, 4, 0, -1, 4, 0, 2, 4, 0, 5, 4, 0, 8, 4, 0, 11, 4,
0, 14, 4, 0, -8, 5, 0, -5, 5, 0, -2, 5, 0, 1, 5, 0, 4, 5, 0, 7, 5,
0, 10, 5, 0, 13, 5, 0, 16, 5, 0, -7, 7, 0, -4, 7, 0, -1, 7, 0, 2,
7, 0, 5, 7, 0, 8, 7, 0, 11, 7, 0, 14, 7, 0, 17, 7, 0, -8, 8, 0,
-5, 8, 0, -2, 8, 0, 1, 8, 0, 4, 8, 0, 7, 8, 0, 10, 8, 0, 13, 8, 0,
16, 8, 0, -7, 10, 0, -4, 10, 0, -1, 10, 0, 2, 10, 0, 5, 10, 0, 8,
10, 0, 11, 10, 0, 14, 10, 0, 17, 10, 0, -5, 11, 0, -2, 11, 0, 1,
11, 0, 4, 11, 0, 7, 11, 0, 10, 11, 0, 13, 11, 0, 16, 11, 0, -1,
13, 0, 2, 13, 0, 5, 13, 0, 8, 13, 0, 11, 13, 0, 14, 13, 0, 1, 14,
0, 4, 14, 0, 7, 14, 0, 10, 14, 0, 13, 14, 0, 5, 16, 0, 8, 16, 0,
11, 16, 0, 7, 17, 0, 10, 17, 0]).reshape((162, 3)) / 27.0
|
grhawk/ASE
|
tools/ase/dft/kpoints.py
|
Python
|
gpl-2.0
| 11,540
|
[
"ASE",
"CRYSTAL"
] |
8ef4d5273334dd101d4d3ce2a1576318b2c1994b3b96a5b7e9abc7d8205fc40e
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
************************************
**espressopp.integrator.Isokinetic**
************************************
.. function:: espressopp.integrator.Isokinetic(system)
:param system:
:type system:
"""
from espressopp.esutil import cxxinit
from espressopp import pmi
from espressopp.integrator.Extension import *
from _espressopp import integrator_Isokinetic
class IsokineticLocal(ExtensionLocal, integrator_Isokinetic):
def __init__(self, system):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, integrator_Isokinetic, system)
if pmi.isController:
class Isokinetic(Extension):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espressopp.integrator.IsokineticLocal',
pmiproperty = [ 'temperature', 'coupling' ]
)
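# Editor's sketch (hypothetical; not part of the original file). The proxy
# exposes 'temperature' and 'coupling' as properties (see pmiproperty
# above), so typical use from a script would look like the following,
# assuming an existing `system` and velocity-Verlet `integrator` and the
# standard espressopp addExtension API:
#
#     isok = espressopp.integrator.Isokinetic(system)
#     isok.temperature = 1.0
#     isok.coupling = 1
#     integrator.addExtension(isok)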
|
capoe/espressopp.soap
|
src/integrator/Isokinetic.py
|
Python
|
gpl-3.0
| 1,750
|
[
"ESPResSo"
] |
f99fd0b9d65816427f8a5f20799da64ececb90ef88e03f52ce52400d6441a314
|
#
# Copyright 2011 - 2013 Brian R. D'Urso
#
# This file is part of Python Instrument Control System, also known as Pythics.
#
# Pythics is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pythics is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pythics. If not, see <http://www.gnu.org/licenses/>.
#
# this file is used to generate documentation with Sphinx and autodoc
from pythics.qwt_proxies import *
# take a snapshot of the names first: the loop below mutates globals()
keys = list(globals().keys())
for k in keys:
# rename classes and make them look like they came from this module
# for sphinx autodoc
if 'Proxy' in k:
cls = globals()[k]
#print cls
old_name = cls.__name__
# strip the string 'Proxy' off the end of each name
new_name = old_name[0:-5]
cls.__name__ = new_name
cls.__module__ = 'qwt'
globals().pop(old_name)
globals()[new_name] = cls
del keys, k, cls, old_name, new_name
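# Editor's note (illustration; not part of the original file): after the
# loop above, a class exported by pythics.qwt_proxies as, say, ``PlotProxy``
# (hypothetical name) becomes visible to Sphinx autodoc as ``qwt.Plot`` --
# the trailing 'Proxy' is stripped from ``cls.__name__`` and
# ``cls.__module__`` is rebound to 'qwt'.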
|
LunarLanding/Pythics
|
doc/qwt.py
|
Python
|
gpl-3.0
| 1,365
|
[
"Brian"
] |
9b4471a1f9c42758194acdaf98c515858c6ec3b2735ce7e81a66d341f7158560
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from future.builtins import zip, range
from future.utils import viewkeys, viewitems
from collections import Counter, defaultdict, OrderedDict
from warnings import warn
import numpy as np
from scipy.stats import entropy
from skbio.stats.distance import DistanceMatrix
from skbio.io.util import open_file
from ._exception import SequenceCollectionError, StockholmParseError
class SequenceCollection(object):
"""Class for storing collections of biological sequences.
Parameters
----------
seqs : list of `skbio.sequence.BiologicalSequence` objects
The `skbio.sequence.BiologicalSequence` objects to load into
a new `SequenceCollection` object.
validate : bool, optional
If True, runs the `is_valid` method after construction and raises
`SequenceCollectionError` if ``is_valid == False``.
Raises
------
skbio.alignment.SequenceCollectionError
If ``validate == True`` and ``is_valid == False``.
See Also
--------
skbio.sequence.BiologicalSequence
skbio.sequence.NucleotideSequence
skbio.sequence.DNASequence
skbio.sequence.RNASequence
Alignment
skbio.parse.sequences
skbio.parse.sequences.parse_fasta
Examples
--------
>>> from skbio.alignment import SequenceCollection
>>> from skbio.sequence import DNA
>>> sequences = [DNA('ACCGT', id="seq1"),
... DNA('AACCGGT', id="seq2")]
>>> s1 = SequenceCollection(sequences)
>>> s1
<SequenceCollection: n=2; mean +/- std length=6.00 +/- 1.00>
"""
@classmethod
def from_fasta_records(cls, fasta_records, seq_constructor,
validate=False):
r"""Initialize a `SequenceCollection` object
Parameters
----------
fasta_records : iterator of tuples
The records to load into a new `SequenceCollection` object. These
should be tuples of ``(sequence_id, sequence)``.
seq_constructor : skbio.sequence.BiologicalSequence
validate : bool, optional
If True, runs the `is_valid` method after construction and raises
`SequenceCollectionError` if ``is_valid == False``.
Returns
-------
SequenceCollection (or a derived class)
The new `SequenceCollection` object.
Raises
------
skbio.alignment.SequenceCollectionError
If ``validate == True`` and ``is_valid == False``.
See Also
--------
skbio.sequence.BiologicalSequence
skbio.sequence.NucleotideSequence
skbio.sequence.DNASequence
skbio.sequence.RNASequence
Alignment
skbio.parse.sequences
skbio.parse.sequences.parse_fasta
Examples
--------
>>> from skbio.alignment import SequenceCollection
>>> from skbio.parse.sequences import parse_fasta
>>> from StringIO import StringIO
>>> from skbio.sequence import DNA
>>> fasta_f = StringIO('>seq1\nACCGT\n>seq2\nAACCGGT\n')
>>> s1 = SequenceCollection.from_fasta_records(
... parse_fasta(fasta_f), DNA)
>>> s1
<SequenceCollection: n=2; mean +/- std length=6.00 +/- 1.00>
>>> records = [('seq1', 'ACCGT'), ('seq2', 'AACCGGT')]
>>> s1 = SequenceCollection.from_fasta_records(records, DNA)
>>> s1
<SequenceCollection: n=2; mean +/- std length=6.00 +/- 1.00>
"""
data = []
for seq_id, seq in fasta_records:
try:
id, description = seq_id.split(None, 1)
except ValueError:
id = seq_id.strip()
description = None
data.append(seq_constructor(seq, id=id,
description=description))
return cls(data, validate=validate)
def __init__(self, seqs, validate=False):
self._data = seqs
self._id_to_index = {}
for i, seq in enumerate(self._data):
id = seq.id
if id in self:
raise SequenceCollectionError(
"All sequence ids must be unique, but "
"id %s is present multiple times." % id)
else:
self._id_to_index[seq.id] = i
# This is bad because we're making a second pass through the sequence
# collection to validate. We'll want to avoid this, but it's tricky
# because different subclasses will want to define their own is_valid
# methods.
if validate and not self.is_valid():
raise SequenceCollectionError(
"%s failed to validate." % self.__class__.__name__)
def __contains__(self, id):
r"""The in operator.
Parameters
----------
id : str
The id to look up in the `SequenceCollection`.
Returns
-------
bool
Indicates whether `id` corresponds to a sequence id
in the `SequenceCollection`.
.. shownumpydoc
"""
return id in self._id_to_index
def __eq__(self, other):
r"""The equality operator.
Parameters
----------
other : `SequenceCollection`
The `SequenceCollection` to test for equality against.
Returns
-------
bool
Indicates whether `self` and `other` are equal.
Notes
-----
`SequenceCollection` objects are equal if they are the same type,
contain the same number of sequences, and if each of the
`skbio.sequence.BiologicalSequence` objects, in order, are equal.
.. shownumpydoc
"""
if self.__class__ != other.__class__:
return False
elif len(self) != len(other):
return False
else:
for self_seq, other_seq in zip(self, other):
if self_seq != other_seq:
return False
return True
def __getitem__(self, index):
r"""The indexing operator.
Parameters
----------
index : int, str
The position or sequence id of the
`skbio.sequence.BiologicalSequence` to return from the
`SequenceCollection`.
Returns
-------
`skbio.sequence.BiologicalSequence`
The `skbio.sequence.BiologicalSequence` at the specified
index in the `SequenceCollection`.
Examples
--------
>>> from skbio.alignment import SequenceCollection
>>> from skbio.sequence import DNA
>>> sequences = [DNA('ACCGT', id="seq1"),
... DNA('AACCGGT', id="seq2")]
>>> s1 = SequenceCollection(sequences)
>>> s1[0]
<DNASequence: ACCGT (length: 5)>
>>> s1["seq1"]
<DNASequence: ACCGT (length: 5)>
.. shownumpydoc
"""
if isinstance(index, str):
return self.get_seq(index)
else:
return self._data[index]
def __iter__(self):
r"""The iter operator.
Returns
-------
iterator
`skbio.sequence.BiologicalSequence` iterator for the
`SequenceCollection`.
.. shownumpydoc
"""
return iter(self._data)
def __len__(self):
r"""The len operator.
Returns
-------
int
The number of sequences in the `SequenceCollection`.
.. shownumpydoc
"""
return self.sequence_count()
def __ne__(self, other):
r"""The inequality operator.
Parameters
----------
other : `SequenceCollection`
Returns
-------
bool
Indicates whether self and other are not equal.
Notes
-----
See `SequenceCollection.__eq__` for a description of what it means for
a pair of `SequenceCollection` objects to be equal.
.. shownumpydoc
"""
return not self.__eq__(other)
def __repr__(self):
r"""The repr method.
Returns
-------
str
Returns a string representation of the object.
Notes
-----
String representation contains the class name, the number of sequences
in the `SequenceCollection` (n), and the mean and standard deviation
sequence length.
Examples
--------
>>> from skbio.alignment import SequenceCollection
>>> from skbio.sequence import DNA
>>> sequences = [DNA('ACCGT', id="seq1"),
... DNA('AACCGGT', id="seq2")]
>>> s1 = SequenceCollection(sequences)
>>> print(repr(s1))
<SequenceCollection: n=2; mean +/- std length=6.00 +/- 1.00>
.. shownumpydoc
"""
cn = self.__class__.__name__
count, center, spread = self.distribution_stats()
return "<%s: n=%d; mean +/- std length=%.2f +/- %.2f>" \
% (cn, count, center, spread)
def __reversed__(self):
"""The reversed method.
Returns
-------
iterator
`skbio.sequence.BiologicalSequence` iterator for the
`SequenceCollection` in reverse order.
.. shownumpydoc
"""
return reversed(self._data)
def __str__(self):
r"""The str method.
Returns
-------
str
Fasta-formatted string of all sequences in the object.
.. shownumpydoc
"""
return self.to_fasta()
def distances(self, distance_fn):
"""Compute distances between all pairs of sequences
Parameters
----------
distance_fn : function
Function for computing the distance between a pair of sequences.
This must take two sequences as input (as
`skbio.sequence.BiologicalSequence` objects) and return a
single integer or float value.
Returns
-------
skbio.DistanceMatrix
Matrix containing the distances between all pairs of sequences.
Raises
------
skbio.util.exception.BiologicalSequenceError
If ``len(self) != len(other)`` and ``distance_fn`` ==
``scipy.spatial.distance.hamming``.
See Also
--------
skbio.DistanceMatrix
scipy.spatial.distance.hamming
Examples
--------
>>> from scipy.spatial.distance import hamming
>>> from skbio.alignment import SequenceCollection
>>> from skbio.sequence import DNA
>>> seqs = [DNA("ACCGGGTT", id="s1"),
... DNA("ACTTGGTT", id="s2"),
... DNA("ACTAGGTT", id="s3")]
>>> a1 = SequenceCollection(seqs)
>>> print(a1.distances(hamming))
3x3 distance matrix
IDs:
s1, s2, s3
Data:
[[ 0. 0.25 0.25 ]
[ 0.25 0. 0.125]
[ 0.25 0.125 0. ]]
"""
sequence_count = self.sequence_count()
dm = np.zeros((sequence_count, sequence_count))
ids = []
for i in range(sequence_count):
self_i = self[i]
ids.append(self_i.id)
for j in range(i):
dm[i, j] = dm[j, i] = self_i.distance(self[j], distance_fn)
return DistanceMatrix(dm, ids)
def distribution_stats(self, center_f=np.mean, spread_f=np.std):
r"""Return sequence count, and center and spread of sequence lengths
Parameters
----------
center_f : function
Should take a list-like object and return a single value
representing the center of the distribution.
spread_f : function
Should take a list-like object and return a single value
representing the spread of the distribution.
Returns
-------
tuple of (int, float, float)
The sequence count, center of length distribution, spread of length
distribution.
Notes
-----
Alternatives for `center_f` and `spread_f` could be median and median
absolute deviation.
Examples
--------
>>> from skbio.alignment import SequenceCollection
>>> from skbio.sequence import DNA
>>> sequences = [DNA('ACCGT', id="seq1"),
... DNA('AACCGGT', id="seq2")]
>>> s1 = SequenceCollection(sequences)
>>> s1.distribution_stats()
(2, 6.0, 1.0)
"""
if self.is_empty():
return (0, 0.0, 0.0)
else:
sequence_count = self.sequence_count()
sequence_lengths = self.sequence_lengths()
return (sequence_count, center_f(sequence_lengths),
spread_f(sequence_lengths))
def degap(self):
r"""Return a new `SequenceCollection` with all gap characters removed.
Returns
-------
SequenceCollection
A new `SequenceCollection` where
`skbio.sequence.BiologicalSequence.degap` has been called on
each sequence.
Examples
--------
>>> from skbio.alignment import SequenceCollection
>>> from skbio.sequence import DNA
>>> sequences = [DNA('A--CCGT.', id="seq1"),
... DNA('.AACCG-GT.', id="seq2")]
>>> s1 = SequenceCollection(sequences)
>>> s2 = s1.degap()
>>> s2
<SequenceCollection: n=2; mean +/- std length=6.00 +/- 1.00>
"""
return SequenceCollection([seq.degap() for seq in self])
def get_seq(self, id):
r"""Return a sequence from the `SequenceCollection` by its id.
Parameters
----------
id : str
The id of the sequence to return.
Returns
-------
skbio.sequence.BiologicalSequence
The `skbio.sequence.BiologicalSequence` with `id`.
Raises
------
KeyError
If `id` is not in the `SequenceCollection` object.
Examples
--------
>>> from skbio.alignment import SequenceCollection
>>> from skbio.sequence import DNA
>>> sequences = [DNA('A--CCGT.', id="seq1"),
... DNA('.AACCG-GT.', id="seq2")]
>>> s1 = SequenceCollection(sequences)
>>> print(s1['seq1'])
A--CCGT.
"""
return self[self._id_to_index[id]]
def ids(self):
"""Returns the `BiologicalSequence` ids
Returns
-------
list
The ordered list of ids for the
`skbio.sequence.BiologicalSequence` objects in the
`SequenceCollection`.
Examples
--------
>>> from skbio.alignment import SequenceCollection
>>> from skbio.sequence import DNA
>>> sequences = [DNA('A--CCGT.', id="seq1"),
... DNA('.AACCG-GT.', id="seq2")]
>>> s1 = SequenceCollection(sequences)
>>> print(s1.ids())
['seq1', 'seq2']
"""
return [seq.id for seq in self]
def int_map(self, prefix=""):
"""Create an integer-based mapping of sequence ids
Parameters
----------
prefix : str
String prefix for new integer-based ids.
Returns
-------
dict
Mapping of new ids to sequences.
dict
Mapping of new ids to old ids.
Notes
-----
This is useful when writing sequences out for use with programs that
are picky about their sequence ids (e.g., RAxML).
The integer-based ids will be strings, for consistency (e.g., when a
prefix is passed), and begin at 1.
References
----------
Stamatakis, A. "RAxML Version 8: A Tool for Phylogenetic Analysis and
Post-Analysis of Large Phylogenies." Bioinformatics, 2014.
Examples
--------
>>> from skbio.alignment import SequenceCollection
>>> from skbio.sequence import DNA
>>> sequences = [DNA('ACCGT', id="seq1"),
... DNA('AACCGGT', id="seq2")]
>>> s1 = SequenceCollection(sequences)
>>> new_id_to_seqs, new_id_to_old_ids = s1.int_map()
>>> print(repr(new_id_to_seqs['1']))
<DNASequence: ACCGT (length: 5)>
>>> print(repr(new_id_to_seqs['2']))
<DNASequence: AACCGGT (length: 7)>
>>> print(new_id_to_old_ids['1'])
seq1
>>> print(new_id_to_old_ids['2'])
seq2
"""
int_keys = []
int_map = []
for i, seq in enumerate(self):
k = ("%s%d" % (prefix, i+1))
int_map.append((k, seq))
int_keys.append((k, seq.id))
return dict(int_map), dict(int_keys)
def is_empty(self):
"""Return True if the SequenceCollection is empty
Returns
-------
bool
``True`` if `self` contains zero sequences, and ``False``
otherwise.
"""
return self.sequence_count() == 0
def is_valid(self):
"""Return True if the SequenceCollection is valid
Returns
-------
bool
``True`` if `self` is valid, and ``False`` otherwise.
Notes
-----
Validity is defined as having no sequences containing characters
outside of their valid character sets.
See Also
--------
skbio.alignment.BiologicalSequence.is_valid
Examples
--------
>>> from skbio.alignment import SequenceCollection
>>> from skbio.sequence import DNA, RNA
>>> sequences = [DNA('ACCGT', id="seq1"),
... DNA('AACCGGT', id="seq2")]
>>> s1 = SequenceCollection(sequences)
>>> print(s1.is_valid())
True
>>> sequences = [RNA('ACCGT', id="seq1"),
... RNA('AACCGGT', id="seq2")]
>>> s1 = SequenceCollection(sequences)
>>> print(s1.is_valid())
False
"""
return self._validate_character_set()
def iteritems(self):
"""Generator of id, sequence tuples
Returns
-------
generator of tuples
Each tuple contains ordered
(`skbio.sequence.BiologicalSequence.id`,
`skbio.sequence.BiologicalSequence`) pairs.
"""
for seq in self:
yield seq.id, seq
def lower(self):
"""Converts all sequences to lowercase
Returns
-------
SequenceCollection
New `SequenceCollection` object where
`skbio.sequence.BiologicalSequence.lower()` has been called
on each sequence.
See Also
--------
skbio.sequence.BiologicalSequence.lower
upper
"""
return self.__class__([seq.lower() for seq in self])
def sequence_count(self):
"""Return the count of sequences in the `SequenceCollection`
Returns
-------
int
The number of sequences in the `SequenceCollection`.
See Also
--------
sequence_lengths
Alignment.sequence_length
"""
return len(self._data)
def k_word_frequencies(self, k, overlapping=True, constructor=str):
"""Return frequencies of length k words for sequences in Alignment
Parameters
----------
k : int
The word length.
overlapping : bool, optional
Defines whether the k-words should overlap. This is only relevant
when k > 1.
constructor : type, optional
The constructor for the returned k-words.
Returns
-------
list
List of ``collections.defaultdict`` objects, one for each sequence
in the `SequenceCollection`, representing the frequency of each
k-word in that sequence.
See Also
--------
position_frequencies
Examples
--------
>>> from skbio.alignment import Alignment
>>> from skbio.sequence import DNA
>>> sequences = [DNA('A', id="seq1"),
... DNA('AT', id="seq2"),
... DNA('TTTT', id="seq3")]
>>> s1 = SequenceCollection(sequences)
>>> for freqs in s1.k_word_frequencies(1):
... print(freqs)
defaultdict(<type 'int'>, {'A': 1.0})
defaultdict(<type 'int'>, {'A': 0.5, 'T': 0.5})
defaultdict(<type 'int'>, {'T': 1.0})
>>> for freqs in s1.k_word_frequencies(2):
... print(freqs)
defaultdict(<type 'int'>, {})
defaultdict(<type 'int'>, {'AT': 1.0})
defaultdict(<type 'int'>, {'TT': 1.0})
"""
result = []
for s in self:
result.append(s.k_word_frequencies(k, overlapping, constructor))
return result
def sequence_lengths(self):
"""Return lengths of the sequences in the `SequenceCollection`
Returns
-------
list
The ordered list of sequence lengths.
See Also
--------
sequence_count
"""
return [len(seq) for seq in self]
def to_fasta(self):
"""Return fasta-formatted string representing the `SequenceCollection`
Returns
-------
str
A fasta-formatted string representing the `SequenceCollection`.
See Also
--------
skbio.parse.sequences.parse_fasta
"""
return ''.join([seq.to_fasta() for seq in self._data])
def toFasta(self):
"""Return fasta-formatted string representing the `SequenceCollection`
.. note:: Deprecated in skbio 0.2.0.
`SequenceCollection.toFasta` will be removed in skbio 0.3.0;
it is replaced by `SequenceCollection.to_fasta` as the latter
adheres to PEP8 naming conventions. This is necessary to keep
in place now as these objects are sometimes passed into
code that expects a `cogent.alignment.Alignment` object
(e.g., PyNAST), so we need to support the method with this
name.
Returns
-------
str
A fasta-formatted string representing the `SequenceCollection`.
"""
warn("SequenceCollection.toFasta() is deprecated. You should use "
"SequenceCollection.to_fasta().")
return self.to_fasta()
def upper(self):
"""Converts all sequences to uppercase
Returns
-------
SequenceCollection
New `SequenceCollection` object where `BiologicalSequence.upper()`
has been called on each sequence.
See Also
--------
BiologicalSequence.upper
lower
"""
return self.__class__([seq.upper() for seq in self])
def _validate_character_set(self):
"""Return ``True`` if all sequences are valid, ``False`` otherwise
"""
for seq in self:
if not seq.is_valid():
return False
return True
class Alignment(SequenceCollection):
"""Class for storing alignments of biological sequences.
The ``Alignment`` class adds convenience methods to the
``SequenceCollection`` class to make it easy to work with alignments of
biological sequences.
Parameters
----------
seqs : list of `skbio.sequence.BiologicalSequence` objects
The `skbio.sequence.BiologicalSequence` objects to load into
a new `Alignment` object.
validate : bool, optional
If True, runs the `is_valid` method after construction and raises
`SequenceCollectionError` if ``is_valid == False``.
score : float, optional
The score of the alignment, if applicable (usually only if the
alignment was just constructed).
start_end_positions : iterable of two-item tuples, optional
The start and end positions of each input sequence in the alignment,
if applicable (usually only if the alignment was just constructed using
a local alignment algorithm). Note that these should be indexes into
the unaligned sequences, though the `Alignment` object itself doesn't
know about these.
Raises
------
skbio.alignment.SequenceCollectionError
If ``validate == True`` and ``is_valid == False``.
Notes
-----
By definition, all of the sequences in an alignment must be of the same
length. For this reason, an alignment can be thought of as a matrix of
sequences (rows) by positions (columns).
See Also
--------
skbio.sequence.BiologicalSequence
skbio.sequence.NucleotideSequence
skbio.sequence.DNASequence
skbio.sequence.RNASequence
SequenceCollection
skbio.parse.sequences
skbio.parse.sequences.parse_fasta
Examples
--------
>>> from skbio.alignment import Alignment
>>> from skbio.sequence import DNA
>>> sequences = [DNA('A--CCGT', id="seq1"),
... DNA('AACCGGT', id="seq2")]
>>> a1 = Alignment(sequences)
>>> a1
<Alignment: n=2; mean +/- std length=7.00 +/- 0.00>
"""
def __init__(self, seqs, validate=False, score=None,
start_end_positions=None):
super(Alignment, self).__init__(seqs, validate)
# store None explicitly so that score() can return None, as documented,
# when no score was provided on construction
self._score = float(score) if score is not None else None
self._start_end_positions = start_end_positions
def distances(self, distance_fn=None):
"""Compute distances between all pairs of sequences
Parameters
----------
distance_fn : function, optional
Function for computing the distance between a pair of sequences.
This must take two sequences as input (as
`skbio.sequence.BiologicalSequence` objects) and return a
single integer or float value. Defaults to
`scipy.spatial.distance.hamming`.
Returns
-------
skbio.DistanceMatrix
Matrix containing the distances between all pairs of sequences.
Raises
------
skbio.util.exception.BiologicalSequenceError
If ``len(self) != len(other)`` and ``distance_fn`` ==
``scipy.spatial.distance.hamming``.
See Also
--------
skbio.DistanceMatrix
scipy.spatial.distance.hamming
Examples
--------
>>> from skbio.alignment import Alignment
>>> from skbio.sequence import DNA
>>> seqs = [DNA("A-CCGGG", id="s1"),
... DNA("ATCC--G", id="s2"),
... DNA("ATCCGGA", id="s3")]
>>> a1 = Alignment(seqs)
>>> print(a1.distances())
3x3 distance matrix
IDs:
s1, s2, s3
Data:
[[ 0. 0.42857143 0.28571429]
[ 0.42857143 0. 0.42857143]
[ 0.28571429 0.42857143 0. ]]
"""
return super(Alignment, self).distances(distance_fn)
def score(self):
"""Returns the score of the alignment.
Returns
-------
float, None
The score of the alignment, or ``None`` if this was not provided on
object construction.
Notes
-----
This value will often be ``None``, as it is generally only going to be
provided on construction if the alignment itself was built within
scikit-bio.
"""
return self._score
def start_end_positions(self):
"""Returns the (start, end) positions for each aligned sequence.
Returns
-------
list, None
The list of sequence start/end positions, or ``None`` if this was
not provided on object construction.
Notes
-----
The start/end positions indicate the range of the unaligned sequences
in the alignment. For example, if local alignment were performed on the
sequences ACA and TACAT, depending on the specific algorithm that was
used to perform the alignment, the start/end positions would likely be:
``[(0,2), (1,3)]``. This indicates that the first and last positions of
the second sequence were not included in the alignment, and the
aligned sequences were therefore:
ACA
ACA
This value will often be ``None``, as it is generally only going to be
provided on construction if the alignment itself was built within
scikit-bio.
"""
return self._start_end_positions
def subalignment(self, seqs_to_keep=None, positions_to_keep=None,
invert_seqs_to_keep=False,
invert_positions_to_keep=False):
"""Returns new `Alignment` that is a subset of the current `Alignment`
Parameters
----------
seqs_to_keep : list, optional
A list of sequence ids to be retained in the resulting
`Alignment`. If this is not passed, the default will be to retain
all sequences.
positions_to_keep : list, optional
A list of position ids to be retained in the resulting
`Alignment`. If this is not passed, the default will be to retain
all positions.
invert_seqs_to_keep : bool, optional
If `True`, the sequences identified in `seqs_to_keep` will be
discarded, rather than retained.
invert_positions_to_keep : bool, optional
If `True`, the sequences identified in `positions_to_keep` will be
discarded, rather than retained.
Returns
-------
Alignment
The specified subalignment.
Examples
--------
>>> from skbio.alignment import Alignment
>>> from skbio.sequence import DNA
>>> seqs = [DNA("A-CCGGG", id="s1"),
... DNA("ATCC--G", id="s2"),
... DNA("ATCCGGA", id="s3")]
>>> a1 = Alignment(seqs)
>>> a1
<Alignment: n=3; mean +/- std length=7.00 +/- 0.00>
>>> a1.subalignment(seqs_to_keep=["s1", "s2"])
<Alignment: n=2; mean +/- std length=7.00 +/- 0.00>
>>> a1.subalignment(seqs_to_keep=["s1", "s2"],
... invert_seqs_to_keep=True)
<Alignment: n=1; mean +/- std length=7.00 +/- 0.00>
>>> a1.subalignment(positions_to_keep=[0, 2, 3, 5])
<Alignment: n=3; mean +/- std length=4.00 +/- 0.00>
>>> a1.subalignment(positions_to_keep=[0, 2, 3, 5],
... invert_positions_to_keep=True)
<Alignment: n=3; mean +/- std length=3.00 +/- 0.00>
>>> a1.subalignment(seqs_to_keep=["s1", "s2"],
... positions_to_keep=[0, 2, 3, 5])
<Alignment: n=2; mean +/- std length=4.00 +/- 0.00>
"""
# if seqs_to_keep was not passed
if seqs_to_keep is None:
# and invert_seqs_to_keep is True
if invert_seqs_to_keep:
# return an empty alignment (because we're inverting the
# default of keeping all sequences)
return self.__class__([])
# else if invert_seqs_to_keep is False
else:
# default to returning all sequences
def keep_seq(i, id):
return True
# else, if seqs_to_keep was passed
else:
seqs_to_keep = set(seqs_to_keep)
# and invert_seqs_to_keep is True
if invert_seqs_to_keep:
# keep only sequences that were not listed in seqs_to_keep
def keep_seq(i, id):
return not (id in seqs_to_keep or
i in seqs_to_keep)
# else if invert_seqs_to_keep is False
else:
# keep only sequences that were listed in seqs_to_keep
def keep_seq(i, id):
return (id in seqs_to_keep or
i in seqs_to_keep)
# if positions_to_keep was not passed
if positions_to_keep is None:
# and invert_positions_to_keep is True
if invert_positions_to_keep:
# return an empty alignment (because we're inverting the
# default of keeping all positions)
return self.__class__([])
# else if invert_positions_to_keep is False
else:
# default to returning all positions
def keep_position(pos):
return True
# else, if positions_to_keep was passed
else:
positions_to_keep = set(positions_to_keep)
# and invert_positions_to_keep is True
if invert_positions_to_keep:
# keep only positions that were not listed in
# positions_to_keep
def keep_position(pos):
return pos not in positions_to_keep
# else if invert_positions_to_keep is False
else:
# keep only sequences that were listed in positions_to_keep
def keep_position(pos):
return pos in positions_to_keep
# prep the result object
result = []
# iterate over sequences
for sequence_index, seq in enumerate(self):
# determine if we're keeping the current sequence
if keep_seq(sequence_index, seq.id):
# if so, iterate over the positions to determine which we're
# keeping, and store them in a new list
new_seq = [c for i, c in enumerate(seq) if keep_position(i)]
# and then pack the resulting sequence into a new
# BiologicalSequence object, of the same type as the current
# object.
# Note: This is bad, we are calling join too much. This
# should be addressed in issue #194.
result.append(seq.__class__(''.join(new_seq),
id=seq.id,
description=seq.description))
# if we're not keeping the current sequence, move on to the next
else:
continue
# pack the result up in the same type of object as the current object
# and return it
return self.__class__(result)
def is_valid(self):
"""Return True if the Alignment is valid
Returns
-------
bool
``True`` if `self` is valid, and ``False`` otherwise.
Notes
-----
Validity is defined as having no sequences containing characters
outside of their valid character sets, and all sequences being of equal
length.
See Also
--------
skbio.alignment.BiologicalSequence.is_valid
Examples
--------
>>> from skbio.alignment import Alignment
>>> from skbio.sequence import DNA, RNA
>>> sequences = [DNA('ACCGT--', id="seq1"),
... DNA('AACCGGT', id="seq2")]
>>> a1 = Alignment(sequences)
>>> a1.is_valid()
True
>>> sequences = [DNA('ACCGT', id="seq1"),
... DNA('AACCGGT', id="seq2")]
>>> a1 = Alignment(sequences)
>>> print(a1.is_valid())
False
>>> sequences = [RNA('ACCGT--', id="seq1"),
... RNA('AACCGGT', id="seq2")]
>>> a1 = Alignment(sequences)
>>> print(a1.is_valid())
False
"""
return super(Alignment, self).is_valid() and self._validate_lengths()
def iter_positions(self, constructor=None):
"""Generator of Alignment positions (i.e., columns)
Parameters
----------
constructor : type, optional
Constructor function for creating the positional values. By
default, these will be the same type as corresponding
`skbio.sequence.BiologicalSequence` in the
`SequenceCollection` object, but you can pass a
`skbio.sequence.BiologicalSequence` class here to ensure
that they are all of consistent type, or ``str`` to have them
returned as strings.
Returns
-------
GeneratorType
Generator of lists of positional values in the
`SequenceCollection` (effectively the transpose of the alignment).
See Also
--------
iter
Examples
--------
>>> from skbio.alignment import Alignment
>>> from skbio.sequence import DNA
>>> sequences = [DNA('ACCGT--', id="seq1"),
... DNA('AACCGGT', id="seq2")]
>>> a1 = Alignment(sequences)
>>> for position in a1.iter_positions():
... print(position)
[<DNASequence: A (length: 1)>, <DNASequence: A (length: 1)>]
[<DNASequence: C (length: 1)>, <DNASequence: A (length: 1)>]
[<DNASequence: C (length: 1)>, <DNASequence: C (length: 1)>]
[<DNASequence: G (length: 1)>, <DNASequence: C (length: 1)>]
[<DNASequence: T (length: 1)>, <DNASequence: G (length: 1)>]
[<DNASequence: - (length: 1)>, <DNASequence: G (length: 1)>]
[<DNASequence: - (length: 1)>, <DNASequence: T (length: 1)>]
>>> for position in a1.iter_positions(constructor=str):
... print(position)
['A', 'A']
['C', 'A']
['C', 'C']
['G', 'C']
['T', 'G']
['-', 'G']
['-', 'T']
"""
if constructor is None:
def constructor(s):
return s
for i in range(self.sequence_length()):
position = [constructor(seq[i]) for seq in self]
yield position
def majority_consensus(self, constructor=None):
"""Return the majority consensus sequence for the `Alignment`
Parameters
----------
constructor : function, optional
Constructor function for creating the consensus sequence. By
default, this will be the same type as the first sequence in the
`Alignment`.
Returns
-------
skbio.sequence.BiologicalSequence
The consensus sequence of the `Alignment`. In other words, at each
position the most common character is chosen, and those characters
are combined to create a new sequence.
Notes
-----
If there are two characters that are equally abundant in the sequence
at a given position, the choice of which of those characters will be
present at that position in the result is arbitrary.
Examples
--------
>>> from skbio.alignment import Alignment
>>> from skbio.sequence import DNA
>>> sequences = [DNA('AC--', id="seq1"),
... DNA('AT-C', id="seq2"),
... DNA('TT-C', id="seq3")]
>>> a1 = Alignment(sequences)
>>> a1.majority_consensus()
<DNASequence: AT-C (length: 4)>
>>> a1.majority_consensus(constructor=str)
'AT-C'
"""
# handle empty Alignment case
if self.is_empty():
return ''
if constructor is None:
constructor = self[0].__class__
result = []
for c in self.position_counters():
# Counter.most_common returns an ordered list of the
# n most common (sequence, count) items in Counter. Here
# we set n=1, and take only the character, not the count.
result.append(c.most_common(1)[0][0])
result = ''.join(result)
return constructor(result)
def omit_gap_positions(self, maximum_gap_frequency):
"""Returns Alignment with positions filtered based on gap frequency
Parameters
----------
maximum_gap_frequency : float
The maximum fraction of the sequences that can contain a gap at a
given position for that position to be retained in the resulting
`Alignment`.
Returns
-------
Alignment
The subalignment containing only the positions with gaps in at most
`maximum_gap_frequency` fraction of the sequences.
Examples
--------
>>> from skbio.alignment import Alignment
>>> from skbio.sequence import DNA
>>> sequences = [DNA('AC--', id="seq1"),
... DNA('AT-C', id="seq2"),
... DNA('TT-C', id="seq3")]
>>> a1 = Alignment(sequences)
>>> a2 = a1.omit_gap_positions(0.50)
>>> a2
<Alignment: n=3; mean +/- std length=3.00 +/- 0.00>
>>> print(a2[0])
AC-
>>> print(a2[1])
ATC
>>> print(a2[2])
TTC
"""
# handle empty Alignment case
if self.is_empty():
return self.__class__([])
position_frequencies = self.position_frequencies()
gap_alphabet = self[0].gap_alphabet()
positions_to_keep = []
for i, f in enumerate(position_frequencies):
gap_frequency = sum([f[c] for c in gap_alphabet])
if gap_frequency <= maximum_gap_frequency:
positions_to_keep.append(i)
return self.subalignment(positions_to_keep=positions_to_keep)
def omit_gap_sequences(self, maximum_gap_frequency):
"""Returns Alignment with sequences filtered based on gap frequency
Parameters
----------
maximum_gap_frequency : float
The maximum fraction of the positions that can contain a gap in a
given sequence for that sequence to be retained in the resulting
`Alignment`.
Returns
-------
Alignment
The subalignment containing only the sequences with gaps in at most
`maximum_gap_frequency` fraction of the positions.
Examples
--------
>>> from skbio.alignment import Alignment
>>> from skbio.sequence import DNA
>>> sequences = [DNA('AC--', id="seq1"),
... DNA('AT-C', id="seq2"),
... DNA('TT-C', id="seq3")]
>>> a1 = Alignment(sequences)
>>> a2 = a1.omit_gap_sequences(0.49)
>>> a2
<Alignment: n=2; mean +/- std length=4.00 +/- 0.00>
>>> print(a2[0])
AT-C
>>> print(a2[1])
TT-C
"""
# handle empty Alignment case
if self.is_empty():
return self.__class__([])
base_frequencies = self.k_word_frequencies(k=1)
gap_alphabet = self[0].gap_alphabet()
seqs_to_keep = []
for seq, f in zip(self, base_frequencies):
gap_frequency = sum([f[c] for c in gap_alphabet])
if gap_frequency <= maximum_gap_frequency:
seqs_to_keep.append(seq.id)
return self.subalignment(seqs_to_keep=seqs_to_keep)
def position_counters(self):
"""Return collection.Counter object for positions in Alignment
Returns
-------
list
List of ``collections.Counter`` objects, one for each position in
the `Alignment`.
See Also
--------
position_frequencies
position_entropies
Examples
--------
>>> from skbio.alignment import Alignment
>>> from skbio.sequence import DNA
>>> sequences = [DNA('AC--', id="seq1"),
... DNA('AT-C', id="seq2"),
... DNA('TT-C', id="seq3")]
>>> a1 = Alignment(sequences)
>>> for counter in a1.position_counters():
... print(counter)
Counter({'A': 2, 'T': 1})
Counter({'T': 2, 'C': 1})
Counter({'-': 3})
Counter({'C': 2, '-': 1})
"""
return [Counter(p) for p in self.iter_positions(constructor=str)]
def position_frequencies(self):
"""Return frequencies of characters for positions in Alignment
Returns
-------
list
List of ``collections.defaultdict`` objects, one for each position
in the `Alignment`, representing the frequency of each character in
the `Alignment` at that position.
See Also
--------
position_counters
position_entropies
k_word_frequencies
Examples
--------
>>> from skbio.alignment import Alignment
>>> from skbio.sequence import DNA
>>> sequences = [DNA('AC--', id="seq1"),
... DNA('AT-C', id="seq2"),
... DNA('TT-C', id="seq3")]
>>> a1 = Alignment(sequences)
>>> position_freqs = a1.position_frequencies()
>>> print(round(position_freqs[0]['A'],3))
0.667
>>> print(round(position_freqs[1]['A'],3))
0.0
"""
result = []
# handle the empty Alignment case
if self.is_empty():
return result
count = 1 / self.sequence_count()
for p in self.iter_positions(constructor=str):
current_freqs = defaultdict(float)
for c in p:
current_freqs[c] += count
result.append(current_freqs)
return result
def position_entropies(self, base=None,
nan_on_non_standard_chars=True):
"""Return Shannon entropy of positions in Alignment
Parameters
----------
base : float, optional
log base for entropy calculation. If not passed, default will be e
(i.e., natural log will be computed).
nan_on_non_standard_chars : bool, optional
if True, the entropy at positions containing characters outside of
the first sequence's `iupac_standard_characters` will be `np.nan`.
This is useful, and the default behavior, as it's not clear how a
gap or degenerate character should contribute to a positional
entropy. This issue was described in [1]_.
Returns
-------
list
List of floats of Shannon entropy at `Alignment` positions. Shannon
entropy is defined in [2]_.
See Also
--------
position_counters
position_frequencies
References
----------
.. [1] Identifying DNA and protein patterns with statistically
significant alignments of multiple sequences.
Hertz GZ, Stormo GD.
Bioinformatics. 1999 Jul-Aug;15(7-8):563-77.
.. [2] A Mathematical Theory of Communication
CE Shannon
The Bell System Technical Journal (1948).
Examples
--------
>>> from skbio.alignment import Alignment
>>> from skbio.sequence import DNA
>>> sequences = [DNA('AC--', id="seq1"),
... DNA('AT-C', id="seq2"),
... DNA('TT-C', id="seq3")]
>>> a1 = Alignment(sequences)
>>> print(a1.position_entropies())
[0.63651416829481278, 0.63651416829481278, nan, nan]
"""
result = []
# handle empty Alignment case
if self.is_empty():
return result
iupac_standard_characters = self[0].iupac_standard_characters()
for f in self.position_frequencies():
if (nan_on_non_standard_chars and
len(viewkeys(f) - iupac_standard_characters) > 0):
result.append(np.nan)
else:
result.append(entropy(list(f.values()), base=base))
return result
def sequence_length(self):
"""Return the number of positions in Alignment
Returns
-------
int
The number of positions in `Alignment`.
See Also
--------
sequence_lengths
sequence_count
Examples
--------
>>> from skbio.alignment import Alignment
>>> from skbio.sequence import DNA
>>> sequences = [DNA('AC--', id="seq1"),
... DNA('AT-C', id="seq2"),
... DNA('TT-C', id="seq3")]
>>> a1 = Alignment(sequences)
>>> a1.sequence_length()
4
"""
# handle the empty Alignment case
if self.is_empty():
return 0
else:
return len(self._data[0])
def to_phylip(self, map_labels=False, label_prefix=""):
"""Return phylip-formatted string representing the `SequenceCollection`
        Returns
        -------
        str
            A phylip-formatted string representing the `SequenceCollection`.
        dict
            Mapping of the ids used in the phylip string back to the original
            sequence ids (an identity mapping unless `map_labels` is True).
        """
if not self._validate_lengths():
raise SequenceCollectionError("PHYLIP-formatted string can only "
"be generated if all sequences are "
"of equal length.")
if self.is_empty():
raise SequenceCollectionError("PHYLIP-formatted string can only "
"be generated if there is at least "
"one sequence in the Alignment.")
sequence_length = self.sequence_length()
if sequence_length == 0:
raise SequenceCollectionError("PHYLIP-formatted string can only "
"be generated if there is at least "
"one position in the Alignment.")
ids = self.ids()
sequence_count = self.sequence_count()
result = ["%d %d" % (sequence_count, sequence_length)]
if map_labels:
_, new_id_to_old_id = self.int_map(prefix=label_prefix)
old_id_to_new_id = {v: k for k, v in new_id_to_old_id.items()}
else:
new_id_to_old_id = {seq_id: seq_id for seq_id in ids}
old_id_to_new_id = new_id_to_old_id
for seq_id in ids:
new_id = old_id_to_new_id[seq_id]
seq = self[seq_id]
result.append("%s %s" % (new_id, str(seq)))
return '\n'.join(result), new_id_to_old_id
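    # A hedged usage sketch (illustration only, assuming the Alignment `a1`
    # built in the doctests above):
    #
    #     phylip_str, id_map = a1.to_phylip(map_labels=True, label_prefix="s")
    #     # phylip_str starts with the line "3 4", one sequence line follows
    #     # per record; id_map maps the new prefixed integer ids back to
    #     # "seq1", "seq2", "seq3".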
def _validate_lengths(self):
"""Return ``True`` if all sequences same length, ``False`` otherwise
"""
seq1_length = self.sequence_length()
for seq in self:
if seq1_length != len(seq):
return False
return True
class StockholmAlignment(Alignment):
"""Contains the metadata information in a Stockholm file alignment
Parameters
----------
seqs : list of `skbio.sequence.BiologicalSequence` objects
The `skbio.sequence.BiologicalSequence` objects to load.
gf : dict, optional
GF info in the format {feature: info}
gs : dict of dicts, optional
GS info in the format {feature: {seqlabel: info}}
gr : dict of dicts, optional
GR info in the format {feature: {seqlabel: info}}
gc : dict, optional
GC info in the format {feature: info}
Notes
-----
The Stockholm format is described in [1]_ and [2]_.
If there are multiple references, include information for each R* line
as a list, with reference 0 information in position 0 for all lists,
etc. This list will be broken up into the appropriate bits for each
reference on string formatting.
If there are multiple trees included, use a list to store identifiers
and trees, with position 0 holding identifier for tree in position 0,
etc.
References
----------
.. [1] http://sonnhammer.sbc.su.se/Stockholm.html
.. [2] http://en.wikipedia.org/wiki/Stockholm_format
Examples
--------
Assume we have a basic stockholm file with the following contents::
# STOCKHOLM 1.0
seq1 ACC--G-GGGU
seq2 TCC--G-GGGA
#=GC SS_cons (((.....)))
//
>>> from skbio.sequence import RNA
>>> from skbio.alignment import StockholmAlignment
>>> from StringIO import StringIO
>>> sto_in = StringIO("# STOCKHOLM 1.0\\n"
... "seq1 ACC--G-GGGU\\nseq2 TCC--G-GGGA\\n"
... "#=GC SS_cons (((.....)))\\n//")
>>> sto_records = StockholmAlignment.from_file(sto_in, RNA)
>>> sto = next(sto_records)
>>> print(sto)
# STOCKHOLM 1.0
seq1 ACC--G-GGGU
seq2 TCC--G-GGGA
#=GC SS_cons (((.....)))
//
>>> sto.gc
{'SS_cons': '(((.....)))'}
We can also write out information by instantiating the StockholmAlignment
object and then printing it.
>>> from skbio.sequence import RNA
>>> from skbio.alignment import StockholmAlignment
>>> seqs = [RNA("ACC--G-GGGU", id="seq1"),
... RNA("TCC--G-GGGA", id="seq2")]
>>> gf = {
... "RT": ["TITLE1", "TITLE2"],
... "RA": ["Auth1;", "Auth2;"],
... "RL": ["J Mol Biol", "Cell"],
... "RM": ["11469857", "12007400"]}
>>> sto = StockholmAlignment(seqs, gf=gf)
>>> print(sto)
# STOCKHOLM 1.0
#=GF RN [1]
#=GF RM 11469857
#=GF RT TITLE1
#=GF RA Auth1;
#=GF RL J Mol Biol
#=GF RN [2]
#=GF RM 12007400
#=GF RT TITLE2
#=GF RA Auth2;
#=GF RL Cell
seq1 ACC--G-GGGU
seq2 TCC--G-GGGA
//
"""
def __init__(self, seqs, gf=None, gs=None, gr=None, gc=None,
validate=False):
self.gf = gf if gf else {}
self.gs = gs if gs else {}
self.gr = gr if gr else {}
self.gc = gc if gc else {}
super(StockholmAlignment, self).__init__(seqs, validate)
def __str__(self):
"""Parses StockholmAlignment into a string with stockholm format
Returns
-------
str
Stockholm formatted string containing all information in the object
Notes
-----
If references are included in GF data, the RN lines are automatically
generated if not provided.
"""
# find length of leader info needed to make file pretty
        # 10 covers the characters for the '#=GF ' prefix plus the feature
        # code that follows the label
infolen = max(len(seq.id) for seq in self._data) + 10
GF_lines = []
GS_lines = []
GC_lines = []
        # NOTE: EVERYTHING MUST BE COERCED TO STR in case int or float passed
# add GF information if applicable
if self.gf:
skipfeatures = set(("NH", "RC", "RM", "RN", "RA", "RL"))
for feature, value in self.gf.items():
# list of features to skip and parse special later
if feature in skipfeatures:
continue
# list of features to parse special
elif feature == "TN":
# trees must be in proper order of identifier then tree
ident = value if isinstance(value, list) else [value]
tree = self.gf["NH"] if isinstance(self.gf["NH"], list) \
else [self.gf["NH"]]
for ident, tree in zip(self.gf["TN"], self.gf["NH"]):
GF_lines.append(' '.join(["#=GF", "TN", str(ident)]))
GF_lines.append(' '.join(["#=GF", "NH", str(tree)]))
elif feature == "RT":
# make sure each reference block stays together
# set up lists to zip in case some bits are missing
# create rn list if needed
default_none = [0]*len(value)
rn = self.gf.get("RN", ["[%i]" % x for x in
range(1, len(value)+1)])
rm = self.gf.get("RM", default_none)
rt = self.gf.get("RT", default_none)
ra = self.gf.get("RA", default_none)
rl = self.gf.get("RL", default_none)
rc = self.gf.get("RC", default_none)
# order: RN, RM, RT, RA, RL, RC
for n, m, t, a, l, c in zip(rn, rm, rt, ra, rl, rc):
GF_lines.append(' '.join(["#=GF", "RN", n]))
if m:
GF_lines.append(' '.join(["#=GF", "RM", str(m)]))
if t:
GF_lines.append(' '.join(["#=GF", "RT", str(t)]))
if a:
GF_lines.append(' '.join(["#=GF", "RA", str(a)]))
if l:
GF_lines.append(' '.join(["#=GF", "RL", str(l)]))
if c:
GF_lines.append(' '.join(["#=GF", "RC", str(c)]))
else:
# normal addition for everything else
if not isinstance(value, list):
value = [value]
for val in value:
GF_lines.append(' '.join(["#=GF", feature, str(val)]))
# add GS information if applicable
if self.gs:
for feature in self.gs:
for seqname in self.gs[feature]:
GS_lines.append(' '.join(["#=GS", seqname, feature,
str(self.gs[feature][seqname])]))
# add GC information if applicable
if self.gc:
for feature, value in viewitems(self.gc):
leaderinfo = ' '.join(["#=GC", feature])
spacer = ' ' * (infolen - len(leaderinfo))
GC_lines.append(spacer.join([leaderinfo,
str(self.gc[feature])]))
sto_lines = ["# STOCKHOLM 1.0"] + GF_lines + GS_lines
# create seq output along with GR info if applicable
for label, seq in self.iteritems():
spacer = ' ' * (infolen - len(label))
sto_lines.append(spacer.join([label, str(seq)]))
# GR info added for sequence
for feature in viewkeys(self.gr):
value = self.gr[feature][label]
leaderinfo = ' '.join(['#=GR', label, feature])
spacer = ' ' * (infolen - len(leaderinfo))
sto_lines.append(spacer.join([leaderinfo, value]))
sto_lines.extend(GC_lines)
# add final slashes to end of file
sto_lines.append('//')
return '\n'.join(sto_lines)
def to_file(self, out_f):
r"""Save the alignment to file in text format.
Parameters
----------
out_f : file-like object or filename
File-like object to write serialized data to, or name of
file. If it's a file-like object, it must have a ``write``
method, and it won't be closed. Else, it is opened and
closed after writing.
See Also
--------
from_file
"""
with open_file(out_f, 'w') as out_f:
out_f.write(self.__str__())
@staticmethod
def _parse_gf_info(lines):
"""Takes care of parsing GF lines in stockholm plus special cases"""
parsed = defaultdict(list)
# needed for making each multi-line RT and NH one string
rt = []
nh = []
lastline = ""
for line in lines:
try:
init, feature, content = line.split(None, 2)
except ValueError:
raise StockholmParseError("Malformed GF line encountered!"
"\n%s" % line.split(None, 2))
if init != "#=GF":
raise StockholmParseError("Non-GF line encountered!")
# take care of adding multiline RT to the parsed information
if lastline == "RT" and feature != "RT":
# add rt line to the parsed dictionary
rtline = " ".join(rt)
rt = []
parsed["RT"].append(rtline)
elif feature == "RT":
rt.append(content)
lastline = feature
continue
# Take care of adding multiline NH to the parsed dictionary
elif lastline == "NH" and feature != "NH":
nhline = " ".join(nh)
nh = []
parsed["NH"].append(nhline)
elif feature == "NH":
nh.append(content)
lastline = feature
continue
# add current feature to the parsed information
parsed[feature].append(content)
lastline = feature
        # remove unnecessary lists from parsed. Use .items() for py3 support
for feature, value in parsed.items():
# list of multi-line features to join into single string if needed
if feature in ["CC"]:
parsed[feature] = ' '.join(value)
elif len(parsed[feature]) == 1:
parsed[feature] = value[0]
return parsed
@staticmethod
def _parse_gc_info(lines, strict=False, seqlen=-1):
"""Takes care of parsing GC lines in stockholm format"""
parsed = {}
for line in lines:
try:
init, feature, content = line.split(None, 2)
except ValueError:
raise StockholmParseError("Malformed GC line encountered!\n%s"
% line.split(None, 2))
if init != "#=GC":
raise StockholmParseError("Non-GC line encountered!")
            # add current feature to the parsed information
            if feature in parsed:
                if strict:
                    raise StockholmParseError("Should not have multiple lines "
                                              "with the same feature: %s" %
                                              feature)
                # interleaved format: append to the existing content
                parsed[feature].append(content)
            else:
                parsed[feature] = [content]
        # remove unnecessary lists from parsed. Use .items() for py3 support
for feature, value in parsed.items():
parsed[feature] = ''.join(value)
            if strict:
                if len(parsed[feature]) != seqlen:
                    raise StockholmParseError("GC must have exactly one char "
                                              "per position in alignment!")
return parsed
@staticmethod
def _parse_gs_gr_info(lines, strict=False, seqlen=-1):
"""Takes care of parsing GS and GR lines in stockholm format"""
parsed = {}
parsetype = ""
for line in lines:
try:
init, label, feature, content = line.split(None, 3)
except ValueError:
raise StockholmParseError("Malformed GS/GR line encountered!"
"\n%s" % line.split(None, 3))
if parsetype == "":
parsetype = init
elif init != parsetype:
raise StockholmParseError("Non-GS/GR line encountered!")
            # parse each line, taking into account interleaved format
            if feature in parsed:
                if label in parsed[feature]:
                    # interleaved format, so need list of content
                    parsed[feature][label].append(content)
                else:
                    parsed[feature][label] = [content]
            else:
                parsed[feature] = {label: [content]}
        # join the per-label content chunks accumulated during parsing
for feature in parsed:
for label, content in parsed[feature].items():
parsed[feature][label] = ''.join(content)
if strict:
if len(parsed[feature][label]) != seqlen:
raise StockholmParseError("GR must have exactly one "
"char per position in the "
"alignment!")
return parsed
@classmethod
def from_file(cls, infile, seq_constructor, strict=False):
r"""yields StockholmAlignment objects from a stockholm file.
Parameters
----------
infile : open file object
An open stockholm file.
        seq_constructor : BiologicalSequence object
            The BiologicalSequence subclass corresponding to the type of
            sequence stored in the stockholm file. See skbio.sequence
strict : bool (optional)
Turns on strict parsing of GR and GC lines to ensure one char per
position. Default: False
Returns
-------
Iterator of StockholmAlignment objects
Raises
------
skbio.alignment.StockholmParseError
If any lines are found that don't conform to stockholm format
"""
        # make sure first line is correct
line = infile.readline()
if not line.startswith("# STOCKHOLM 1.0"):
raise StockholmParseError("Incorrect header found")
gs_lines = []
gf_lines = []
gr_lines = []
gc_lines = []
# OrderedDict used so sequences maintain same order as in file
seqs = OrderedDict()
for line in infile:
line = line.strip()
if line == "" or line.startswith("# S"):
# skip blank lines or secondary headers
continue
elif line == "//":
# parse the record since we are at its end
                # build the sequence list for alignment construction
seqs = [seq_constructor(seq, id=_id) for _id, seq in
viewitems(seqs)]
                # get length of sequences in the alignment (seqs now holds
                # sequence objects, so index the list, not a sequence)
                seqlen = len(seqs[0])
# parse information lines
gf = cls._parse_gf_info(gf_lines)
gs = cls._parse_gs_gr_info(gs_lines)
gr = cls._parse_gs_gr_info(gr_lines, strict, seqlen)
gc = cls._parse_gc_info(gc_lines, strict, seqlen)
# yield the actual stockholm object
yield cls(seqs, gf, gs, gr, gc)
# reset all storage variables
gs_lines = []
gf_lines = []
gr_lines = []
gc_lines = []
seqs = OrderedDict()
# add the metadata lines to the proper lists
elif line.startswith("#=GF"):
gf_lines.append(line)
elif line.startswith("#=GS"):
gs_lines.append(line)
elif line.startswith("#=GR"):
gr_lines.append(line)
elif line.startswith("#=GC"):
gc_lines.append(line)
else:
lineinfo = line.split()
# assume sequence since nothing else in format is left
# in case of interleaved format, need to do check
if lineinfo[0] in seqs:
sequence = seqs[lineinfo[0]]
seqs[lineinfo[0]] = ''.join([sequence, lineinfo[1]])
else:
seqs[lineinfo[0]] = lineinfo[1]
|
JWDebelius/scikit-bio
|
skbio/alignment/_alignment.py
|
Python
|
bsd-3-clause
| 67,569
|
[
"scikit-bio"
] |
9b71966d23866cff938b28da6b368cba37ba295564ff53e8bfc2ceef22f4c91a
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#
# www.genesilico.pl
#
#creates ranked 3D models of macromolecular complexes
#based on experimental restraints and a whole complex shape.
__author__ = "Joanna M. Kasprzak"
__copyright__ = "Copyright 2010, The PyRy3D Project"
__credits__ = ["Janusz Bujnicki"]
__license__ = "GPL"
__version__ = "0.1.0"
__maintainer__ = "Joanna Kasprzak"
__email__ = "jkasp@amu.edu.pl"
__status__ = "Prototype"
import sys, os, glob, shutil
#Internal imports
#BioPython
from Bio import PDB
from Bio.PDB import PDBParser, PDBIO
from Bio.PDB.Atom import Atom
from Bio.PDB.Residue import Residue
from Bio.PDB.Chain import Chain
from Bio.PDB.Model import Model
from Bio.PDB.Structure import Structure
from numpy import array, zeros
from math import sqrt
import optparse
from External_Applications.MinkoFit3D.EMmap import EMmap
from External_Applications.MinkoFit3D.AtomicStructure import AtomicStructure
from External_Applications.MinkoFit3D.corcoe import CorCoe
from External_Applications.MinkoFit3D.ccp4_reader import CCP4
class PyRy3D_IG_Error(Exception): pass
DISTANCES_LIST = [1.0, 2.0, 4.0, 8.0]
class Cluster():
    def __init__(self):
self.rmsd = 0.0
self.gdt_ts = 0.0
self.di = 0.0
self.val_matrix = None
self.cluster_matrix = None
self.best_scored = []
def iterate_structures(self, structure_set, dist_type, cutoff, struct_nr, score_type, oligo_type):
"""
        calculates score matrices
"""
if score_type == "pyry3d": structure_set = sorted(structure_set, key=lambda struct: struct.score, reverse=True)
elif score_type == "ccc": structure_set = sorted(structure_set, key=lambda struct: struct.ccc, reverse=True)
#cluster only struct_nr best scored complexes
self.best_scored = structure_set[:struct_nr+1]
print "best scored ccc: ",len(self.best_scored), struct_nr, len(structure_set)
size = len(self.best_scored)
self.val_matrix = zeros((size, size))
self.cluster_matrix = zeros((size, size))
print "number of structures: ", len(self.best_scored)
for st1 in self.best_scored: #structure_set:
for st2 in self.best_scored: #[index:]:
#print "comparing", st1.filename, st2.filename
value = self.calculate_distance(st1, st2, dist_type, oligo_type)
self.val_matrix[self.best_scored.index(st1), self.best_scored.index(st2)] = value
if value <= cutoff: self.cluster_matrix[self.best_scored.index(st1), self.best_scored.index(st2)] = 1.
else: self.cluster_matrix[self.best_scored.index(st1), self.best_scored.index(st2)] = 0.
#print "measure matrix: ", self.val_matrix
#print "clust matrix: ", self.cluster_matrix
def cluster(self, cutoff, struct_nr, score_type):
"""
performs the clustering procedure, call appropriate methods
"""
print "CLUSTERS, %i best scored models, cut off %i A" %(struct_nr, cutoff)
results = []
clusters = []
size = len(self.best_scored)
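        # Greedy clustering on the binary matrix (sketch of the loop below):
        # repeatedly pick the row with the most neighbours within the cutoff,
        # report its members as one cluster, then zero those columns so
        # already clustered conformers cannot seed or join another cluster.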
while 1:
biggest = []
for a in self.cluster_matrix:
licz = a.sum()
biggest.append(licz)
big_est = array(biggest)
maxim = big_est.max()
if maxim < 1: break
ind = biggest.index(maxim)
niez = self.cluster_matrix[ind,:]
tozer = niez.nonzero()
clusters.append(tozer[0])
#
clust = []
line = "Clustered conformers number "+str(len(tozer[0]))+"\n"
results.append(line)
print line
for ze in tozer[0]:
clust.append(self.best_scored[ze])
nam = self.best_scored[ze]
if score_type == "pyry3d": line = nam.filename+" "+"score\t"+str(self.best_scored[self.best_scored.index(nam)].score)+"\n"
else: line= nam.filename+" \t"+"score"+str(self.best_scored[self.best_scored.index(nam)].ccc)+"\n"
print line
results.append(line)
#
self.cluster_matrix[:,tozer]= 0
return results, clusters
def calculate_distance(self, st1, st2, dist_type, option):
"""
        calculates distances: RMSD, GDT_TS, TMSCORE
"""
if dist_type.upper() == "RMSD":
if option == "oligo":
return self.calculate_rmsd_oligo(st1, st2)
else:
return self.calculate_rmsd(st1, st2)
elif dist_type.upper() == "GDT_TS": return self.calculate_gdt(st1, st2)
elif dist_type.upper() == "TMSCORE": return self.calculate_TMScore(st1, st2)
def calculate_rmsd_oligo(self, st1, st2):
"""
calculates rmsd for two structures
"""
similarity_map, total_rmsd = [], 0.0
ref_list = list(st2.structure.get_chains())
for chain in st1.structure.get_chains():
min_distance = 0
closest = None
for refch in ref_list:
if (len(chain.child_list) == len(list(refch.child_list))) :
distance = self.calculate_chain_rmsd_matrix(chain, refch, atomtype="CA")
if (min_distance == 0) or (distance < min_distance):
min_distance = distance
closest = refch
if None != closest:
similarity_map.append( [chain, closest] )
ref_list.remove(closest)
sum_dist, sum_length, pair_dist, pair_length = 0., 0., 0., 0.
for c1, c2 in similarity_map:
pair_dist, pair_length = self.calculate_chain_rmsd_matrix(c1, c2, atomtype="CA")
sum_dist += pair_dist
sum_length += pair_length
pair_rmsd = sqrt(pair_dist/pair_length)
print "RMSD type", c1.id, c2.id, pair_rmsd,
self.rmsd = sqrt(sum_dist/sum_length)
print "RMSDtotal", self.rmsd
return self.rmsd
def calculate_rmsd(self, st1, st2):
"""
        calculates RMSD for regular complexes (not oligomers)
"""
coords1, coords2 = [], []
atoms1 = list(st1.structure.get_atoms())
atoms2 = list(st2.structure.get_atoms())
for a in atoms1:
coords1.append(a.coord)
for at in atoms2:
coords2.append(at.coord)
coords1 = array(coords1)
coords2 = array(coords2)
rmsd = 0.0
if len(atoms1) != len(atoms2):
raise PyRy3D_IG_Error("Compared structures %s %s possess different number of atoms"%(st1.filename, st2.filename))
rmsd_mat = coords1 - coords2
rmsd_mat = rmsd_mat**2
rmsd = sqrt(rmsd_mat.sum()/len(rmsd_mat))
#print "RMSD", rmsd
return rmsd
def calculate_chain_rmsd_matrix(self, st1, st2, atomtype):
"""
calculates rmsd for chains
"""
rmsd_total = 0
rmsd_matrix = []
st1_residues = st1.child_list
st2_residues = st2.child_list
resi_pairs = zip(st1_residues, st2_residues)
for resi_pair in resi_pairs:
            rmsd_pair = self.calculate_rmsd_for_two_residues(resi_pair, atomtype)
rmsd_matrix.append(rmsd_pair)
return sum(rmsd_matrix), len(rmsd_matrix)
    def calculate_rmsd_for_two_residues(self, resi_pair, atomtype):
        """
        calculates rmsd for two residues
        """
pair_rmsd = 0.0
resi1 = resi_pair[0]
resi2 = resi_pair[1]
resi2_atoms = resi2.child_list
index = 0
for at in resi1.child_list:
            # for C-alphas (or the requested atom type) only
if at.name == atomtype:
pair_rmsd += self.calculate_rmsd_for_atoms(at, resi2_atoms[index])
index += 1
return pair_rmsd
def calculate_rmsd_for_atoms(self, at1, at2):
"""
calculates rmsd for any two atoms
"""
rmsd_mat = at1 - at2
rmsd_mat = rmsd_mat**2
#rmsd = sqrt(rmsd_mat.sum()/len(rmsd_mat))
return rmsd_mat #rmsd
def calculate_TMScore(self, st1, st2):
"""
Returns TM score.
by M.Rother
"""
st1_resi_nr = len(list(st1.structure.get_residues()))
st2_resi_nr = len(list(st2.structure.get_residues()))
resi_nr = min(st1_resi_nr, st2_resi_nr)
if st1_resi_nr <15 or st2_resi_nr <15:
print 'WARNING: cannot calculate TM score for structures containing less than 15 residues'
return None
#rmsd_calc = self.calculate_rmsd(st1, st2)
rmsd_matrix = self.calculate_residue_rmsd_matrix(st1, st2)
d0 = self.calculate_TMScore_normalization_factor(resi_nr)
return sum([1.0/(1.0+(dist/d0)**2.0) for dist in rmsd_matrix])/resi_nr
def calculate_residue_rmsd_matrix(self, st1, st2):
"""
rmsd matrix is created for all pairs of residues
"""
rmsd_matrix = []
st1_residues = list(st1.structure.get_residues())
st2_residues = list(st2.structure.get_residues())
resi_pairs = zip(st1_residues, st2_residues)
        for resi_pair in resi_pairs:
            # compare C-alpha atoms only, as in the chain-level RMSD above
            rmsd_pair = self.calculate_rmsd_for_two_residues(resi_pair,
                                                             atomtype="CA")
            # store per-residue distances: the callers (TM-score, GDT_TS)
            # compare these against distance cutoffs in Angstroms
            rmsd_matrix.append(sqrt(rmsd_pair))
        return rmsd_matrix
def calculate_TMScore_normalization_factor(self, resi_nr):
"""
Calculates the factor that reduces the influence of structure length.
"""
return 1.24 * (float(resi_nr)-15.0)**(1.0/3.0) - 1.8
def calculate_gdt(self, st1, st2):
"""
Calculates GDT_TS score by counting residues under
distances defined in DISTANCES_LIST.
by T.Puton
"""
resi_rmsd = self.calculate_residue_rmsd_matrix(st1, st2)
resi_sum = 0.0
for dist in DISTANCES_LIST:
resi_sum += self.count_values_under_cutoff(resi_rmsd, dist)
gdt_ts = resi_sum/float(len(DISTANCES_LIST)*len(resi_rmsd))
print "GDT_TS", gdt_ts
return gdt_ts
def count_values_under_cutoff(self, values_list, cutoff):
"""
        Counts how many values from the given list are lower than or equal to the given cutoff.
"""
counter = 0
for x in values_list:
if x <= cutoff: counter += 1
return counter
def save_cluster_matrix(self, outfolder):
"""
clustering matrix is saved to a text file
"""
fh = open(str(outfolder)+"cluster_matrix.txt", "w")
matrix = str(self.cluster_matrix) #.reshape(-1,).tolist())
fh.write(matrix)
fh.close()
def save_measure_matrix(self, outfolder):
"""
RMSD matrix (or matrix containing other scores) is saved into a text file
"""
fh = open(str(outfolder)+"measure_matrix.txt", "w")
matrix = str(self.val_matrix)
fh.write(matrix)
fh.close()
def sort_to_files(self, threshold, clusters, outname, infolder):
"""
Clusters with number of elements above given threshold are saved in separate folders
"""
if threshold > len(clusters[0]): #if all clusters are smaller than size given by user
print "There are no clusters with required number of members. The program will copy five largest clusters instead."
if len(clusters) >= 5:
clusters = clusters[0:5]
self.copy_files_to_separate_folders(clusters, threshold, infolder, outname)
def copy_files_to_separate_folders(self, clusters, threshold, infolder, outname):
"""
Copies files from particular clusters into separate subfolders
"""
count = 0
for cluster in clusters:
if len(cluster) >= threshold:
count += 1
dir = outname+"/cluster_" + str(len(cluster)) + "_number_" + str(count)
if not os.path.exists(dir):
os.makedirs(dir)
for f in cluster:
nam = self.best_scored[f].filename
path_in = os.path.join(infolder,nam)
shutil.copy(path_in,dir)
else: pass
class Cluster_Structure():
def __init__(self, struct, filename, path, score=None):
self.structure = struct
self.filename = filename
self.full_path = path
self.score = score
self.ccc = None
self.dmap = None
def write_pdb(self,structure, filename):
"""
        Writing to the pdb file, saving changed coordinates
"""
fp=open(filename+"out.pdb", "w")
io=PDBIO(1)
io.set_structure(structure)
io.save(fp)
def set_density_map(self, dmap):
self.dmap = dmap
def calculate_ccc(self, map_threshold):
"""
        calculates the cross-correlation coefficient
"""
st = AtomicStructure()
st.read(self.full_path)
volume = EMmap(self.dmap, float(map_threshold))
volume.read_volume_fast()
corcoe = CorCoe(volume, st)
self.ccc = corcoe.calculate_cc()
print "corcoe--", self.ccc
def extract_structures(folder, scoretype, representation = "fa", density_map = None, map_threshold = None):
"""
uses Bio.PDB to extract structure objects from pdb files
"""
structures = []
pdb_files = glob.glob(str(folder)+'/*.pdb')
if len(pdb_files) == 0: raise PyRy3D_IG_Error("The files you provided are not pdb files")
parser = PDBParser(PERMISSIVE=False, QUIET=True)
for pdbfile in pdb_files:
ffilename = os.path.split(pdbfile)[1]
scorelist = ffilename.split("_")
#score = float(scorelist[1])
#print "##", len(scorelist), scorelist
if len(scorelist) == 3:
score = float(scorelist[0])
elif len(scorelist) == 5:
score = float(scorelist[2])
elif len(scorelist) == 4:
score = float(scorelist[1])
elif len(scorelist) == 6:
score = float(scorelist[3])
else:
print "Input file names do not contain score on expected positions, program assigned 0.0 for all complexes"
score = 0.0 #float(scorelist[1])
structure = parser.get_structure(str(pdbfile), pdbfile)
####
        #check representation and change it if needed
if representation.lower() == "fa":
pass
elif representation.lower() == "ca":
structure = retrieve_ca_model(structure)
elif representation.lower() == "sphere":
structure = retrieve_sphere_model(structure) #, score)
####
struc = Cluster_Structure(structure,ffilename,pdbfile, score)
if density_map: struc.set_density_map(density_map)
if scoretype == "ccc":
struc.calculate_ccc(map_threshold)
#print "SCORE", score
if len(list(structure.get_residues())) == 0:
raise PyRy3D_IG_Error("The file you provided for structure %s is not a valid pdb file"%(structure.id))
structures.append(struc)
del structure
return structures
def retrieve_ca_model(structure):
"""
    chains are represented only by main chain atoms (C-alphas or C4')
"""
reduced_struct = Structure('clustering_model')
my_model = Model(0)
reduced_struct.add(my_model)
main_chain_atoms = []
for ch in structure[0]:
my_chain = Chain(ch.id)
reduced_struct[0].add(my_chain)
for resi in ch:
for atom in resi:
#print "----", resi.id, resi.get_segid(), ch.id
if atom.get_name() == "CA" or atom.get_name() == "C4'" or atom.get_name() == "C4*":
my_residue = Residue((' ',resi.id[1],' '),resi.get_resname(),' ')
atom = Atom('CA',atom.coord, 0, ' ', ' ', 'CA',atom.get_serial_number())
my_chain.add(my_residue)
my_residue.add(atom)
main_chain_atoms.append(atom)
return reduced_struct
def retrieve_sphere_model(structure): #, score):
"""
each chain is here represented by centre of mass only
"""
sphere_struct = Structure('clustering_model')
my_model = Model(0)
sphere_struct.add(my_model)
    #the numbering will be changed below
chain_mass_centres, index = [], 0
for chain in structure.get_chains():
my_chain = Chain(chain.id)
sphere_struct[0].add(my_chain)
coord = calculate_centre_of_complex(chain)
chain_mass_centres.append(coord)
my_residue = Residue((' ',index,' '),chain.id,' ')
coords = array(coord,'f')
atom = Atom('CA',coords, 0, 0, ' ', 'CA',1)
my_chain.add(my_residue)
my_residue.add(atom)
index += 1
del structure
return sphere_struct
def write_structure(structure, filename):
"""
    Writing structure to the pdb file, saving changed coordinates
Parameters:
-----------
filename : final name of structure file
"""
out = PDBIO()
out.set_structure(structure)
out.save(filename)
def calculate_centre_of_complex(component):
"""
    calculates the centre of mass of a given component (e.g. a single chain)
"""
component_centre = [0.,0.,0.]
total_mass = 0
for atom in component.get_atoms():
mass = assign_molweight(atom.get_name())
total_mass += mass
component_centre += atom.coord * mass
component_centre /= total_mass
return component_centre
def assign_molweight(atom_id):
"""
    assigns a molecular weight to the given atom
    Raises:
    -------
    PyRy3D_IG_Error if atom name is not known
"""
#atom_name = self.get_name()[0]
MOLWEIGHTS = {
'?' : 0.0, 'H' : 1.00794, 'C' : 12.0107, 'N' : 14.0067,
'O' : 15.9994, 'P' : 30.973761, 'S' : 32.065}
#atom_id = self.get_name()
    atom_name = None
    for char in atom_id:
        if char in MOLWEIGHTS:
            atom_name = char
            break
    if atom_name in MOLWEIGHTS:
        molweight = MOLWEIGHTS[atom_name]
        return molweight
    else:
        # PyRyStructureError is not defined in this module; raise the
        # module's own exception instead
        raise PyRy3D_IG_Error("Atom not known: " + atom_id)
def start_clustering(infolder,score_type,density_map,density_map_threshold,\
measure,threshold,struct_nr,representation,output,oligos, sort):
structures = extract_structures(infolder, score_type, representation, density_map, density_map_threshold)
#print "structures extracted"
c = Cluster()
#print "cluster instance initiated"
if struct_nr == 0:
struct_nr = len(structures)
#print "starting iterating"
c.iterate_structures(structures, measure,int(threshold),int(struct_nr), score_type, oligos)
#print "start clustering"
res,clust = c.cluster(int(threshold),int(struct_nr), score_type)
#print "clustering ended"
#print "sorttofiles"
if sort:
c.sort_to_files(int(sort), clust, output, infolder)
print "saving output"
clustersfile = open(output+"/clusters.txt", "w")
for el in res:
clustersfile.write(el)
clustersfile.close()
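# --- Hedged usage sketch (illustration only, not part of the original tool) ---
# A minimal optparse driver showing how start_clustering() above might be
# wired to the command line; the option names and default values here are
# assumptions made for this example, not documented PyRy3D flags.
def _example_main():
    parser = optparse.OptionParser()
    parser.add_option("-i", "--infolder", dest="infolder",
                      help="folder with ranked complex pdb files (assumed)")
    parser.add_option("-o", "--output", dest="output", default=".",
                      help="folder for clustering results (assumed)")
    opts, args = parser.parse_args()
    # pyry3d scoring, no density map, RMSD measure, 10 A cutoff, all
    # structures, full-atom representation, no oligomer mode, no sorting
    start_clustering(opts.infolder, "pyry3d", None, None,
                     "rmsd", 10, 0, "fa", opts.output, None, None)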
|
mdobrychlop/pyry3d_chimera_extension
|
cluster_complexes.py
|
Python
|
gpl-3.0
| 20,280
|
[
"Biopython"
] |
6191fb1d05e6408a10a933536c32af68cc35841a4b30bf161ae8f8b32c08705a
|
#!/usr/bin/env python
"""
Populates the database with the current installations of components
This script assumes that the InstalledComponentsDB, the
ComponentMonitoring service and the Notification service are installed and running
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
from DIRAC import exit as DIRACexit
from DIRAC import S_OK, gLogger, gConfig
from DIRAC.ConfigurationSystem.Client.CSAPI import CSAPI
from DIRAC.Core.Utilities.DIRACScript import DIRACScript as Script
from DIRAC.FrameworkSystem.Client.NotificationClient import NotificationClient
from DIRAC.FrameworkSystem.Client.SystemAdministratorIntegrator import SystemAdministratorIntegrator
from DIRAC.FrameworkSystem.Client.ComponentMonitoringClient import ComponentMonitoringClient
from DIRAC.FrameworkSystem.Utilities import MonitoringUtilities
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
__RCSID__ = "$Id$"
global excludedHosts
excludedHosts = []
def setExcludedHosts(value):
global excludedHosts
excludedHosts = value.split(",")
return S_OK()
@Script()
def main():
global excludedHosts
Script.registerSwitch(
"e:", "exclude=", "Comma separated list of hosts to be excluded from the scanning process", setExcludedHosts
)
Script.parseCommandLine(ignoreErrors=False)
componentType = ""
# Get my setup
mySetup = gConfig.getValue("DIRAC/Setup")
# Retrieve information from all the hosts
client = SystemAdministratorIntegrator(exclude=excludedHosts)
resultAll = client.getOverallStatus()
if not resultAll["OK"]:
gLogger.error(resultAll["Message"])
DIRACexit(-1)
# Retrieve user installing the component
result = getProxyInfo()
if result["OK"]:
user = result["Value"]["username"]
else:
DIRACexit(-1)
if not user:
user = "unknown"
for host in resultAll["Value"]:
if not resultAll["Value"][host]["OK"]:
# If the host cannot be contacted, exclude it and send message
excludedHosts.append(host)
result = NotificationClient().sendMail(
Operations().getValue("EMail/Production", []),
"Unreachable host",
"\ndirac-populate-component-db: Could not fill the database with the components from unreachable host %s\n"
% host,
)
if not result["OK"]:
gLogger.error("Can not send unreachable host notification mail: %s" % result["Message"])
resultHosts = client.getHostInfo()
if not resultHosts["OK"]:
gLogger.error(resultHosts["Message"])
DIRACexit(-1)
resultInfo = client.getInfo()
if not resultInfo["OK"]:
gLogger.error(resultInfo["Message"])
DIRACexit(-1)
resultMySQL = client.getMySQLStatus()
if not resultMySQL["OK"]:
gLogger.error(resultMySQL["Message"])
DIRACexit(-1)
resultAllDB = client.getDatabases()
if not resultAllDB["OK"]:
gLogger.error(resultAllDB["Message"])
DIRACexit(-1)
resultAvailableDB = client.getAvailableDatabases()
if not resultAvailableDB["OK"]:
gLogger.error(resultAvailableDB["Message"])
DIRACexit(-1)
records = []
finalSet = list(set(resultAll["Value"]) - set(excludedHosts))
for host in finalSet:
hasMySQL = True
result = resultAll["Value"][host]
hostResult = resultHosts["Value"][host]
infoResult = resultInfo["Value"][host]
mySQLResult = resultMySQL["Value"][host]
allDBResult = resultAllDB["Value"][host]
availableDBResult = resultAvailableDB["Value"][host]
if not result["OK"]:
gLogger.error("Host %s: %s" % (host, result["Message"]))
continue
if not hostResult["OK"]:
gLogger.error("Host %s: %s" % (host, hostResult["Message"]))
continue
if not infoResult["OK"]:
gLogger.error("Host %s: %s" % (host, infoResult["Message"]))
continue
if mySQLResult["OK"]:
if not allDBResult["OK"]:
gLogger.error("Host %s: %s" % (host, allDBResult["Message"]))
continue
if not availableDBResult["OK"]:
gLogger.error("Host %s: %s" % (host, availableDBResult["Message"]))
continue
else:
hasMySQL = False
setup = infoResult["Value"]["Setup"]
if setup != mySetup:
continue
cpu = hostResult["Value"]["CPUModel"].strip()
rDict = result["Value"]
# Components other than databases
for compType in rDict:
if componentType and componentType != compType:
continue
for system in rDict[compType]:
components = sorted(rDict[compType][system])
for component in components:
record = {"Installation": {}, "Component": {}, "Host": {}}
if rDict[compType][system][component]["Installed"] and component != "ComponentMonitoring":
runitStatus = str(rDict[compType][system][component]["RunitStatus"])
if runitStatus != "Unknown":
module = str(rDict[compType][system][component]["Module"])
record["Component"]["System"] = system
record["Component"]["Module"] = module
# Transform 'Services' into 'service', 'Agents' into 'agent' ...
record["Component"]["Type"] = compType.lower()[:-1]
record["Host"]["HostName"] = host
record["Host"]["CPU"] = cpu
record["Installation"]["Instance"] = component
record["Installation"]["InstallationTime"] = datetime.utcnow()
record["Installation"]["InstalledBy"] = user
records.append(record)
# Databases
csClient = CSAPI()
cfg = csClient.getCurrentCFG()["Value"]
if hasMySQL:
allDB = allDBResult["Value"]
availableDB = availableDBResult["Value"]
for db in allDB:
# Check for DIRAC only databases
if db in availableDB and db != "InstalledComponentsDB":
# Check for 'installed' databases
isSection = cfg.isSection(
"Systems/"
+ availableDB[db]["System"]
+ "/"
+ cfg.getOption("DIRAC/Setups/" + setup + "/" + availableDB[db]["System"])
+ "/Databases/"
+ db
+ "/"
)
if isSection:
record = {"Installation": {}, "Component": {}, "Host": {}}
record["Component"]["System"] = availableDB[db]["System"]
record["Component"]["Module"] = db
record["Component"]["Type"] = "DB"
record["Host"]["HostName"] = host
record["Host"]["CPU"] = cpu
record["Installation"]["Instance"] = db
record["Installation"]["InstallationTime"] = datetime.utcnow()
record["Installation"]["InstalledBy"] = user
records.append(record)
monitoringClient = ComponentMonitoringClient()
# Add the installations to the database
for record in records:
result = MonitoringUtilities.monitorInstallation(
record["Component"]["Type"],
record["Component"]["System"],
record["Installation"]["Instance"],
record["Component"]["Module"],
record["Host"]["CPU"],
record["Host"]["HostName"],
)
if not result["OK"]:
gLogger.error(result["Message"])
if __name__ == "__main__":
main()
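# --- Hedged usage note (illustration only) ---
# Typical invocation on a configuration server, assuming a valid proxy and
# hypothetical host names chosen for this example:
#
#     dirac-populate-component-db -e host1.example.org,host2.example.org
#
# where -e/--exclude skips the listed hosts, per the switch registered above.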
|
ic-hep/DIRAC
|
src/DIRAC/FrameworkSystem/scripts/dirac_populate_component_db.py
|
Python
|
gpl-3.0
| 8,229
|
[
"DIRAC"
] |
cc95f9aa94327be01fb8117faf1f752fa9d49835ef970c0f80d17343041da750
|
import numpy as np
import matplotlib.pyplot as plt  # required for the stat=True diagnostic plot
def generate_visits(Nvisits=900, tspan=10, stat=False,
seasonscale=365./5):
'''
Use some very crude approximations for how visits will be spaced out:
- Survey starts at midnight, time = 0.0
- Can only observe at night, time > 0.75 | time < 0.25
- Exposures are clustered around a season w/ a gaussian shape each year
- Field is observable for first half of year, 0 < date < 182
- On average, each field should be hit every 3 days during observable season
Set "stat=True" if you want a plot and a couple basic statistics about the cadence
'''
    # generate random times of day in [-0.25, 0.25), i.e. night hours wrapping midnight
time_of_day = np.random.random(Nvisits)/2. - 0.25
date_of_year = np.floor(np.random.normal(loc=365./4., scale=seasonscale, size=Nvisits))
year_of_obs = np.floor(np.random.random(Nvisits) * tspan) * 365.
date_obs = time_of_day + date_of_year + year_of_obs
date_obs.sort()
if stat is True:
print('mean time between visits:')
print(np.mean(date_obs[1:] - date_obs[:-1]))
print('median time between visits:')
print(np.median(date_obs[1:] - date_obs[:-1]))
plt.figure()
_ = plt.hist(date_obs, bins=np.arange(date_obs.min(), date_obs.max(),7),
histtype='stepfilled', color='k')
plt.xlabel('Time (days)')
plt.ylabel('# Visits per Week')
plt.show()
return date_obs
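# --- Hedged usage sketch (illustration only, not part of the original file) ---
# Draws a decade-long synthetic cadence and inspects the first few observation
# dates; the arguments simply restate the defaults documented above.
if __name__ == '__main__':
    dates = generate_visits(Nvisits=900, tspan=10)
    print(dates[:5])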
|
RuthAngus/LSST-max
|
code/LSSToy.py
|
Python
|
mit
| 1,472
|
[
"Gaussian",
"VisIt"
] |
ca0fb6f2bb9d4dd2df95434b5715327b061276beb4360b45cd2dee9f36abea34
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2016 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
r"""File for accessory procedures in the chem module.
Credit for the libmints vector3 class to Justin M. Turney and
incremental improvements by other psi4 developers.
"""
from __future__ import absolute_import
from __future__ import print_function
import math
import copy
from .exceptions import *
ZERO = 1.0E-14
def norm(v):
"""Compute the magnitude of vector *v*."""
return math.sqrt(sum(v[i] * v[i] for i in range(len(v))))
def add(v, u):
"""Compute sum of vectors *v* and *u*."""
return [u[i] + v[i] for i in range(len(v))]
def sub(v, u):
"""Compute difference of vectors *v* - *u*."""
return [v[i] - u[i] for i in range(len(v))]
def dot(v, u):
"""Compute dot product of vectors *v* and *u*."""
return sum(u[i] * v[i] for i in range(len(v)))
def scale(v, d):
"""Compute by-element scale by *d* of vector *v*."""
return [d * v[i] for i in range(len(v))]
def naivemult(v, u):
"""Compute by-element multiplication of vectors *v* and *u*."""
if len(u) != len(v):
raise ValidationError('naivemult() only defined for vectors of same length \n')
return [u[i] * v[i] for i in range(len(v))]
def normalize(v):
"""Compute normalized vector *v*."""
vmag = norm(v)
return [v[i] / vmag for i in range(len(v))]
def distance(v, u):
"""Compute the distance between points defined by vectors *v* and *u*."""
return norm(sub(v, u))
def cross(v, u):
"""Compute cross product of length 3 vectors *v* and *u*."""
if len(u) != 3 or len(v) != 3:
raise ValidationError('cross() only defined for vectors of length 3\n')
return [v[1] * u[2] - v[2] * u[1],
v[2] * u[0] - v[0] * u[2],
v[0] * u[1] - v[1] * u[0]]
def rotate(v, theta, axis):
"""Rotate length 3 vector *v* about *axis* by *theta* radians."""
if len(v) != 3 or len(axis) != 3:
raise ValidationError('rotate() only defined for vectors of length 3\n')
unitaxis = normalize(copy.deepcopy(axis))
# split into parallel and perpendicular components along axis
parallel = scale(axis, dot(v, axis) / dot(axis, axis))
perpendicular = sub(v, parallel)
# form unit vector perpendicular to parallel and perpendicular
third_axis = perp_unit(axis, perpendicular)
third_axis = scale(third_axis, norm(perpendicular))
result = add(parallel, add(scale(perpendicular, math.cos(theta)), scale(third_axis, math.sin(theta))))
for item in range(len(result)):
if math.fabs(result[item]) < ZERO:
result[item] = 0.0
return result
def perp_unit(u, v):
"""Compute unit vector perpendicular to length 3 vectors *u* and *v*."""
if len(u) != 3 or len(v) != 3:
raise ValidationError('perp_unit() only defined for vectors of length 3\n')
# try cross product
result = cross(u, v)
resultdotresult = dot(result, result)
if resultdotresult < 1.E-16:
# cross product is too small to normalize
# find the largest of this and v
dotprodt = dot(u, u)
dotprodv = dot(v, v)
if dotprodt < dotprodv:
d = copy.deepcopy(v)
dotprodd = dotprodv
else:
d = copy.deepcopy(u)
dotprodd = dotprodt
# see if d is big enough
if dotprodd < 1.e-16:
# choose an arbitrary vector, since the biggest vector is small
result = [1.0, 0.0, 0.0]
return result
else:
# choose a vector perpendicular to d
# choose it in one of the planes xy, xz, yz
# choose the plane to be that which contains the two largest components of d
absd = [math.fabs(d[0]), math.fabs(d[1]), math.fabs(d[2])]
if (absd[1] - absd[0]) > 1.0e-12:
#if absd[0] < absd[1]:
axis0 = 1
if (absd[2] - absd[0]) > 1.0e-12:
#if absd[0] < absd[2]:
axis1 = 2
else:
axis1 = 0
else:
axis0 = 0
if (absd[2] - absd[1]) > 1.0e-12:
#if absd[1] < absd[2]:
axis1 = 2
else:
axis1 = 1
result = [0.0, 0.0, 0.0]
# do the pi/2 rotation in the plane
result[axis0] = d[axis1]
result[axis1] = -1.0 * d[axis0]
result = normalize(result)
return result
else:
# normalize the cross product and return the result
result = scale(result, 1.0 / math.sqrt(resultdotresult))
return result
def determinant(mat):
"""Given 3x3 matrix *mat*, compute the determinat
"""
if len(mat) != 3 or len(mat[0]) != 3 or len(mat[1]) != 3 or len(mat[2]) != 3:
raise ValidationError('determinant() only defined for arrays of dimension 3x3\n')
det = mat[0][0] * mat[1][1] * mat[2][2] - mat[0][2] * mat[1][1] * mat[2][0] + \
mat[0][1] * mat[1][2] * mat[2][0] - mat[0][1] * mat[1][0] * mat[2][2] + \
mat[0][2] * mat[1][0] * mat[2][1] - mat[0][0] * mat[1][2] * mat[2][1]
return det
def diagonalize3x3symmat(M):
"""Given an real symmetric 3x3 matrix *M*, compute the eigenvalues
"""
if len(M) != 3 or len(M[0]) != 3 or len(M[1]) != 3 or len(M[2]) != 3:
raise ValidationError('diagonalize3x3symmat() only defined for arrays of dimension 3x3\n')
A = copy.deepcopy(M) # Symmetric input matrix
Q = [[1, 0, 0], [0, 1, 0], [0, 0, 1]] # Storage buffer for eigenvectors
w = [A[0][0], A[1][1], A[2][2]] # Storage buffer for eigenvalues
# sd, so # Sums of diagonal resp. off-diagonal elements
# s, c, t # sin(phi), cos(phi), tan(phi) and temporary storage
# g, h, z, theta # More temporary storage
# Calculate SQR(tr(A))
sd = 0.0
for i in range(3):
sd += math.fabs(w[i])
sd = sd * sd
# Main iteration loop
for nIter in range(50):
# Test for convergence
so = 0.0
for p in range(3):
for q in range(p + 1, 3):
so += math.fabs(A[p][q])
if so == 0.0:
return w, Q # return eval, evec
if nIter < 4:
thresh = 0.2 * so / (3 * 3)
else:
thresh = 0.0
# Do sweep
for p in range(3):
for q in range(p + 1, 3):
g = 100.0 * math.fabs(A[p][q])
if nIter > 4 and (math.fabs(w[p]) + g == math.fabs(w[p])) and \
(math.fabs(w[q]) + g == math.fabs(w[q])):
A[p][q] = 0.0
elif math.fabs(A[p][q]) > thresh:
# Calculate Jacobi transformation
h = w[q] - w[p]
if math.fabs(h) + g == math.fabs(h):
t = A[p][q] / h
else:
theta = 0.5 * h / A[p][q]
if theta < 0.0:
t = -1.0 / (math.sqrt(1.0 + theta * theta) - theta)
else:
t = 1.0 / (math.sqrt(1.0 + theta * theta) + theta)
c = 1.0 / math.sqrt(1.0 + t * t)
s = t * c
z = t * A[p][q]
# Apply Jacobi transformation
A[p][q] = 0.0
w[p] -= z
w[q] += z
for r in range(p):
t = A[r][p]
A[r][p] = c * t - s * A[r][q]
A[r][q] = s * t + c * A[r][q]
for r in range(p + 1, q):
t = A[p][r]
A[p][r] = c * t - s * A[r][q]
A[r][q] = s * t + c * A[r][q]
for r in range(q + 1, 3):
t = A[p][r]
A[p][r] = c * t - s * A[q][r]
A[q][r] = s * t + c * A[q][r]
# Update eigenvectors
for r in range(3):
t = Q[r][p]
Q[r][p] = c * t - s * Q[r][q]
Q[r][q] = s * t + c * Q[r][q]
    # not converged within 50 Jacobi sweeps
    return None
def zero(m, n):
""" Create zero matrix"""
new_matrix = [[0 for row in range(n)] for col in range(m)]
return new_matrix
def identity(m):
"""Create identity matrix"""
new_matrix = zero(m, m)
for i in range(m):
new_matrix[i][i] = 1.0
return new_matrix
def show(matrix):
""" Print out matrix"""
for col in matrix:
print(col)
def mscale(matrix, d):
"""Return *matrix* scaled by scalar *d*"""
for i in range(len(matrix)):
for j in range(len(matrix[0])):
matrix[i][j] *= d
return matrix
def mult(matrix1, matrix2):
""" Matrix multiplication"""
if len(matrix1[0]) != len(matrix2):
# Check matrix dimensions
raise ValidationError('Matrices must be m*n and n*p to multiply!')
else:
# Multiply if correct dimensions
try:
new_matrix = zero(len(matrix1), len(matrix2[0]))
for i in range(len(matrix1)):
for j in range(len(matrix2[0])):
for k in range(len(matrix2)):
new_matrix[i][j] += matrix1[i][k] * matrix2[k][j]
except TypeError:
new_matrix = zero(len(matrix1), 1)
for i in range(len(matrix1)):
for k in range(len(matrix2)):
new_matrix[i][0] += matrix1[i][k] * matrix2[k]
return new_matrix
def transpose(matrix):
"""Return matrix transpose"""
if len(matrix[0]) != len(matrix):
# Check matrix dimensions
raise ValidationError('Matrices must be square.')
tmat = [list(i) for i in zip(*matrix)]
return tmat
def matadd(matrix1, matrix2, fac1=1.0, fac2=1.0):
"""Matrix addition"""
if (len(matrix1[0]) != len(matrix2[0])) or (len(matrix1) != len(matrix2)):
raise ValidationError('Matrices must be same dimension to add.')
new_matrix = zero(len(matrix1), len(matrix1[0]))
for i in range(len(matrix1)):
for j in range(len(matrix1[0])):
new_matrix[i][j] = fac1 * matrix1[i][j] + fac2 * matrix2[i][j]
return new_matrix
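# --- Hedged usage sketch (illustration only, not part of the original file) ---
# Quick self-check of the helpers above: rotating the x unit vector by pi/2
# about the z axis gives the y unit vector, and a diagonal symmetric matrix
# returns its diagonal as eigenvalues on the first Jacobi sweep.
if __name__ == '__main__':
    print(rotate([1.0, 0.0, 0.0], math.pi / 2, [0.0, 0.0, 1.0]))  # [0.0, 1.0, 0.0]
    evals, evecs = diagonalize3x3symmat([[2.0, 0.0, 0.0],
                                         [0.0, 3.0, 0.0],
                                         [0.0, 0.0, 4.0]])
    print(evals)  # [2.0, 3.0, 4.0]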
|
kannon92/psi4
|
psi4/driver/qcdb/vecutil.py
|
Python
|
gpl-2.0
| 11,265
|
[
"Psi4"
] |
820f3b880947a46f1c61817df9299d4ca6adb51dabf0096ff64f60f743a76abc
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import sys
from enum import Enum
import numpy as np
from sklearn import linear_model
from sklearn.metrics import log_loss
from collections import namedtuple
FitLayerData = namedtuple('FitLayerData',
['sublayer', 'train_x', 'train_y',
'validate_x', 'validate_y', 'params'])
class RefFunctionType(Enum):
rfUnknown = -1
rfLinear = 0
rfLinearCov = 1
rfQuadratic = 2
rfCubic = 3
@classmethod
def get_name(cls, value):
if value == cls.rfUnknown:
return 'Unknown'
elif value == cls.rfLinear:
return 'Linear'
elif value == cls.rfLinearCov:
return 'LinearCov'
elif value == cls.rfQuadratic:
return 'Quadratic'
elif value == cls.rfCubic:
return 'Cubic'
        else:
            # note: no rfHarmonic member exists in this enum
            return 'Unknown'
@staticmethod
def get(arg):
if isinstance(arg, RefFunctionType):
return arg
if arg == 'linear':
return RefFunctionType.rfLinear
elif arg in ('linear_cov', 'lcov'):
return RefFunctionType.rfLinearCov
elif arg in ('quadratic', 'quad'):
return RefFunctionType.rfQuadratic
elif arg == 'cubic':
return RefFunctionType.rfCubic
else:
raise ValueError(arg)
class CriterionType(Enum):
cmpValidate = 1
cmpBias = 2
cmpComb_validate_bias = 4
cmpComb_bias_retrain = 5
@classmethod
def get_name(cls, value):
if value == cls.cmpValidate:
return 'validate error comparison'
elif value == cls.cmpBias:
return 'bias error comparison'
elif value == cls.cmpComb_validate_bias:
return 'bias and validate error comparison'
elif value == cls.cmpComb_bias_retrain:
return 'bias error comparison with retrain'
else:
return 'Unknown'
@staticmethod
def get(arg):
if isinstance(arg, CriterionType):
return arg
elif arg == 'validate':
return CriterionType.cmpValidate
elif arg == 'bias':
return CriterionType.cmpBias
elif arg == 'validate_bias':
return CriterionType.cmpComb_validate_bias
elif arg in ('bias_retrain', 'bias_refit') :
return CriterionType.cmpComb_bias_retrain
else:
raise ValueError(arg)
# *****************************************************************************
# Base neuron class
# *****************************************************************************
class Neuron(object):
"""Base class for neuron
"""
def __init__(self, layer_index, u1_index, u2_index, neuron_index):
self.layer_index = layer_index
self.neuron_index = neuron_index
self.u1_index = u1_index
self.u2_index = u2_index
self.ref_function_type = RefFunctionType.rfUnknown
self.valid = True
self.train_err = sys.float_info.max # neuron error on train data set
self.validate_err = sys.float_info.max # neuron error on validate data set
self.bias_err = sys.float_info.max # bias neuron error
self.transfer = None # transfer function
def need_bias_stuff(self, criterion_type):
if criterion_type == CriterionType.cmpValidate:
return False
return True
def get_error(self, criterion_type):
"""Compute error of the neuron according to specified criterion
"""
if criterion_type == CriterionType.cmpValidate:
return self.validate_err
elif criterion_type == CriterionType.cmpBias:
return self.bias_err
elif criterion_type == CriterionType.cmpComb_validate_bias:
return 0.5*self.bias_err + 0.5*self.validate_err
elif criterion_type == CriterionType.cmpComb_bias_retrain:
return self.bias_err
else:
return sys.float_info.max
def get_regularity_err(self, x, y):
raise NotImplementedError
def get_bias_err(self, train_x, validate_x, train_y, validate_y):
raise NotImplementedError
def get_features_name(self, input_index, feature_names, layers):
if self.layer_index == 0:
s = 'index=inp_{0}'.format(input_index)
if len(feature_names) > 0:
s += ', {0}'.format(feature_names[input_index])
else:
neurons_num = len(layers[self.layer_index-1])
if input_index < neurons_num:
s = 'index=prev_layer_neuron_{0}'.format(input_index)
else:
s = 'index=inp_{0}'.format(input_index - neurons_num)
if len(feature_names) > 0:
s += ', {0}'.format(feature_names[input_index - neurons_num])
return s
def linear_activation(self, x):
return x
def sigmoid_activation(self, x):
return 1.0 / (1.0 + np.exp(-x))
def get_name(self):
raise NotImplementedError
def get_short_name(self):
raise NotImplementedError
# *****************************************************************************
# Polynomial neuron class
# *****************************************************************************
class PolynomNeuron(Neuron):
"""Polynomial neuron class
"""
def __init__(self, layer_index, u1_index, u2_index, ftype, neuron_index, model_class, loss):
super(PolynomNeuron, self).__init__(layer_index, u1_index, u2_index, neuron_index)
self.ftype = ftype
self.fw_size = 0
self.set_type(ftype)
self.w = None
self.wt = None
self.valid = False
self.bias_err = 0
self.train_err = 0
self.validate_err = 0
self.model_class = model_class
if model_class=='classification':
self.fit_function = self._fit_classifier
self.activation = self.sigmoid_activation
else:
self.fit_function = self._fit_regressor
self.activation = self.linear_activation
if loss == 'mse':
self.loss_function = self._mse
self.loss_norm = self._mse_norm
elif loss == 'logloss':
self.loss_function = log_loss
self.loss_norm = self._logloss_norm
else:
raise ValueError('Unexpected loss function type: {}'.format(loss))
def _transfer_linear(self, u1, u2, w):
return self.activation(w[0] + w[1]*u1 + w[2]*u2)
def _transfer_linear_cov(self, u1, u2, w):
return self.activation(w[0] + u1*(w[1] + w[3]*u2) + w[2]*u2)
def _transfer_quadratic(self, u1, u2, w):
return self.activation(w[0] + u1*(w[1] + w[3]*u2 + w[4]*u1) + u2*(w[2] + w[5]*u2))
def _transfer_cubic(self, u1, u2, w):
u1_sq = u1*u1
u2_sq = u2*u2
return self.activation(w[0] + w[1]*u1 + w[2]*u2 + w[3]*u1*u2 + w[4]*u1_sq + w[5]*u2_sq + \
w[6]*u1*u1_sq + w[7]*u1_sq*u2 + w[8]*u1*u2_sq + w[9]*u2*u2_sq)
def set_type(self, new_type):
self.ref_function_type = new_type
if new_type == RefFunctionType.rfLinear:
self.transfer = self._transfer_linear
self.fw_size = 3
elif new_type == RefFunctionType.rfLinearCov:
self.transfer = self._transfer_linear_cov
self.fw_size = 4
elif new_type == RefFunctionType.rfQuadratic:
self.transfer = self._transfer_quadratic
self.fw_size = 6
elif new_type == RefFunctionType.rfCubic:
self.transfer = self._transfer_cubic
self.fw_size = 10
else:
raise ValueError('Unknown type of neuron: {}'.format(new_type))
def _mse(self, y, yp):
return ((y - yp) ** 2).sum()
def _mse_norm(self, y):
return (y ** 2).sum()
def _logloss_norm(self, y):
return np.absolute(y).sum()
def get_regularity_err(self, x, y):
"""Calculation of regularity error
"""
x1 = x[:, self.u1_index]
x2 = x[:, self.u2_index]
yp = self.transfer(x1, x2, self.w)
err = self.loss_function(y, yp) / self.loss_norm(y)
return err
def get_sub_bias_err(self, x, wa, wb):
"""Helper function for calculation of unbiased error
"""
x1 = x[:, self.u1_index]
x2 = x[:, self.u2_index]
yta = self.transfer(x1, x2, wa)
ytb = self.transfer(x1, x2, wb)
s = ((yta - ytb) ** 2).sum()
return s
def get_bias_err(self, train_x, validate_x, train_y, validate_y):
"""Calculation of unbiased error
"""
s = self.get_sub_bias_err(train_x, self.w, self.wt) + \
self.get_sub_bias_err(validate_x, self.w, self.wt)
s2 = (train_y ** 2).sum() + (validate_y ** 2).sum()
err = s/s2
return err
def get_name(self):
if self.ftype == RefFunctionType.rfLinear:
return 'w0 + w1*xi + w2*xj'
elif self.ftype == RefFunctionType.rfLinearCov:
return 'w0 + w1*xi + w2*xj + w3*xi*xj'
elif self.ftype == RefFunctionType.rfQuadratic:
return 'full polynom 2nd degree'
elif self.ftype == RefFunctionType.rfCubic:
return 'full polynom 3rd degree'
else:
return 'Unknown'
def get_short_name(self):
if self.ftype == RefFunctionType.rfLinear:
return 'linear'
elif self.ftype == RefFunctionType.rfLinearCov:
return 'linear cov'
elif self.ftype == RefFunctionType.rfQuadratic:
return 'quadratic'
elif self.ftype == RefFunctionType.rfCubic:
return 'cubic'
else:
return 'Unknown'
def __repr__(self):
return 'PolynomModel {0} - {1}'.format(self.neuron_index, RefFunctionType.get_name(self.ref_function_type))
def describe(self, features, layers):
s = ['PolynomModel {0} - {1}'.format(self.neuron_index, RefFunctionType.get_name(self.ref_function_type)),
'u1: {0}'.format(self.get_features_name(self.u1_index, features, layers)),
'u2: {0}'.format(self.get_features_name(self.u2_index, features, layers)),
'train error: {0}'.format(self.train_err),
'validate error: {0}'.format(self.validate_err),
'bias error: {0}'.format(self.bias_err),
'; '.join(['w{0}={1}'.format(n, self.w[n]) for n in range(self.w.shape[0])]),
             '||w||^2={ww}'.format(ww=(self.w ** 2).sum())  # squared norm, matching the label
]
return '\n'.join(s)
    def get_polynom_inputs(self, ftype, u1_index, u2_index, source):
        """
        Builds the design matrix whose columns are the polynomial terms
        needed to fit the neuron coefficients by (regularized) multiple
        linear regression; columns match the weights of the transfer
        functions above.
        """
        u1x = source[:, u1_index]
        u2x = source[:, u2_index]
        a = np.empty((source.shape[0], self.fw_size), dtype=np.double)
        a[:, 0] = 1
        a[:, 1] = u1x
        a[:, 2] = u2x
        if ftype in (RefFunctionType.rfLinearCov,
                     RefFunctionType.rfQuadratic,
                     RefFunctionType.rfCubic):
            a[:, 3] = u1x * u2x
        if ftype in (RefFunctionType.rfQuadratic,
                     RefFunctionType.rfCubic):
            a[:, 4] = u1x * u1x
            a[:, 5] = u2x * u2x
        if RefFunctionType.rfCubic == ftype:
            a[:, 6] = a[:, 4] * u1x  # u1^3
            a[:, 7] = a[:, 4] * u2x  # u1^2 * u2
            a[:, 8] = a[:, 5] * u1x  # u1 * u2^2
            a[:, 9] = a[:, 5] * u2x  # u2^3, matching _transfer_cubic's w[9] term
        return a
def _fit_regressor(self, x, y, params):
a = self.get_polynom_inputs(self.ftype, self.u1_index, self.u2_index, x)
reg = linear_model.Ridge(alpha=params['l2'], solver='lsqr')
a2 = a[:, 1:]
reg.fit(a2, y)
w = np.empty((len(reg.coef_) + 1,), dtype=np.double)
w[0] = reg.intercept_
w[1:] = reg.coef_
return w
def _fit_classifier(self, x, y, params):
a = self.get_polynom_inputs(self.ftype, self.u1_index, self.u2_index, x)
clf = linear_model.LogisticRegression(C=1.0/params['l2'])
a2 = a[:, 1:]
clf.fit(a2, y)
w = np.empty((clf.coef_.shape[1] + 1,), dtype=np.double)
w[0] = clf.intercept_
w[1:] = clf.coef_[0, :]
return w
def fit(self, train_x, train_y, validate_x, validate_y, params):
"""
Train the neuron using train and validate sets
"""
self.w = self.fit_function(train_x, train_y, params)
if self.need_bias_stuff(params['criterion_type']):
self.wt = self.fit_function(validate_x, validate_y, params)
self.bias_err = 0
self.valid = True
# calculate neuron errors
if self.need_bias_stuff(params['criterion_type']):
self.bias_err = self.get_bias_err(train_x, validate_x, train_y, validate_y)
self.train_err = self.get_regularity_err(train_x, train_y)
self.validate_err = self.get_regularity_err(validate_x, validate_y)
#***********************************************************************************************************************
# Network layer
#***********************************************************************************************************************
class LayerCreationError(Exception):
"""raised when error happens while layer creation
"""
def __init__(self, message, layer_index):
# Call the base class constructor with the parameters it needs
super(LayerCreationError, self).__init__(message)
self.layer_index = layer_index
class Layer(list):
"""Layer class of multilayered group method of data handling algorithm
"""
def __init__(self, model, layer_index, *args):
list.__init__(self, *args)
self.layer_index = layer_index
self.l_count = model.l_count
self.n_features = model.n_features
self.err = sys.float_info.max
self.train_err = sys.float_info.max
self.valid = True
self.input_index_set = set([])
def add_neuron(self, index_u1, index_u2, ftype, model_class, loss):
"""Add polynomial neuron to the layer
"""
self.add(PolynomNeuron(self.layer_index, index_u1, index_u2, ftype, len(self), model_class, loss))
def __repr__(self):
return 'Layer {0}'.format(self.layer_index)
def describe(self, features, layers):
s = ['*' * 50,
'Layer {0}'.format(self.layer_index),
'*' * 50,
]
for neuron in self:
s.append(neuron.describe(features, layers))
return '\n'.join(s)
def add(self, neuron):
neuron.neuron_index = len(self)
self.append(neuron)
self.input_index_set.add(neuron.u1_index)
self.input_index_set.add(neuron.u2_index)
def delete(self, index):
self.pop(index)
for n in range(index, len(self)):
self[n].neuron_index = n
self.input_index_set.clear()
for neuron in self:
self.input_index_set.add(neuron.u1_index)
self.input_index_set.add(neuron.u2_index)
def fit_layer(fit_layer_data):
sublayer = fit_layer_data.sublayer
for neuron in sublayer:
neuron.fit(fit_layer_data.train_x,
fit_layer_data.train_y,
fit_layer_data.validate_x,
fit_layer_data.validate_y,
fit_layer_data.params)
return sublayer
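# Illustrative sketch (not part of GmdhPy): for the rfQuadratic reference
# function, get_polynom_inputs builds a design matrix with the columns
# [1, x1, x2, x1*x2, x1**2, x2**2]; coefficients w applied to such a matrix
# are recovered exactly from noiseless targets by least squares.
def _quadratic_design_demo():
    rng = np.random.RandomState(0)
    x1, x2 = rng.rand(20), rng.rand(20)
    a = np.column_stack([np.ones(20), x1, x2, x1 * x2, x1 ** 2, x2 ** 2])
    w_true = np.arange(1.0, 7.0)          # arbitrary coefficients w0..w5
    y = a.dot(w_true)                     # noiseless targets
    w_fit = np.linalg.lstsq(a, y, rcond=None)[0]
    assert np.allclose(w_fit, w_true)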
|
kvoyager/GmdhPy
|
gmdhpy/neuron.py
|
Python
|
mit
| 15,781
|
[
"NEURON"
] |
ddd6062192be2f75857f0698a3df9907d3ae11b3330a3cc614cb6d214158b339
|
#
# The OpenDiamond Platform for Interactive Search
#
# Copyright (c) 2011-2012 Carnegie Mellon University
# All rights reserved.
#
# This software is distributed under the terms of the Eclipse Public
# License, Version 1.0 which can be found in the file named LICENSE.
# ANY USE, REPRODUCTION OR DISTRIBUTION OF THIS SOFTWARE CONSTITUTES
# RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT
#
'''XDR serialization/deserialization for the Diamond wire protocol.'''
# XDR classes are oddly named for consistency with OpenDiamond-Java
# pylint: disable=invalid-name
from opendiamond.rpc import RPCError
from opendiamond.xdr import XDR, XDRStruct
# Default port
PORT = 5872
# Nonce details
NONCE_LEN = 16
NULL_NONCE = b'\x00' * NONCE_LEN
class DiamondRPCFailure(RPCError):
'''Generic Diamond RPC failure.'''
code = 500
class DiamondRPCFCacheMiss(RPCError):
'''Filter code or blob argument missed in the blob cache.'''
code = 501
class DiamondRPCCookieExpired(RPCError):
'''Proffered scope cookie has expired.'''
code = 504
class DiamondRPCSchemeNotSupported(RPCError):
'''URI scheme not supported.'''
code = 505
class XDR_attribute(XDRStruct):
'''An object attribute'''
members = (
'name', XDR.string(),
'value', XDR.opaque(),
)
class XDR_object(XDRStruct):
'''Blast channel object data'''
members = (
'attrs', XDR.array(XDR.struct(XDR_attribute)),
)
class XDR_blob_list(XDRStruct):
'''A list of blob URIs'''
members = (
'uris', XDR.array(XDR.string()),
)
class XDR_filter_config(XDRStruct):
'''Configuration for a single filter'''
members = (
'name', XDR.string(),
'arguments', XDR.array(XDR.string()),
'dependencies', XDR.array(XDR.string()),
'min_score', XDR.double(),
'max_score', XDR.double(),
'code', XDR.string(),
'blob', XDR.string(),
)
class XDR_setup(XDRStruct):
'''Search setup parameters'''
members = (
'cookies', XDR.array(XDR.string()),
'filters', XDR.array(XDR.struct(XDR_filter_config)),
)
class XDR_blob_data(XDRStruct):
'''Blob data to be added to the blob cache'''
members = (
'blobs', XDR.array(XDR.opaque()),
)
class XDR_start(XDRStruct):
'''Start-search parameters'''
members = (
'search_id', XDR.fopaque(36),
'attrs', XDR.optional(XDR.array(XDR.string())),
)
class XDR_stat(XDRStruct):
'''Statistics key-value pair'''
members = (
"name", XDR.string(),
"value", XDR.hyper(),
)
class XDR_filter_stats(XDRStruct):
'''Filter statistics'''
members = (
'name', XDR.string(),
'stats', XDR.array(XDR.struct(XDR_stat)),
)
class XDR_search_stats(XDRStruct):
'''Search statistics'''
members = (
'stats', XDR.array(XDR.struct(XDR_stat)),
'filter_stats', XDR.optional(XDR.array(XDR.struct(XDR_filter_stats))),
)
class XDR_session_var(XDRStruct):
'''Session variable'''
members = (
'name', XDR.string(),
'value', XDR.double(),
)
class XDR_session_vars(XDRStruct):
'''Session variable list'''
members = (
'vars', XDR.array(XDR.struct(XDR_session_var)),
)
class XDR_reexecute(XDRStruct):
'''Reexecute argument'''
members = (
'object_id', XDR.string(),
'attrs', XDR.optional(XDR.array(XDR.string())),
)
class XDR_retrain(XDRStruct):
'''Search retrain parameters'''
members = (
'names', XDR.array(XDR.string()),
'labels', XDR.array(XDR.int()),
'features', XDR.array(XDR.opaque()),
)
class XDR_attribute_list(XDRStruct):
'''Reexecute response'''
members = (
'attrs', XDR.array(XDR.struct(XDR_attribute)),
)
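# Minimal sketch (not part of the OpenDiamond protocol): a hypothetical
# struct showing the declaration pattern used above. `members` is a flat
# tuple alternating field names with XDR type descriptors; the actual
# encode/decode machinery lives in XDRStruct and is not shown in this file.
class XDR_example_pair(XDRStruct):
    '''Hypothetical name/value pair, for illustration only'''
    members = (
        'name', XDR.string(),
        'value', XDR.double(),
    )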
|
cmusatyalab/opendiamond
|
opendiamond/protocol.py
|
Python
|
epl-1.0
| 3,805
|
[
"BLAST"
] |
f5817d8763b1d53aff6619888d590639246c637621a4b85450ba8f0c3c282e69
|
"""Bayesian Gaussian Mixture Models and
Dirichlet Process Gaussian Mixture Models"""
from __future__ import print_function
# Author: Alexandre Passos (alexandre.tp@gmail.com)
# Bertrand Thirion <bertrand.thirion@inria.fr>
#
# Based on mixture.py by:
# Ron Weiss <ronweiss@gmail.com>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
#
import numpy as np
from scipy.special import digamma as _digamma, gammaln as _gammaln
from scipy import linalg
from scipy.spatial.distance import cdist
from ..externals.six.moves import xrange
from ..utils import check_random_state
from ..utils.extmath import logsumexp, pinvh, squared_norm
from .. import cluster
from .gmm import GMM
def digamma(x):
return _digamma(x + np.finfo(np.float32).eps)
def gammaln(x):
return _gammaln(x + np.finfo(np.float32).eps)
def log_normalize(v, axis=0):
"""Normalized probabilities from unnormalized log-probabilites"""
v = np.rollaxis(v, axis)
v = v.copy()
v -= v.max(axis=0)
out = logsumexp(v)
v = np.exp(v - out)
v += np.finfo(np.float32).eps
v /= np.sum(v, axis=0)
return np.swapaxes(v, 0, axis)
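# Illustrative sketch (not part of the original module): log_normalize is a
# numerically stable softmax along `axis`; the unnormalized log-probabilities
# [log 1, log 3] normalize to roughly [0.25, 0.75].
def _log_normalize_demo():
    v = np.log(np.array([[1.0, 3.0]]))
    p = log_normalize(v, axis=1)
    assert np.allclose(p, [[0.25, 0.75]], atol=1e-5)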
def wishart_log_det(a, b, detB, n_features):
"""Expected value of the log of the determinant of a Wishart
The expected value of the logarithm of the determinant of a
Wishart-distributed random variable with the specified parameters."""
l = np.sum(digamma(0.5 * (a - np.arange(-1, n_features - 1))))
l += n_features * np.log(2)
return l + detB
def wishart_logz(v, s, dets, n_features):
"The logarithm of the normalization constant for the wishart distribution"
z = 0.
z += 0.5 * v * n_features * np.log(2)
z += (0.25 * (n_features * (n_features - 1)) * np.log(np.pi))
z += 0.5 * v * np.log(dets)
z += np.sum(gammaln(0.5 * (v - np.arange(n_features) + 1)))
return z
def _bound_wishart(a, B, detB):
"""Returns a function of the dof, scale matrix and its determinant
used as an upper bound in variational approcimation of the evidence"""
n_features = B.shape[0]
logprior = wishart_logz(a, B, detB, n_features)
logprior -= wishart_logz(n_features,
np.identity(n_features),
1, n_features)
logprior += 0.5 * (a - 1) * wishart_log_det(a, B, detB, n_features)
logprior += 0.5 * a * np.trace(B)
return logprior
##############################################################################
# Variational bound on the log likelihood of each class
##############################################################################
def _sym_quad_form(x, mu, A):
"""helper function to calculate symmetric quadratic form x.T * A * x"""
q = (cdist(x, mu[np.newaxis], "mahalanobis", VI=A) ** 2).reshape(-1)
return q
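# Illustrative sketch (not part of the original module): with an identity
# precision matrix the Mahalanobis quadratic form reduces to the squared
# Euclidean distance, e.g. x = [1, 2] against mu = [0, 0] gives 1 + 4 = 5.
def _sym_quad_form_demo():
    x = np.array([[1.0, 2.0]])
    mu = np.zeros(2)
    assert np.allclose(_sym_quad_form(x, mu, np.eye(2)), [5.0])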
def _bound_state_log_lik(X, initial_bound, precs, means, covariance_type):
"""Update the bound with likelihood terms, for standard covariance types"""
n_components, n_features = means.shape
n_samples = X.shape[0]
bound = np.empty((n_samples, n_components))
bound[:] = initial_bound
if covariance_type in ['diag', 'spherical']:
for k in range(n_components):
d = X - means[k]
bound[:, k] -= 0.5 * np.sum(d * d * precs[k], axis=1)
elif covariance_type == 'tied':
for k in range(n_components):
bound[:, k] -= 0.5 * _sym_quad_form(X, means[k], precs)
elif covariance_type == 'full':
for k in range(n_components):
bound[:, k] -= 0.5 * _sym_quad_form(X, means[k], precs[k])
return bound
class DPGMM(GMM):
"""Variational Inference for the Infinite Gaussian Mixture Model.
DPGMM stands for Dirichlet Process Gaussian Mixture Model, and it
is an infinite mixture model with the Dirichlet Process as a prior
distribution on the number of clusters. In practice the
approximate inference algorithm uses a truncated distribution with
a fixed maximum number of components, but almost always the number
of components actually used depends on the data.
Stick-breaking Representation of a Gaussian mixture model
probability distribution. This class allows for easy and efficient
inference of an approximate posterior distribution over the
parameters of a Gaussian mixture model with a variable number of
components (smaller than the truncation parameter n_components).
Initialization is with normally-distributed means and identity
covariance, for proper convergence.
Parameters
----------
n_components: int, optional
Number of mixture components. Defaults to 1.
covariance_type: string, optional
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
alpha: float, optional
Real number representing the concentration parameter of
the Dirichlet process. Intuitively, the Dirichlet process
is as likely to start a new cluster for a point as it is
to add that point to a cluster with alpha elements. A
higher alpha means more clusters, as the expected number
of clusters is ``alpha*log(N)``. Defaults to 1.
thresh : float, optional
Convergence threshold.
n_iter : int, optional
Maximum number of iterations to perform before convergence.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
init_params : string, optional
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
Attributes
----------
covariance_type : string
String describing the type of covariance parameters used by
the DP-GMM. Must be one of 'spherical', 'tied', 'diag', 'full'.
n_components : int
Number of mixture components.
weights_ : array, shape (`n_components`,)
Mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
precs_ : array
Precision (inverse covariance) parameters for each mixture
component. The shape depends on `covariance_type`::
(`n_components`, 'n_features') if 'spherical',
(`n_features`, `n_features`) if 'tied',
(`n_components`, `n_features`) if 'diag',
(`n_components`, `n_features`, `n_features`) if 'full'
converged_ : bool
True when convergence was reached in fit(), False otherwise.
See Also
--------
GMM : Finite Gaussian mixture model fit with EM
VBGMM : Finite Gaussian mixture model fit with a variational
algorithm, better for situations where there might be too little
data to get a good estimate of the covariance matrix.
"""
def __init__(self, n_components=1, covariance_type='diag', alpha=1.0,
random_state=None, thresh=1e-2, verbose=False,
min_covar=None, n_iter=10, params='wmc', init_params='wmc'):
self.alpha = alpha
self.verbose = verbose
super(DPGMM, self).__init__(n_components, covariance_type,
random_state=random_state,
thresh=thresh, min_covar=min_covar,
n_iter=n_iter, params=params,
init_params=init_params)
def _get_precisions(self):
"""Return precisions as a full matrix."""
if self.covariance_type == 'full':
return self.precs_
elif self.covariance_type in ['diag', 'spherical']:
return [np.diag(cov) for cov in self.precs_]
elif self.covariance_type == 'tied':
return [self.precs_] * self.n_components
def _get_covars(self):
return [pinvh(c) for c in self._get_precisions()]
def _set_covars(self, covars):
raise NotImplementedError("""The variational algorithm does
not support setting the covariance parameters.""")
def score_samples(self, X):
"""Return the likelihood of the data under the model.
Compute the bound on log probability of X under the model
and return the posterior distribution (responsibilities) of
each mixture component for each element of X.
This is done by computing the parameters for the mean-field of
z for each observation.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
responsibilities: array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
X = np.asarray(X)
if X.ndim == 1:
X = X[:, np.newaxis]
z = np.zeros((X.shape[0], self.n_components))
sd = digamma(self.gamma_.T[1] + self.gamma_.T[2])
dgamma1 = digamma(self.gamma_.T[1]) - sd
dgamma2 = np.zeros(self.n_components)
dgamma2[0] = digamma(self.gamma_[0, 2]) - digamma(self.gamma_[0, 1] +
self.gamma_[0, 2])
for j in range(1, self.n_components):
dgamma2[j] = dgamma2[j - 1] + digamma(self.gamma_[j - 1, 2])
dgamma2[j] -= sd[j - 1]
dgamma = dgamma1 + dgamma2
# Free memory and developers' cognitive load:
del dgamma1, dgamma2, sd
if self.covariance_type not in ['full', 'tied', 'diag', 'spherical']:
raise NotImplementedError("This ctype is not implemented: %s"
% self.covariance_type)
p = _bound_state_log_lik(X, self._initial_bound + self.bound_prec_,
self.precs_, self.means_,
self.covariance_type)
z = p + dgamma
z = log_normalize(z, axis=-1)
bound = np.sum(z * p, axis=-1)
return bound, z
def _update_concentration(self, z):
"""Update the concentration parameters for each cluster"""
sz = np.sum(z, axis=0)
self.gamma_.T[1] = 1. + sz
self.gamma_.T[2].fill(0)
for i in range(self.n_components - 2, -1, -1):
self.gamma_[i, 2] = self.gamma_[i + 1, 2] + sz[i]
self.gamma_.T[2] += self.alpha
def _update_means(self, X, z):
"""Update the variational distributions for the means"""
n_features = X.shape[1]
for k in range(self.n_components):
if self.covariance_type in ['spherical', 'diag']:
num = np.sum(z.T[k].reshape((-1, 1)) * X, axis=0)
num *= self.precs_[k]
den = 1. + self.precs_[k] * np.sum(z.T[k])
self.means_[k] = num / den
elif self.covariance_type in ['tied', 'full']:
if self.covariance_type == 'tied':
cov = self.precs_
else:
cov = self.precs_[k]
den = np.identity(n_features) + cov * np.sum(z.T[k])
num = np.sum(z.T[k].reshape((-1, 1)) * X, axis=0)
num = np.dot(cov, num)
self.means_[k] = linalg.lstsq(den, num)[0]
def _update_precisions(self, X, z):
"""Update the variational distributions for the precisions"""
n_features = X.shape[1]
if self.covariance_type == 'spherical':
self.dof_ = 0.5 * n_features * np.sum(z, axis=0)
for k in range(self.n_components):
# could be more memory-efficient?
sq_diff = np.sum((X - self.means_[k]) ** 2, axis=1)
self.scale_[k] = 1.
self.scale_[k] += 0.5 * np.sum(z.T[k] * (sq_diff + n_features))
self.bound_prec_[k] = (
0.5 * n_features * (
digamma(self.dof_[k]) - np.log(self.scale_[k])))
self.precs_ = np.tile(self.dof_ / self.scale_, [n_features, 1]).T
elif self.covariance_type == 'diag':
for k in range(self.n_components):
self.dof_[k].fill(1. + 0.5 * np.sum(z.T[k], axis=0))
sq_diff = (X - self.means_[k]) ** 2 # see comment above
self.scale_[k] = np.ones(n_features) + 0.5 * np.dot(
z.T[k], (sq_diff + 1))
self.precs_[k] = self.dof_[k] / self.scale_[k]
self.bound_prec_[k] = 0.5 * np.sum(digamma(self.dof_[k])
- np.log(self.scale_[k]))
self.bound_prec_[k] -= 0.5 * np.sum(self.precs_[k])
elif self.covariance_type == 'tied':
self.dof_ = 2 + X.shape[0] + n_features
self.scale_ = (X.shape[0] + 1) * np.identity(n_features)
for k in range(self.n_components):
diff = X - self.means_[k]
self.scale_ += np.dot(diff.T, z[:, k:k + 1] * diff)
self.scale_ = pinvh(self.scale_)
self.precs_ = self.dof_ * self.scale_
self.det_scale_ = linalg.det(self.scale_)
self.bound_prec_ = 0.5 * wishart_log_det(
self.dof_, self.scale_, self.det_scale_, n_features)
self.bound_prec_ -= 0.5 * self.dof_ * np.trace(self.scale_)
elif self.covariance_type == 'full':
for k in range(self.n_components):
sum_resp = np.sum(z.T[k])
self.dof_[k] = 2 + sum_resp + n_features
self.scale_[k] = (sum_resp + 1) * np.identity(n_features)
diff = X - self.means_[k]
self.scale_[k] += np.dot(diff.T, z[:, k:k + 1] * diff)
self.scale_[k] = pinvh(self.scale_[k])
self.precs_[k] = self.dof_[k] * self.scale_[k]
self.det_scale_[k] = linalg.det(self.scale_[k])
self.bound_prec_[k] = 0.5 * wishart_log_det(
self.dof_[k], self.scale_[k], self.det_scale_[k],
n_features)
self.bound_prec_[k] -= 0.5 * self.dof_[k] * np.trace(
self.scale_[k])
def _monitor(self, X, z, n, end=False):
"""Monitor the lower bound during iteration
Debug method to help see exactly when it is failing to converge as
expected.
Note: this is very expensive and should not be used by default."""
if self.verbose:
print("Bound after updating %8s: %f" % (n, self.lower_bound(X, z)))
if end:
print("Cluster proportions:", self.gamma_.T[1])
print("covariance_type:", self.covariance_type)
def _do_mstep(self, X, z, params):
"""Maximize the variational lower bound
Update each of the parameters to maximize the lower bound."""
self._monitor(X, z, "z")
self._update_concentration(z)
self._monitor(X, z, "gamma")
if 'm' in params:
self._update_means(X, z)
self._monitor(X, z, "mu")
if 'c' in params:
self._update_precisions(X, z)
self._monitor(X, z, "a and b", end=True)
def _initialize_gamma(self):
"Initializes the concentration parameters"
self.gamma_ = self.alpha * np.ones((self.n_components, 3))
def _bound_concentration(self):
"""The variational lower bound for the concentration parameter."""
logprior = gammaln(self.alpha) * self.n_components
logprior += np.sum((self.alpha - 1) * (
digamma(self.gamma_.T[2]) - digamma(self.gamma_.T[1] +
self.gamma_.T[2])))
logprior += np.sum(- gammaln(self.gamma_.T[1] + self.gamma_.T[2]))
logprior += np.sum(gammaln(self.gamma_.T[1]) +
gammaln(self.gamma_.T[2]))
logprior -= np.sum((self.gamma_.T[1] - 1) * (
digamma(self.gamma_.T[1]) - digamma(self.gamma_.T[1] +
self.gamma_.T[2])))
logprior -= np.sum((self.gamma_.T[2] - 1) * (
digamma(self.gamma_.T[2]) - digamma(self.gamma_.T[1] +
self.gamma_.T[2])))
return logprior
def _bound_means(self):
"The variational lower bound for the mean parameters"
logprior = 0.
logprior -= 0.5 * squared_norm(self.means_)
logprior -= 0.5 * self.means_.shape[1] * self.n_components
return logprior
def _bound_precisions(self):
"""Returns the bound term related to precisions"""
logprior = 0.
if self.covariance_type == 'spherical':
logprior += np.sum(gammaln(self.dof_))
logprior -= np.sum(
(self.dof_ - 1) * digamma(np.maximum(0.5, self.dof_)))
logprior += np.sum(- np.log(self.scale_) + self.dof_
- self.precs_[:, 0])
elif self.covariance_type == 'diag':
logprior += np.sum(gammaln(self.dof_))
logprior -= np.sum(
(self.dof_ - 1) * digamma(np.maximum(0.5, self.dof_)))
logprior += np.sum(- np.log(self.scale_) + self.dof_ - self.precs_)
elif self.covariance_type == 'tied':
logprior += _bound_wishart(self.dof_, self.scale_, self.det_scale_)
elif self.covariance_type == 'full':
for k in range(self.n_components):
logprior += _bound_wishart(self.dof_[k],
self.scale_[k],
self.det_scale_[k])
return logprior
def _bound_proportions(self, z):
"""Returns the bound term related to proportions"""
dg12 = digamma(self.gamma_.T[1] + self.gamma_.T[2])
dg1 = digamma(self.gamma_.T[1]) - dg12
dg2 = digamma(self.gamma_.T[2]) - dg12
cz = np.cumsum(z[:, ::-1], axis=-1)[:, -2::-1]
logprior = np.sum(cz * dg2[:-1]) + np.sum(z * dg1)
del cz # Save memory
z_non_zeros = z[z > np.finfo(np.float32).eps]
logprior -= np.sum(z_non_zeros * np.log(z_non_zeros))
return logprior
def _logprior(self, z):
logprior = self._bound_concentration()
logprior += self._bound_means()
logprior += self._bound_precisions()
logprior += self._bound_proportions(z)
return logprior
def lower_bound(self, X, z):
"""returns a lower bound on model evidence based on X and membership"""
if self.covariance_type not in ['full', 'tied', 'diag', 'spherical']:
raise NotImplementedError("This ctype is not implemented: %s"
% self.covariance_type)
X = np.asarray(X)
if X.ndim == 1:
X = X[:, np.newaxis]
c = np.sum(z * _bound_state_log_lik(X, self._initial_bound +
self.bound_prec_, self.precs_,
self.means_, self.covariance_type))
return c + self._logprior(z)
def _set_weights(self):
for i in xrange(self.n_components):
self.weights_[i] = self.gamma_[i, 1] / (self.gamma_[i, 1]
+ self.gamma_[i, 2])
self.weights_ /= np.sum(self.weights_)
def fit(self, X):
"""Estimate model parameters with the variational
algorithm.
For a full derivation and description of the algorithm see
doc/modules/dp-derivation.rst
or
http://scikit-learn.org/stable/modules/dp-derivation.html
An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, set the keyword
argument init_params to the empty string '' when creating
the object. Likewise, if you would like just to do an
initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
"""
self.random_state = check_random_state(self.random_state)
## initialization step
X = np.asarray(X)
if X.ndim == 1:
X = X[:, np.newaxis]
n_features = X.shape[1]
z = np.ones((X.shape[0], self.n_components))
z /= self.n_components
self._initial_bound = - 0.5 * n_features * np.log(2 * np.pi)
self._initial_bound -= np.log(2 * np.pi * np.e)
if (self.init_params != '') or not hasattr(self, 'gamma_'):
self._initialize_gamma()
if 'm' in self.init_params or not hasattr(self, 'means_'):
self.means_ = cluster.KMeans(
n_clusters=self.n_components,
random_state=self.random_state).fit(X).cluster_centers_[::-1]
if 'w' in self.init_params or not hasattr(self, 'weights_'):
self.weights_ = np.tile(1.0 / self.n_components, self.n_components)
if 'c' in self.init_params or not hasattr(self, 'precs_'):
if self.covariance_type == 'spherical':
self.dof_ = np.ones(self.n_components)
self.scale_ = np.ones(self.n_components)
self.precs_ = np.ones((self.n_components, n_features))
self.bound_prec_ = 0.5 * n_features * (
digamma(self.dof_) - np.log(self.scale_))
elif self.covariance_type == 'diag':
self.dof_ = 1 + 0.5 * n_features
self.dof_ *= np.ones((self.n_components, n_features))
self.scale_ = np.ones((self.n_components, n_features))
self.precs_ = np.ones((self.n_components, n_features))
self.bound_prec_ = 0.5 * (np.sum(digamma(self.dof_) -
np.log(self.scale_), 1))
self.bound_prec_ -= 0.5 * np.sum(self.precs_, 1)
elif self.covariance_type == 'tied':
self.dof_ = 1.
self.scale_ = np.identity(n_features)
self.precs_ = np.identity(n_features)
self.det_scale_ = 1.
self.bound_prec_ = 0.5 * wishart_log_det(
self.dof_, self.scale_, self.det_scale_, n_features)
self.bound_prec_ -= 0.5 * self.dof_ * np.trace(self.scale_)
elif self.covariance_type == 'full':
self.dof_ = (1 + self.n_components + X.shape[0])
self.dof_ *= np.ones(self.n_components)
self.scale_ = [2 * np.identity(n_features)
for _ in range(self.n_components)]
self.precs_ = [np.identity(n_features)
for _ in range(self.n_components)]
self.det_scale_ = np.ones(self.n_components)
self.bound_prec_ = np.zeros(self.n_components)
for k in range(self.n_components):
self.bound_prec_[k] = wishart_log_det(
self.dof_[k], self.scale_[k], self.det_scale_[k],
n_features)
self.bound_prec_[k] -= (self.dof_[k] *
np.trace(self.scale_[k]))
self.bound_prec_ *= 0.5
logprob = []
# reset self.converged_ to False
self.converged_ = False
for i in range(self.n_iter):
# Expectation step
curr_logprob, z = self.score_samples(X)
logprob.append(curr_logprob.sum() + self._logprior(z))
# Check for convergence.
if i > 0 and abs(logprob[-1] - logprob[-2]) < self.thresh:
self.converged_ = True
break
# Maximization step
self._do_mstep(X, z, self.params)
self._set_weights()
return self
class VBGMM(DPGMM):
"""Variational Inference for the Gaussian Mixture Model
Variational inference for a Gaussian mixture model probability
distribution. This class allows for easy and efficient inference
of an approximate posterior distribution over the parameters of a
Gaussian mixture model with a fixed number of components.
Initialization is with normally-distributed means and identity
covariance, for proper convergence.
Parameters
----------
n_components: int, optional
Number of mixture components. Defaults to 1.
covariance_type: string, optional
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
alpha: float, optional
Real number representing the concentration parameter of
the Dirichlet distribution. Intuitively, the higher the
value of alpha the more likely the variational mixture of
Gaussians model will use all components it can. Defaults
to 1.
Attributes
----------
covariance_type : string
String describing the type of covariance parameters used by
the DP-GMM. Must be one of 'spherical', 'tied', 'diag', 'full'.
n_features : int
Dimensionality of the Gaussians.
n_components : int (read-only)
Number of mixture components.
weights_ : array, shape (`n_components`,)
Mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
precs_ : array
Precision (inverse covariance) parameters for each mixture
component. The shape depends on `covariance_type`::
(`n_components`, 'n_features') if 'spherical',
(`n_features`, `n_features`) if 'tied',
(`n_components`, `n_features`) if 'diag',
(`n_components`, `n_features`, `n_features`) if 'full'
converged_ : bool
True when convergence was reached in fit(), False
otherwise.
See Also
--------
GMM : Finite Gaussian mixture model fit with EM
DPGMM : Infinite Gaussian mixture model, using the Dirichlet
process, fit with a variational algorithm
"""
def __init__(self, n_components=1, covariance_type='diag', alpha=1.0,
random_state=None, thresh=1e-2, verbose=False,
min_covar=None, n_iter=10, params='wmc', init_params='wmc'):
super(VBGMM, self).__init__(
n_components, covariance_type, random_state=random_state,
thresh=thresh, verbose=verbose, min_covar=min_covar,
n_iter=n_iter, params=params, init_params=init_params)
self.alpha = float(alpha) / n_components
def score_samples(self, X):
"""Return the likelihood of the data under the model.
Compute the bound on log probability of X under the model
and return the posterior distribution (responsibilities) of
each mixture component for each element of X.
This is done by computing the parameters for the mean-field of
z for each observation.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
responsibilities: array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
X = np.asarray(X)
if X.ndim == 1:
X = X[:, np.newaxis]
dg = digamma(self.gamma_) - digamma(np.sum(self.gamma_))
if self.covariance_type not in ['full', 'tied', 'diag', 'spherical']:
raise NotImplementedError("This ctype is not implemented: %s"
% self.covariance_type)
p = _bound_state_log_lik(X, self._initial_bound + self.bound_prec_,
self.precs_, self.means_,
self.covariance_type)
z = p + dg
z = log_normalize(z, axis=-1)
bound = np.sum(z * p, axis=-1)
return bound, z
def _update_concentration(self, z):
for i in range(self.n_components):
self.gamma_[i] = self.alpha + np.sum(z.T[i])
def _initialize_gamma(self):
self.gamma_ = self.alpha * np.ones(self.n_components)
def _bound_proportions(self, z):
logprior = 0.
dg = digamma(self.gamma_)
dg -= digamma(np.sum(self.gamma_))
logprior += np.sum(dg.reshape((-1, 1)) * z.T)
z_non_zeros = z[z > np.finfo(np.float32).eps]
logprior -= np.sum(z_non_zeros * np.log(z_non_zeros))
return logprior
def _bound_concentration(self):
logprior = 0.
logprior = gammaln(np.sum(self.gamma_)) - gammaln(self.n_components
* self.alpha)
logprior -= np.sum(gammaln(self.gamma_) - gammaln(self.alpha))
sg = digamma(np.sum(self.gamma_))
logprior += np.sum((self.gamma_ - self.alpha)
* (digamma(self.gamma_) - sg))
return logprior
def _monitor(self, X, z, n, end=False):
"""Monitor the lower bound during iteration
Debug method to help see exactly when it is failing to converge as
expected.
Note: this is very expensive and should not be used by default."""
if self.verbose:
print("Bound after updating %8s: %f" % (n, self.lower_bound(X, z)))
if end:
print("Cluster proportions:", self.gamma_)
print("covariance_type:", self.covariance_type)
def _set_weights(self):
self.weights_[:] = self.gamma_
self.weights_ /= np.sum(self.weights_)
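# Usage sketch (illustration only, assuming the scikit-learn API of this
# era): fit a truncated Dirichlet process mixture to two well-separated
# blobs and inspect how the variational posterior distributes its weight;
# most of the mass should land on about two of the ten allowed components.
def _dpgmm_demo():
    rng = np.random.RandomState(0)
    X = np.vstack([rng.randn(100, 2), rng.randn(100, 2) + 5.0])
    model = DPGMM(n_components=10, covariance_type='diag',
                  alpha=1.0, n_iter=100).fit(X)
    return model.weights_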
|
ankurankan/scikit-learn
|
sklearn/mixture/dpgmm.py
|
Python
|
bsd-3-clause
| 30,538
|
[
"Gaussian"
] |
1390d004d7b6feb1058636baa31251211f19da9be25fd43a136f48fcdf2f76fc
|
from __future__ import absolute_import
from . import tempdir as td
import os.path
import py.path
import pybol
import pytest
import numpy as np
import MDAnalysis as mda
from MDAnalysis.exceptions import NoDataError, SelectionError
from gromacs.utilities import in_dir
from ..analysis.ensemble import Ensemble, EnsembleAnalysis, EnsembleAtomGroup
from ..analysis.dihedral import DihedralAnalysis
from pkg_resources import resource_filename
RESOURCES = py.path.local(resource_filename(__name__, 'testing_resources'))
MANIFEST = RESOURCES.join("manifest.yml")
ensemble_keys = [('water', 'Coulomb', '0000'),
('water', 'Coulomb', '0500'),
('water', 'Coulomb', '1000'),
('water', 'VDW', '0000'),
('water', 'VDW', '0250'),
('water', 'VDW', '0500'),
('water', 'VDW', '1000')]
class TestEnsemble(object):
def setup(self):
self.tmpdir = td.TempDir()
self.m = pybol.Manifest(str(RESOURCES / 'manifest.yml'))
self.m.assemble('example_FEP', self.tmpdir.name)
def teardown(self):
self.tmpdir.dissolve()
def test_build_ensemble(self):
# Octanol will be added later
Sim = Ensemble(dirname=self.tmpdir.name, solvents=['water'])
diff = set(Sim.keys()) ^ set(ensemble_keys)
assert not diff
def test_kwargs(self):
l_dir = os.path.abspath(os.path.join(self.tmpdir.name, 'FEP', 'md.gro'))
bnz = Ensemble(dirname=self.tmpdir.name, solvents=['water'], topology_paths={'water': l_dir})
diff = set(bnz.keys()) ^ set(ensemble_keys)
assert not diff
def test_add_remove_systems(self):
with in_dir(self.tmpdir.name, create=False):
bnz = Ensemble()
l_dir = os.path.join(os.curdir, 'FEP', 'water', 'Coulomb', '0000')
top_dir = os.path.join(l_dir, 'md.gro')
trj_dir = os.path.join(l_dir, 'md_red.xtc')
U = mda.Universe(top_dir, trj_dir)
bnz.add_system(('water', 'Coulomb', '0000'), U)
assert bnz.keys() == [('water', 'Coulomb', '0000')]
assert bnz._num_systems == 1
assert bnz.__repr__() == "<Ensemble Containing 1 System>"
assert len(bnz) == 1
bnz.pop(('water', 'Coulomb', '0000'))
assert bnz._num_systems == 0
assert len(bnz) == 0
def test_select_atoms(self):
Sim = Ensemble(dirname=self.tmpdir.name, solvents=['water'])
solute = Sim.select_atoms('not resname SOL')
assert len(solute) == 7
for k in solute.keys():
assert len(solute[k]) == 42
def test_select_systems(self):
Sim = Ensemble(dirname=self.tmpdir.name, solvents=['water'])
Sel1 = Sim.select_systems(keys=[('water', 'Coulomb', '0000'),
('water', 'VDW', '0500')])
assert Sel1.keys() == [('water', 'Coulomb', '0000'),
('water', 'VDW', '0500')]
Sel2 = Sim.select_systems(solvents=['water'], interactions=['Coulomb'],
lambdas=['0000', '1000'])
assert Sel2.keys() == [('water', 'Coulomb', '0000'),
('water', 'Coulomb', '1000')]
Sel3 = Sim.select_systems(solvents=['water'], interactions=['VDW'],
lambda_range=[0, 1])
diff = set(Sel3.keys()) ^ set(ensemble_keys[3:])
assert not diff
def test_ensemble_ag_methods(self):
Solv_system = Ensemble(dirname=self.tmpdir.name, solvents=['water'])
Sol1 = Solv_system.select_atoms('resname SOL')
Sol2 = Sol1.select_atoms('resid 2')
Sol2_pos = Sol2.positions()
assert len(Sol2_pos) > 0
for k in Sol2_pos:
assert np.shape(Sol2_pos[k]) == (3, 3)
assert not Sol1 == Sol2
assert isinstance(Sol2, EnsembleAtomGroup)
assert Sol2 == Sol1.select_atoms('resid 2')
assert sorted(ensemble_keys) == sorted(Sol1.ensemble.keys())
Sol1._groups.pop(('water', 'Coulomb', '0000'))
Sol1._keys = Sol1._groups.keys()
assert not Sol1 == Sol2
pos2 = Sol2.positions(keys=[('water', 'Coulomb', '0000')])
assert np.shape(pos2[('water', 'Coulomb', '0000')]) == (3, 3)
def test_ensemble_init_exception(self):
with pytest.raises(FileNotFoundError):
Ensemble(dirname='foo')
def test_ensemble_build_exceptions(self):
with pytest.raises(NoDataError):
Ensemble(self.tmpdir.name, solvents=['test_solv'])
def test_ensemble_selection_error(self):
ens = Ensemble(dirname=self.tmpdir.name, solvents=['water'])
sel1 = ens.select_atoms('resid 1')
with pytest.raises(SelectionError):
ens.select_atoms('foo')
with pytest.raises(SelectionError):
sel1.select_atoms('foo')
def test_ensemble_analysis(self):
class TestAnalysis(EnsembleAnalysis):
def __init__(self, test_ensemble):
super(TestAnalysis, self).__init__(test_ensemble)
self._ens = test_ensemble
def _prepare_ensemble(self):
self.key_list = []
def _single_universe(self):
self.key_list.append(self._key)
def _single_frame(self):
assert len(self._system.select_atoms('not resname SOL')) == 42
def _conclude_universe(self):
assert self.n_frames == self.stop
Sim = Ensemble(dirname=self.tmpdir.name, solvents=['water'])
TestRun = TestAnalysis(Sim).run(start=0, step=1, stop=10)
assert Sim.keys() == TestRun.key_list
def test_value_error(self):
ens = Ensemble(dirname=self.tmpdir.name, solvents=['water'])
copy_ens = Ensemble()
copy_ens._ensemble_dir = self.tmpdir.name
for k in ens.keys():
copy_ens.add_system(k, ens[k])
dh1 = ens.select_atoms('name C4 or name C17 or name S2 or name N3')
dh2 = copy_ens.select_atoms('name C4 or name C17 or name S2 or name N3')
dh3 = ens.select_atoms('name C4 or name C17 or name S2 or name N3')
dh4 = ens.select_atoms('name C4 or name C17 or name S2 or name N3')
with pytest.raises(ValueError):
dh_run = DihedralAnalysis([dh1, dh2, dh4, dh3]).run(start=0, stop=4, step=1)
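# Usage sketch (illustration only): outside the test harness an Ensemble is
# built from an MDPOW FEP directory tree (the 'FEP/water/...' layout assumed
# above) and queried much like a single MDAnalysis Universe.
def _ensemble_demo(fep_dirname):
    ens = Ensemble(dirname=fep_dirname, solvents=['water'])
    solute = ens.select_atoms('not resname SOL')
    return {k: len(solute[k]) for k in solute.keys()}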
|
Becksteinlab/MDPOW
|
mdpow/tests/test_ensemble.py
|
Python
|
gpl-3.0
| 6,424
|
[
"Gromacs",
"MDAnalysis"
] |
949bda64fb5eb4e58060e45930a23b3a3051cc9e4b70bb053f57c30df5908f7f
|
"""Test analytical calculation of gradients of the target function versus finite
difference calculations"""
from __future__ import annotations
def test(args=[]):
# Python and cctbx imports
import random
from math import pi
from cctbx.sgtbx import space_group, space_group_symbols
# We will set up a mock scan and a mock experiment list
from dxtbx.model import ScanFactory
from dxtbx.model.experiment_list import Experiment, ExperimentList
from libtbx.phil import parse
from libtbx.test_utils import approx_equal
from scitbx import matrix
from scitbx.array_family import flex
from dials.algorithms.refinement.parameterisation.beam_parameters import (
BeamParameterisation,
)
from dials.algorithms.refinement.parameterisation.crystal_parameters import (
CrystalOrientationParameterisation,
CrystalUnitCellParameterisation,
)
# Model parameterisations
from dials.algorithms.refinement.parameterisation.detector_parameters import (
DetectorParameterisationSinglePanel,
)
# Parameterisation of the prediction equation
from dials.algorithms.refinement.parameterisation.prediction_parameters import (
XYPhiPredictionParameterisation,
)
from dials.algorithms.refinement.prediction.managed_predictors import (
ScansExperimentsPredictor,
ScansRayPredictor,
)
from dials.algorithms.refinement.reflection_manager import ReflectionManager
# Imports for the target function
from dials.algorithms.refinement.target import (
LeastSquaresPositionalResidualWithRmsdCutoff,
)
# Reflection prediction
from dials.algorithms.spot_prediction import IndexGenerator, ray_intersection
# Experimental model builder
from dials.tests.algorithms.refinement.setup_geometry import Extract
# Local functions
def random_direction_close_to(vector, sd=0.5):
return vector.rotate_around_origin(
matrix.col((random.random(), random.random(), random.random())).normalize(),
random.gauss(0, sd),
deg=True,
)
#############################
# Setup experimental models #
#############################
# make a small cell to speed up calculations
overrides = """geometry.parameters.crystal.a.length.range = 10 15
geometry.parameters.crystal.b.length.range = 10 15
geometry.parameters.crystal.c.length.range = 10 15"""
master_phil = parse(
"""
include scope dials.tests.algorithms.refinement.geometry_phil
""",
process_includes=True,
)
models = Extract(master_phil, overrides, cmdline_args=args)
mydetector = models.detector
mygonio = models.goniometer
mycrystal = models.crystal
mybeam = models.beam
# Build a mock scan for a 180 degree sequence of 0.1 degree images
sf = ScanFactory()
myscan = sf.make_scan(
image_range=(1, 1800),
exposure_times=0.1,
oscillation=(0, 0.1),
epochs=list(range(1800)),
deg=True,
)
sequence_range = myscan.get_oscillation_range(deg=False)
im_width = myscan.get_oscillation(deg=False)[1]
assert sequence_range == (0.0, pi)
assert approx_equal(im_width, 0.1 * pi / 180.0)
experiments = ExperimentList()
experiments.append(
Experiment(
beam=mybeam,
detector=mydetector,
goniometer=mygonio,
scan=myscan,
crystal=mycrystal,
imageset=None,
)
)
###########################
# Parameterise the models #
###########################
det_param = DetectorParameterisationSinglePanel(mydetector)
s0_param = BeamParameterisation(mybeam, mygonio)
xlo_param = CrystalOrientationParameterisation(mycrystal)
xluc_param = CrystalUnitCellParameterisation(mycrystal)
########################################################################
# Link model parameterisations together into a parameterisation of the #
# prediction equation #
########################################################################
pred_param = XYPhiPredictionParameterisation(
experiments, [det_param], [s0_param], [xlo_param], [xluc_param]
)
################################
# Apply known parameter shifts #
################################
# shift detector by 2.0 mm in each translation and 2.0 mrad in each rotation
det_p_vals = det_param.get_param_vals()
p_vals = [a + b for a, b in zip(det_p_vals, [2.0, 2.0, 2.0, 2.0, 2.0, 2.0])]
det_param.set_param_vals(p_vals)
# shift beam by 2 mrad in one axis
s0_p_vals = s0_param.get_param_vals()
p_vals = list(s0_p_vals)
p_vals[1] += 2.0
s0_param.set_param_vals(p_vals)
# rotate crystal a bit (=2 mrad each rotation)
xlo_p_vals = xlo_param.get_param_vals()
p_vals = [a + b for a, b in zip(xlo_p_vals, [2.0, 2.0, 2.0])]
xlo_param.set_param_vals(p_vals)
#############################
# Generate some reflections #
#############################
# All indices in a 2.0 Angstrom sphere
resolution = 2.0
index_generator = IndexGenerator(
mycrystal.get_unit_cell(),
space_group(space_group_symbols(1).hall()).type(),
resolution,
)
indices = index_generator.to_array()
# Predict rays within the sequence range
ray_predictor = ScansRayPredictor(experiments, sequence_range)
obs_refs = ray_predictor(indices)
# Take only those rays that intersect the detector
intersects = ray_intersection(mydetector, obs_refs)
obs_refs = obs_refs.select(intersects)
# Make a reflection predictor and re-predict for all these reflections. The
# result is the same, but we gain also the flags and xyzcal.px columns
ref_predictor = ScansExperimentsPredictor(experiments)
obs_refs["id"] = flex.int(len(obs_refs), 0)
obs_refs = ref_predictor(obs_refs)
# Set 'observed' centroids from the predicted ones
obs_refs["xyzobs.mm.value"] = obs_refs["xyzcal.mm"]
# Invent some variances for the centroid positions of the simulated data
im_width = 0.1 * pi / 180.0
px_size = mydetector[0].get_pixel_size()
var_x = flex.double(len(obs_refs), (px_size[0] / 2.0) ** 2)
var_y = flex.double(len(obs_refs), (px_size[1] / 2.0) ** 2)
var_phi = flex.double(len(obs_refs), (im_width / 2.0) ** 2)
obs_refs["xyzobs.mm.variance"] = flex.vec3_double(var_x, var_y, var_phi)
###############################
# Undo known parameter shifts #
###############################
s0_param.set_param_vals(s0_p_vals)
det_param.set_param_vals(det_p_vals)
xlo_param.set_param_vals(xlo_p_vals)
#####################################
# Select reflections for refinement #
#####################################
refman = ReflectionManager(obs_refs, experiments)
##############################
# Set up the target function #
##############################
# Redefine the reflection predictor to use the type expected by the Target class
ref_predictor = ScansExperimentsPredictor(experiments)
mytarget = LeastSquaresPositionalResidualWithRmsdCutoff(
experiments, ref_predictor, refman, pred_param, restraints_parameterisation=None
)
# get the functional and gradients
mytarget.predict()
L, dL_dp, curvs = mytarget.compute_functional_gradients_and_curvatures()
####################################
# Do FD calculation for comparison #
####################################
# function for calculating finite difference gradients of the target function
def get_fd_gradients(target, pred_param, deltas):
"""Calculate centered finite difference gradients for each of the
parameters of the target function.
"deltas" must be a sequence of the same length as the parameter list, and
contains the step size for the difference calculations for each parameter.
"""
p_vals = pred_param.get_param_vals()
assert len(deltas) == len(p_vals)
fd_grad = []
fd_curvs = []
for i in range(len(deltas)):
val = p_vals[i]
p_vals[i] -= deltas[i] / 2.0
pred_param.set_param_vals(p_vals)
target.predict()
rev_state = target.compute_functional_gradients_and_curvatures()
p_vals[i] += deltas[i]
pred_param.set_param_vals(p_vals)
target.predict()
fwd_state = target.compute_functional_gradients_and_curvatures()
# finite difference estimation of first derivatives
fd_grad.append((fwd_state[0] - rev_state[0]) / deltas[i])
# finite difference estimation of curvatures, using the analytical
# first derivatives
fd_curvs.append((fwd_state[1][i] - rev_state[1][i]) / deltas[i])
# set parameter back to centred value
p_vals[i] = val
# return to the initial state
pred_param.set_param_vals(p_vals)
return fd_grad, fd_curvs
# test normalised differences between FD and analytical calculations
fdgrads = get_fd_gradients(mytarget, pred_param, [1.0e-7] * len(pred_param))
diffs = [a - b for a, b in zip(dL_dp, fdgrads[0])]
norm_diffs = tuple([a / b for a, b in zip(diffs, fdgrads[0])])
for e in norm_diffs:
assert abs(e) < 0.001 # check differences less than 0.1%
# test normalised differences between FD curvatures and analytical least
# squares approximation. We don't expect this to be especially close
if curvs:
diffs = [a - b for a, b in zip(curvs, fdgrads[1])]
norm_diffs = tuple([a / b for a, b in zip(diffs, fdgrads[1])])
for e in norm_diffs:
assert abs(e) < 0.1 # check differences less than 10%
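# Illustrative sketch (not part of the test): the centred finite-difference
# scheme that get_fd_gradients applies to the refinement target, shown on a
# toy function with a known gradient, f(p) = p0**2 + 3*p1, df/dp = (2*p0, 3).
def _central_fd_demo():
    def f(p):
        return p[0] ** 2 + 3.0 * p[1]
    p = [2.0, 1.0]
    delta = 1.0e-7
    fd_grad = []
    for i in range(len(p)):
        p[i] -= delta / 2.0
        rev = f(p)
        p[i] += delta
        fwd = f(p)
        p[i] -= delta / 2.0          # restore the centred value
        fd_grad.append((fwd - rev) / delta)
    assert abs(fd_grad[0] - 4.0) < 1.0e-4
    assert abs(fd_grad[1] - 3.0) < 1.0e-4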
|
dials/dials
|
tests/algorithms/refinement/test_finite_diffs.py
|
Python
|
bsd-3-clause
| 9,921
|
[
"CRYSTAL"
] |
582392a09f37c1da11476e0ab49f62ff717896ef13f1ba0917cf2aeec3e72f66
|
# Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from robot.model import SuiteVisitor
from robot.utils import plural_or_not, secs_to_timestr
from .highlighting import HighlightingStream
class DottedOutput(object):
def __init__(self, width=78, colors='AUTO', stdout=None, stderr=None):
self._width = width
self._stdout = HighlightingStream(stdout or sys.__stdout__, colors)
self._stderr = HighlightingStream(stderr or sys.__stderr__, colors)
self._markers_on_row = 0
def start_suite(self, suite):
if not suite.parent:
self._stdout.write("Running suite '%s' with %d tests.\n"
% (suite.name, suite.test_count))
self._stdout.write('=' * self._width + '\n')
def end_test(self, test):
if self._markers_on_row == self._width:
self._stdout.write('\n')
self._markers_on_row = 0
self._markers_on_row += 1
if test.passed:
self._stdout.write('.')
elif 'robot-exit' in test.tags:
self._stdout.write('x')
elif not test.critical:
self._stdout.write('f')
else:
self._stdout.highlight('F', 'FAIL')
def end_suite(self, suite):
if not suite.parent:
self._stdout.write('\n')
StatusReporter(self._stdout, self._width).report(suite)
self._stdout.write('\n')
def message(self, msg):
if msg.level in ('WARN', 'ERROR'):
self._stderr.error(msg.message, msg.level)
def output_file(self, name, path):
self._stdout.write('%-8s %s\n' % (name+':', path))
class StatusReporter(SuiteVisitor):
def __init__(self, stream, width):
self._stream = stream
self._width = width
def report(self, suite):
suite.visit(self)
stats = suite.statistics
self._stream.write("%s\nRun suite '%s' with %d test%s in %s.\n\n"
% ('=' * self._width, suite.name,
stats.all.total, plural_or_not(stats.all.total),
secs_to_timestr(suite.elapsedtime/1000.0)))
self._stream.highlight(suite.status + 'ED', suite.status)
self._stream.write('\n%s\n' % stats.message)
def visit_test(self, test):
if not test.passed and test.critical and 'robot-exit' not in test.tags:
self._stream.write('-' * self._width + '\n')
self._stream.highlight('FAIL')
self._stream.write(': %s\n%s\n' % (test.longname,
test.message.strip()))
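# Illustrative sketch (not part of Robot Framework): the marker logic in
# DottedOutput.end_test maps each finished test to a single character --
# '.' for a pass, 'x' for a test skipped via the robot-exit tag, 'f' for a
# non-critical failure, and a highlighted 'F' for a critical failure.
def _marker_for(test):
    if test.passed:
        return '.'
    if 'robot-exit' in test.tags:
        return 'x'
    if not test.critical:
        return 'f'
    return 'F'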
|
alexandrul-ci/robotframework
|
src/robot/output/console/dotted.py
|
Python
|
apache-2.0
| 3,228
|
[
"VisIt"
] |
8b6c03d1f81051a58f4e7ee681377b797adaacb11608a78022d3837495536e85
|
# encoding: utf-8
"""
Tests for IPython.utils.traitlets.
Authors:
* Brian Granger
* Enthought, Inc. Some of the code in this file comes from enthought.traits
and is licensed under the BSD license. Also, many of the ideas also come
from enthought.traits even though our implementation is very different.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import re
import sys
from unittest import TestCase
import nose.tools as nt
from nose import SkipTest
from IPython.utils.traitlets import (
HasTraits, MetaHasTraits, TraitType, Any, CBytes, Dict,
Int, Long, Integer, Float, Complex, Bytes, Unicode, TraitError,
Undefined, Type, This, Instance, TCPAddress, List, Tuple,
ObjectName, DottedObjectName, CRegExp, link
)
from IPython.utils import py3compat
from IPython.testing.decorators import skipif
#-----------------------------------------------------------------------------
# Helper classes for testing
#-----------------------------------------------------------------------------
class HasTraitsStub(HasTraits):
def _notify_trait(self, name, old, new):
self._notify_name = name
self._notify_old = old
self._notify_new = new
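# Minimal sketch (not from the original suite) of the observer pattern these
# tests exercise: a handler registered with on_trait_change fires whenever an
# assignment actually changes the trait's value.
class _ObservedPoint(HasTraits):
    x = Int(0)

def _observer_demo():
    seen = []
    p = _ObservedPoint()
    p.on_trait_change(lambda name, old, new: seen.append((name, old, new)), 'x')
    p.x = 5
    assert seen == [('x', 0, 5)]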
#-----------------------------------------------------------------------------
# Test classes
#-----------------------------------------------------------------------------
class TestTraitType(TestCase):
def test_get_undefined(self):
class A(HasTraits):
a = TraitType
a = A()
self.assertEqual(a.a, Undefined)
def test_set(self):
class A(HasTraitsStub):
a = TraitType
a = A()
a.a = 10
self.assertEqual(a.a, 10)
self.assertEqual(a._notify_name, 'a')
self.assertEqual(a._notify_old, Undefined)
self.assertEqual(a._notify_new, 10)
def test_validate(self):
class MyTT(TraitType):
def validate(self, inst, value):
return -1
class A(HasTraitsStub):
tt = MyTT
a = A()
a.tt = 10
self.assertEqual(a.tt, -1)
def test_default_validate(self):
class MyIntTT(TraitType):
def validate(self, obj, value):
if isinstance(value, int):
return value
self.error(obj, value)
class A(HasTraits):
tt = MyIntTT(10)
a = A()
self.assertEqual(a.tt, 10)
# Defaults are validated when the HasTraits is instantiated
class B(HasTraits):
tt = MyIntTT('bad default')
self.assertRaises(TraitError, B)
def test_is_valid_for(self):
class MyTT(TraitType):
def is_valid_for(self, value):
return True
class A(HasTraits):
tt = MyTT
a = A()
a.tt = 10
self.assertEqual(a.tt, 10)
def test_value_for(self):
class MyTT(TraitType):
def value_for(self, value):
return 20
class A(HasTraits):
tt = MyTT
a = A()
a.tt = 10
self.assertEqual(a.tt, 20)
def test_info(self):
class A(HasTraits):
tt = TraitType
a = A()
self.assertEqual(A.tt.info(), 'any value')
def test_error(self):
class A(HasTraits):
tt = TraitType
a = A()
self.assertRaises(TraitError, A.tt.error, a, 10)
def test_dynamic_initializer(self):
class A(HasTraits):
x = Int(10)
def _x_default(self):
return 11
class B(A):
x = Int(20)
class C(A):
def _x_default(self):
return 21
a = A()
self.assertEqual(a._trait_values, {})
self.assertEqual(list(a._trait_dyn_inits.keys()), ['x'])
self.assertEqual(a.x, 11)
self.assertEqual(a._trait_values, {'x': 11})
b = B()
self.assertEqual(b._trait_values, {'x': 20})
self.assertEqual(list(a._trait_dyn_inits.keys()), ['x'])
self.assertEqual(b.x, 20)
c = C()
self.assertEqual(c._trait_values, {})
self.assertEqual(list(a._trait_dyn_inits.keys()), ['x'])
self.assertEqual(c.x, 21)
self.assertEqual(c._trait_values, {'x': 21})
# Ensure that the base class remains unmolested when the _default
# initializer gets overridden in a subclass.
a = A()
c = C()
self.assertEqual(a._trait_values, {})
self.assertEqual(list(a._trait_dyn_inits.keys()), ['x'])
self.assertEqual(a.x, 11)
self.assertEqual(a._trait_values, {'x': 11})
class TestHasTraitsMeta(TestCase):
def test_metaclass(self):
self.assertEqual(type(HasTraits), MetaHasTraits)
class A(HasTraits):
a = Int
a = A()
self.assertEqual(type(a.__class__), MetaHasTraits)
self.assertEqual(a.a,0)
a.a = 10
self.assertEqual(a.a,10)
class B(HasTraits):
b = Int()
b = B()
self.assertEqual(b.b,0)
b.b = 10
self.assertEqual(b.b,10)
class C(HasTraits):
c = Int(30)
c = C()
self.assertEqual(c.c,30)
c.c = 10
self.assertEqual(c.c,10)
def test_this_class(self):
class A(HasTraits):
t = This()
tt = This()
class B(A):
tt = This()
ttt = This()
self.assertEqual(A.t.this_class, A)
self.assertEqual(B.t.this_class, A)
self.assertEqual(B.tt.this_class, B)
self.assertEqual(B.ttt.this_class, B)
class TestHasTraitsNotify(TestCase):
def setUp(self):
self._notify1 = []
self._notify2 = []
def notify1(self, name, old, new):
self._notify1.append((name, old, new))
def notify2(self, name, old, new):
self._notify2.append((name, old, new))
def test_notify_all(self):
class A(HasTraits):
a = Int
b = Float
a = A()
a.on_trait_change(self.notify1)
a.a = 0
self.assertEqual(len(self._notify1),0)
a.b = 0.0
self.assertEqual(len(self._notify1),0)
a.a = 10
self.assertTrue(('a',0,10) in self._notify1)
a.b = 10.0
self.assertTrue(('b',0.0,10.0) in self._notify1)
self.assertRaises(TraitError,setattr,a,'a','bad string')
self.assertRaises(TraitError,setattr,a,'b','bad string')
self._notify1 = []
a.on_trait_change(self.notify1,remove=True)
a.a = 20
a.b = 20.0
self.assertEqual(len(self._notify1),0)
def test_notify_one(self):
class A(HasTraits):
a = Int
b = Float
a = A()
a.on_trait_change(self.notify1, 'a')
a.a = 0
self.assertEqual(len(self._notify1),0)
a.a = 10
self.assertTrue(('a',0,10) in self._notify1)
self.assertRaises(TraitError,setattr,a,'a','bad string')
def test_subclass(self):
class A(HasTraits):
a = Int
class B(A):
b = Float
b = B()
self.assertEqual(b.a,0)
self.assertEqual(b.b,0.0)
b.a = 100
b.b = 100.0
self.assertEqual(b.a,100)
self.assertEqual(b.b,100.0)
def test_notify_subclass(self):
class A(HasTraits):
a = Int
class B(A):
b = Float
b = B()
b.on_trait_change(self.notify1, 'a')
b.on_trait_change(self.notify2, 'b')
b.a = 0
b.b = 0.0
self.assertEqual(len(self._notify1),0)
self.assertEqual(len(self._notify2),0)
b.a = 10
b.b = 10.0
self.assertTrue(('a',0,10) in self._notify1)
self.assertTrue(('b',0.0,10.0) in self._notify2)
def test_static_notify(self):
class A(HasTraits):
a = Int
_notify1 = []
def _a_changed(self, name, old, new):
self._notify1.append((name, old, new))
a = A()
a.a = 0
# This is broken!!!
self.assertEqual(len(a._notify1),0)
a.a = 10
self.assertTrue(('a',0,10) in a._notify1)
class B(A):
b = Float
_notify2 = []
def _b_changed(self, name, old, new):
self._notify2.append((name, old, new))
b = B()
b.a = 10
b.b = 10.0
self.assertTrue(('a',0,10) in b._notify1)
self.assertTrue(('b',0.0,10.0) in b._notify2)
def test_notify_args(self):
def callback0():
self.cb = ()
def callback1(name):
self.cb = (name,)
def callback2(name, new):
self.cb = (name, new)
def callback3(name, old, new):
self.cb = (name, old, new)
class A(HasTraits):
a = Int
a = A()
a.on_trait_change(callback0, 'a')
a.a = 10
self.assertEqual(self.cb,())
a.on_trait_change(callback0, 'a', remove=True)
a.on_trait_change(callback1, 'a')
a.a = 100
self.assertEqual(self.cb,('a',))
a.on_trait_change(callback1, 'a', remove=True)
a.on_trait_change(callback2, 'a')
a.a = 1000
self.assertEqual(self.cb,('a',1000))
a.on_trait_change(callback2, 'a', remove=True)
a.on_trait_change(callback3, 'a')
a.a = 10000
self.assertEqual(self.cb,('a',1000,10000))
a.on_trait_change(callback3, 'a', remove=True)
self.assertEqual(len(a._trait_notifiers['a']),0)
def test_notify_only_once(self):
class A(HasTraits):
listen_to = ['a']
a = Int(0)
b = 0
def __init__(self, **kwargs):
super(A, self).__init__(**kwargs)
self.on_trait_change(self.listener1, ['a'])
def listener1(self, name, old, new):
self.b += 1
class B(A):
c = 0
d = 0
def __init__(self, **kwargs):
super(B, self).__init__(**kwargs)
self.on_trait_change(self.listener2)
def listener2(self, name, old, new):
self.c += 1
def _a_changed(self, name, old, new):
self.d += 1
b = B()
b.a += 1
self.assertEqual(b.b, b.c)
self.assertEqual(b.b, b.d)
b.a += 1
self.assertEqual(b.b, b.c)
self.assertEqual(b.b, b.d)
class TestHasTraits(TestCase):
def test_trait_names(self):
class A(HasTraits):
i = Int
f = Float
a = A()
self.assertEqual(sorted(a.trait_names()),['f','i'])
self.assertEqual(sorted(A.class_trait_names()),['f','i'])
def test_trait_metadata(self):
class A(HasTraits):
i = Int(config_key='MY_VALUE')
a = A()
self.assertEqual(a.trait_metadata('i','config_key'), 'MY_VALUE')
def test_traits(self):
class A(HasTraits):
i = Int
f = Float
a = A()
self.assertEqual(a.traits(), dict(i=A.i, f=A.f))
self.assertEqual(A.class_traits(), dict(i=A.i, f=A.f))
def test_traits_metadata(self):
class A(HasTraits):
i = Int(config_key='VALUE1', other_thing='VALUE2')
f = Float(config_key='VALUE3', other_thing='VALUE2')
j = Int(0)
a = A()
self.assertEqual(a.traits(), dict(i=A.i, f=A.f, j=A.j))
traits = a.traits(config_key='VALUE1', other_thing='VALUE2')
self.assertEqual(traits, dict(i=A.i))
# This passes, but it shouldn't because I am replicating a bug in
# traits.
traits = a.traits(config_key=lambda v: True)
self.assertEqual(traits, dict(i=A.i, f=A.f, j=A.j))
def test_init(self):
class A(HasTraits):
i = Int()
x = Float()
a = A(i=1, x=10.0)
self.assertEqual(a.i, 1)
self.assertEqual(a.x, 10.0)
def test_positional_args(self):
class A(HasTraits):
i = Int(0)
def __init__(self, i):
super(A, self).__init__()
self.i = i
a = A(5)
self.assertEqual(a.i, 5)
# should raise TypeError if no positional arg given
self.assertRaises(TypeError, A)
#-----------------------------------------------------------------------------
# Tests for specific trait types
#-----------------------------------------------------------------------------
class TestType(TestCase):
def test_default(self):
class B(object): pass
class A(HasTraits):
klass = Type
a = A()
self.assertEqual(a.klass, None)
a.klass = B
self.assertEqual(a.klass, B)
self.assertRaises(TraitError, setattr, a, 'klass', 10)
def test_value(self):
class B(object): pass
class C(object): pass
class A(HasTraits):
klass = Type(B)
a = A()
self.assertEqual(a.klass, B)
self.assertRaises(TraitError, setattr, a, 'klass', C)
self.assertRaises(TraitError, setattr, a, 'klass', object)
a.klass = B
def test_allow_none(self):
class B(object): pass
class C(B): pass
class A(HasTraits):
klass = Type(B, allow_none=False)
a = A()
self.assertEqual(a.klass, B)
self.assertRaises(TraitError, setattr, a, 'klass', None)
a.klass = C
self.assertEqual(a.klass, C)
def test_validate_klass(self):
class A(HasTraits):
klass = Type('no strings allowed')
self.assertRaises(ImportError, A)
class A(HasTraits):
klass = Type('rub.adub.Duck')
self.assertRaises(ImportError, A)
def test_validate_default(self):
class B(object): pass
class A(HasTraits):
klass = Type('bad default', B)
self.assertRaises(ImportError, A)
class C(HasTraits):
klass = Type(None, B, allow_none=False)
self.assertRaises(TraitError, C)
def test_str_klass(self):
class A(HasTraits):
klass = Type('IPython.utils.ipstruct.Struct')
from IPython.utils.ipstruct import Struct
a = A()
a.klass = Struct
self.assertEqual(a.klass, Struct)
self.assertRaises(TraitError, setattr, a, 'klass', 10)
class TestInstance(TestCase):
def test_basic(self):
class Foo(object): pass
class Bar(Foo): pass
class Bah(object): pass
class A(HasTraits):
inst = Instance(Foo)
a = A()
self.assertTrue(a.inst is None)
a.inst = Foo()
self.assertTrue(isinstance(a.inst, Foo))
a.inst = Bar()
self.assertTrue(isinstance(a.inst, Foo))
self.assertRaises(TraitError, setattr, a, 'inst', Foo)
self.assertRaises(TraitError, setattr, a, 'inst', Bar)
self.assertRaises(TraitError, setattr, a, 'inst', Bah())
def test_unique_default_value(self):
class Foo(object): pass
class A(HasTraits):
inst = Instance(Foo,(),{})
a = A()
b = A()
self.assertTrue(a.inst is not b.inst)
def test_args_kw(self):
class Foo(object):
def __init__(self, c): self.c = c
class Bar(object): pass
class Bah(object):
def __init__(self, c, d):
self.c = c; self.d = d
class A(HasTraits):
inst = Instance(Foo, (10,))
a = A()
self.assertEqual(a.inst.c, 10)
class B(HasTraits):
inst = Instance(Bah, args=(10,), kw=dict(d=20))
b = B()
self.assertEqual(b.inst.c, 10)
self.assertEqual(b.inst.d, 20)
class C(HasTraits):
inst = Instance(Foo)
c = C()
self.assertTrue(c.inst is None)
def test_bad_default(self):
class Foo(object): pass
class A(HasTraits):
inst = Instance(Foo, allow_none=False)
self.assertRaises(TraitError, A)
def test_instance(self):
class Foo(object): pass
def inner():
class A(HasTraits):
inst = Instance(Foo())
self.assertRaises(TraitError, inner)
class TestThis(TestCase):
def test_this_class(self):
class Foo(HasTraits):
this = This
f = Foo()
self.assertEqual(f.this, None)
g = Foo()
f.this = g
self.assertEqual(f.this, g)
self.assertRaises(TraitError, setattr, f, 'this', 10)
def test_this_inst(self):
class Foo(HasTraits):
this = This()
f = Foo()
f.this = Foo()
self.assertTrue(isinstance(f.this, Foo))
def test_subclass(self):
class Foo(HasTraits):
t = This()
class Bar(Foo):
pass
f = Foo()
b = Bar()
f.t = b
b.t = f
self.assertEqual(f.t, b)
self.assertEqual(b.t, f)
def test_subclass_override(self):
class Foo(HasTraits):
t = This()
class Bar(Foo):
t = This()
f = Foo()
b = Bar()
f.t = b
self.assertEqual(f.t, b)
self.assertRaises(TraitError, setattr, b, 't', f)
class TraitTestBase(TestCase):
"""A best testing class for basic trait types."""
def assign(self, value):
self.obj.value = value
def coerce(self, value):
return value
def test_good_values(self):
if hasattr(self, '_good_values'):
for value in self._good_values:
self.assign(value)
self.assertEqual(self.obj.value, self.coerce(value))
def test_bad_values(self):
if hasattr(self, '_bad_values'):
for value in self._bad_values:
try:
self.assertRaises(TraitError, self.assign, value)
except AssertionError:
assert False, value
def test_default_value(self):
if hasattr(self, '_default_value'):
self.assertEqual(self._default_value, self.obj.value)
def tearDown(self):
# restore default value after tests, if set
if hasattr(self, '_default_value'):
self.obj.value = self._default_value
class AnyTrait(HasTraits):
value = Any
class AnyTraitTest(TraitTestBase):
obj = AnyTrait()
_default_value = None
_good_values = [10.0, 'ten', u'ten', [10], {'ten': 10},(10,), None, 1j]
_bad_values = []
class IntTrait(HasTraits):
value = Int(99)
class TestInt(TraitTestBase):
obj = IntTrait()
_default_value = 99
_good_values = [10, -10]
_bad_values = ['ten', u'ten', [10], {'ten': 10},(10,), None, 1j,
10.1, -10.1, '10L', '-10L', '10.1', '-10.1', u'10L',
u'-10L', u'10.1', u'-10.1', '10', '-10', u'10', u'-10']
if not py3compat.PY3:
_bad_values.extend([long(10), long(-10), 10*sys.maxint, -10*sys.maxint])
class LongTrait(HasTraits):
value = Long(99 if py3compat.PY3 else long(99))
class TestLong(TraitTestBase):
obj = LongTrait()
_default_value = 99 if py3compat.PY3 else long(99)
_good_values = [10, -10]
_bad_values = ['ten', u'ten', [10], {'ten': 10},(10,),
None, 1j, 10.1, -10.1, '10', '-10', '10L', '-10L', '10.1',
'-10.1', u'10', u'-10', u'10L', u'-10L', u'10.1',
u'-10.1']
if not py3compat.PY3:
# maxint undefined on py3, because int == long
_good_values.extend([long(10), long(-10), 10*sys.maxint, -10*sys.maxint])
_bad_values.extend([[long(10)], (long(10),)])
@skipif(py3compat.PY3, "not relevant on py3")
def test_cast_small(self):
"""Long casts ints to long"""
self.obj.value = 10
self.assertEqual(type(self.obj.value), long)
class IntegerTrait(HasTraits):
value = Integer(1)
class TestInteger(TestLong):
obj = IntegerTrait()
_default_value = 1
def coerce(self, n):
return int(n)
@skipif(py3compat.PY3, "not relevant on py3")
def test_cast_small(self):
"""Integer casts small longs to int"""
if py3compat.PY3:
raise SkipTest("not relevant on py3")
self.obj.value = long(100)
self.assertEqual(type(self.obj.value), int)
class FloatTrait(HasTraits):
value = Float(99.0)
class TestFloat(TraitTestBase):
obj = FloatTrait()
_default_value = 99.0
_good_values = [10, -10, 10.1, -10.1]
_bad_values = ['ten', u'ten', [10], {'ten': 10},(10,), None,
1j, '10', '-10', '10L', '-10L', '10.1', '-10.1', u'10',
u'-10', u'10L', u'-10L', u'10.1', u'-10.1']
if not py3compat.PY3:
_bad_values.extend([long(10), long(-10)])
class ComplexTrait(HasTraits):
value = Complex(99.0-99.0j)
class TestComplex(TraitTestBase):
obj = ComplexTrait()
_default_value = 99.0-99.0j
_good_values = [10, -10, 10.1, -10.1, 10j, 10+10j, 10-10j,
10.1j, 10.1+10.1j, 10.1-10.1j]
_bad_values = [u'10L', u'-10L', 'ten', [10], {'ten': 10},(10,), None]
if not py3compat.PY3:
_bad_values.extend([long(10), long(-10)])
class BytesTrait(HasTraits):
value = Bytes(b'string')
class TestBytes(TraitTestBase):
obj = BytesTrait()
_default_value = b'string'
_good_values = [b'10', b'-10', b'10L',
b'-10L', b'10.1', b'-10.1', b'string']
_bad_values = [10, -10, 10.1, -10.1, 1j, [10],
['ten'],{'ten': 10},(10,), None, u'string']
if not py3compat.PY3:
_bad_values.extend([long(10), long(-10)])
class UnicodeTrait(HasTraits):
value = Unicode(u'unicode')
class TestUnicode(TraitTestBase):
obj = UnicodeTrait()
_default_value = u'unicode'
_good_values = ['10', '-10', '10L', '-10L', '10.1',
'-10.1', '', u'', 'string', u'string', u"€"]
_bad_values = [10, -10, 10.1, -10.1, 1j,
[10], ['ten'], [u'ten'], {'ten': 10},(10,), None]
if not py3compat.PY3:
_bad_values.extend([long(10), long(-10)])
class ObjectNameTrait(HasTraits):
value = ObjectName("abc")
class TestObjectName(TraitTestBase):
obj = ObjectNameTrait()
_default_value = "abc"
_good_values = ["a", "gh", "g9", "g_", "_G", u"a345_"]
_bad_values = [1, "", u"€", "9g", "!", "#abc", "aj@", "a.b", "a()", "a[0]",
object(), object]
if sys.version_info[0] < 3:
_bad_values.append(u"þ")
else:
_good_values.append(u"þ") # þ=1 is valid in Python 3 (PEP 3131).
class DottedObjectNameTrait(HasTraits):
value = DottedObjectName("a.b")
class TestDottedObjectName(TraitTestBase):
obj = DottedObjectNameTrait()
_default_value = "a.b"
_good_values = ["A", "y.t", "y765.__repr__", "os.path.join", u"os.path.join"]
_bad_values = [1, u"abc.€", "_.@", ".", ".abc", "abc.", ".abc."]
if sys.version_info[0] < 3:
_bad_values.append(u"t.þ")
else:
_good_values.append(u"t.þ")
class TCPAddressTrait(HasTraits):
value = TCPAddress()
class TestTCPAddress(TraitTestBase):
obj = TCPAddressTrait()
_default_value = ('127.0.0.1',0)
_good_values = [('localhost',0),('192.168.0.1',1000),('www.google.com',80)]
_bad_values = [(0,0),('localhost',10.0),('localhost',-1)]
class ListTrait(HasTraits):
value = List(Int)
class TestList(TraitTestBase):
obj = ListTrait()
_default_value = []
_good_values = [[], [1], list(range(10)), (1,2)]
_bad_values = [10, [1,'a'], 'a']
def coerce(self, value):
if value is not None:
value = list(value)
return value
class LenListTrait(HasTraits):
value = List(Int, [0], minlen=1, maxlen=2)
class TestLenList(TraitTestBase):
obj = LenListTrait()
_default_value = [0]
_good_values = [[1], [1,2], (1,2)]
_bad_values = [10, [1,'a'], 'a', [], list(range(3))]
def coerce(self, value):
if value is not None:
value = list(value)
return value
class TupleTrait(HasTraits):
value = Tuple(Int)
class TestTupleTrait(TraitTestBase):
obj = TupleTrait()
_default_value = None
_good_values = [(1,), None, (0,), [1]]
_bad_values = [10, (1,2), ('a'), ()]
def coerce(self, value):
if value is not None:
value = tuple(value)
return value
def test_invalid_args(self):
self.assertRaises(TypeError, Tuple, 5)
self.assertRaises(TypeError, Tuple, default_value='hello')
t = Tuple(Int, CBytes, default_value=(1,5))
class LooseTupleTrait(HasTraits):
value = Tuple((1,2,3))
class TestLooseTupleTrait(TraitTestBase):
obj = LooseTupleTrait()
_default_value = (1,2,3)
_good_values = [(1,), None, [1], (0,), tuple(range(5)), tuple('hello'), ('a',5), ()]
_bad_values = [10, 'hello', {}]
def coerce(self, value):
if value is not None:
value = tuple(value)
return value
def test_invalid_args(self):
self.assertRaises(TypeError, Tuple, 5)
self.assertRaises(TypeError, Tuple, default_value='hello')
t = Tuple(Int, CBytes, default_value=(1,5))
class MultiTupleTrait(HasTraits):
value = Tuple(Int, Bytes, default_value=[99,b'bottles'])
class TestMultiTuple(TraitTestBase):
obj = MultiTupleTrait()
_default_value = (99,b'bottles')
_good_values = [(1,b'a'), (2,b'b')]
_bad_values = ((),10, b'a', (1,b'a',3), (b'a',1), (1, u'a'))
class CRegExpTrait(HasTraits):
value = CRegExp(r'')
class TestCRegExp(TraitTestBase):
def coerce(self, value):
return re.compile(value)
obj = CRegExpTrait()
_default_value = re.compile(r'')
_good_values = [r'\d+', re.compile(r'\d+')]
_bad_values = [r'(', None, ()]
class DictTrait(HasTraits):
value = Dict()
def test_dict_assignment():
d = dict()
c = DictTrait()
c.value = d
d['a'] = 5
nt.assert_equal(d, c.value)
nt.assert_true(c.value is d)
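# Editor's note: because the trait keeps a reference to the very same dict
# object (c.value is d), later external mutations stay visible through the
# trait, e.g. (hypothetical extension of the test above):
#   d['b'] = 6
#   nt.assert_equal(c.value['b'], 6)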
class TestLink(TestCase):
def test_connect_same(self):
"""Verify two traitlets of the same type can be linked together using link."""
# Create two simple classes with Int traitlets.
class A(HasTraits):
value = Int()
a = A(value=9)
b = A(value=8)
        # Connect the two classes.
c = link((a, 'value'), (b, 'value'))
# Make sure the values are the same at the point of linking.
self.assertEqual(a.value, b.value)
# Change one of the values to make sure they stay in sync.
a.value = 5
self.assertEqual(a.value, b.value)
b.value = 6
self.assertEqual(a.value, b.value)
def test_link_different(self):
"""Verify two traitlets of different types can be linked together using link."""
# Create two simple classes with Int traitlets.
class A(HasTraits):
value = Int()
class B(HasTraits):
count = Int()
a = A(value=9)
b = B(count=8)
        # Connect the two classes.
c = link((a, 'value'), (b, 'count'))
# Make sure the values are the same at the point of linking.
self.assertEqual(a.value, b.count)
# Change one of the values to make sure they stay in sync.
a.value = 5
self.assertEqual(a.value, b.count)
b.count = 4
self.assertEqual(a.value, b.count)
def test_unlink(self):
"""Verify two linked traitlets can be unlinked."""
# Create two simple classes with Int traitlets.
class A(HasTraits):
value = Int()
a = A(value=9)
b = A(value=8)
# Connect the two classes.
c = link((a, 'value'), (b, 'value'))
a.value = 4
c.unlink()
# Change one of the values to make sure they don't stay in sync.
a.value = 5
self.assertNotEqual(a.value, b.value)
def test_callbacks(self):
"""Verify two linked traitlets have their callbacks called once."""
# Create two simple classes with Int traitlets.
class A(HasTraits):
value = Int()
class B(HasTraits):
count = Int()
a = A(value=9)
b = B(count=8)
# Register callbacks that count.
callback_count = []
def a_callback(name, old, new):
callback_count.append('a')
a.on_trait_change(a_callback, 'value')
def b_callback(name, old, new):
callback_count.append('b')
b.on_trait_change(b_callback, 'count')
# Connect the two classes.
c = link((a, 'value'), (b, 'count'))
# Make sure b's count was set to a's value once.
self.assertEqual(''.join(callback_count), 'b')
del callback_count[:]
# Make sure a's value was set to b's count once.
b.count = 5
self.assertEqual(''.join(callback_count), 'ba')
del callback_count[:]
# Make sure b's count was set to a's value once.
a.value = 4
self.assertEqual(''.join(callback_count), 'ab')
del callback_count[:]
|
alephu5/Soundbyte
|
environment/lib/python3.3/site-packages/IPython/utils/tests/test_traitlets.py
|
Python
|
gpl-3.0
| 29,996
|
[
"Brian"
] |
804633b8769280e14293cdaa7d810be9ee27a39ccb6d26d90b1e98ae18ddf585
|
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkProgrammableGlyphFilter(SimpleVTKClassModuleBase):
def __init__(self, module_manager):
SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkProgrammableGlyphFilter(), 'Processing.',
('vtkDataSet', 'vtkPolyData'), ('vtkPolyData',),
replaceDoc=True,
inputFunctions=None, outputFunctions=None)
|
nagyistoce/devide
|
modules/vtk_basic/vtkProgrammableGlyphFilter.py
|
Python
|
bsd-3-clause
| 520
|
[
"VTK"
] |
ed03d7bf495ddcab40c9e1bf9055c74752a3e07bc97e83e0dff3eae133422ab6
|
"""Contains the Game class which is the Machine Mode that actually runs and
manages the game in a pinball machine.
Note that in the Mission Pinball Framework, a distinction is made between a
*game* and a *machine*. A *game* refers to a game in progress, whereas a
*machine* is the physical pinball machine.
"""
# game.py
# Mission Pinball Framework
# Written by Brian Madden & Gabe Knuth
# Released under the MIT License. (See license info at the end of this file.)
# Documentation and more info at http://missionpinball.com/mpf
import logging
from mpf.system.mode import Mode
from mpf.system.player import Player
class Game(Mode):
"""Base mode that runs an active game on a pinball machine.
Responsible for creating players, starting and ending balls, rotating to
the next player, etc.
"""
def __init__(self, machine, config, name, path):
super(Game, self).__init__(machine, config, name, path)
self._balls_in_play = 0
self.player_list = list()
self.machine.game = None
self.tilted = False
self.player = None
@property
def balls_in_play(self):
return self._balls_in_play
@balls_in_play.setter
def balls_in_play(self, value):
prev_balls_in_play = self._balls_in_play
if value > self.machine.ball_controller.num_balls_known:
self._balls_in_play = self.machine.ball_controller.num_balls_known
elif value < 0:
self._balls_in_play = 0
else:
self._balls_in_play = value
self.log.debug("Balls in Play change. New value: %s, (Previous: %s)",
self._balls_in_play, prev_balls_in_play)
if self._balls_in_play > 0:
self.machine.events.post('balls_in_play',
balls=self._balls_in_play)
if prev_balls_in_play and not self._balls_in_play:
self.ball_ending()
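    # Illustration (hypothetical numbers): with num_balls_known == 3,
    #   game.balls_in_play = 5    -> clamped to 3
    #   game.balls_in_play = -1   -> clamped to 0, and ball_ending() fires
    #                                if there previously were balls in play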
def mode_start(self, buttons=None, hold_time=None, **kwargs):
"""Automatically called when the *Game* machine mode becomes active."""
if buttons:
self.buttons_held_on_start = buttons
if hold_time:
self.start_button_hold_time = hold_time
        # Initialize variables
self.num_players = 0
self.player = None
self.player_list = list()
self.machine.game = self
self.tilted = False
self._balls_in_play = 0
# todo register for request_to_start_game so you can deny it, or allow
# it with a long press
self.add_mode_event_handler('player_add_success',
self.player_add_success)
if self.machine.config['game']['add_player_switch_tag']:
self.add_mode_event_handler(
self.machine.config['mpf']['switch_tag_event'].replace('%',
self.machine.config['game']['add_player_switch_tag']),
self.request_player_add)
self.add_mode_event_handler('ball_ended', self.ball_ended)
self.add_mode_event_handler('game_ended', self.game_ended)
if ('restart on long press' in self.machine.config['game'] and
self.machine.config['game']['restart on long press']):
self.setup_midgame_restart()
self.machine.events.post('enable_volume_keys')
self.machine.events.post_queue('game_starting',
callback=self.game_started, game=self)
def mode_stop(self, **kwargs):
self.machine.game = None
def setup_midgame_restart(self, tag='start', time='1s', min_ball=0):
"""Allows a long button press to restart the game."""
pass
'''
self.min_restart_ball = min_ball
for switch in self.machine.switches.items_tagged(tag):
self.switch_handlers.append(
self.machine.switch_controller.add_switch_handler(
switch_name=switch.name,
callback=self._midgame_restart_handler,
state=1,
ms=Timing.string_to_ms(time))
)
'''
def _midgame_restart_handler(self, **kwargs):
if self.player and self.player.ball > self.min_restart_ball:
self.log.debug("------Restarting game via long button press------")
# todo this should post the request to start game event first
def game_started(self, ev_result=True, **kwargs):
"""All the modules that needed to do something on game start are done,
so our game is officially 'started'.
"""
if ev_result:
self.machine.remove_machine_var_search(startswith='player',
endswith='_score')
if not self.player_list:
# Sometimes game_starting handlers will add players, so we only
# have to here if there aren't any players yet.
self._player_add()
self.machine.events.post('game_started')
self.player_turn_start()
else: # something canceled the game start
self.game_ending()
def player_add_success(self, player, **kwargs):
"""Called when a new player is successfully added to the current game
(including when the first player is added).
"""
self.log.info("Player added successfully. Total players: %s",
self.num_players)
if self.num_players == 2:
self.machine.events.post('multiplayer_game')
def ball_starting(self):
"""Called when a new ball is starting.
Note this method is called for each ball that starts, even if it's
after a Shoot Again scenario for the same player.
Posts a queue event called *ball_starting*, giving other modules the
opportunity to do things before the ball actually starts. Once that
event is clear, this method calls :meth:`ball_started`.
"""
self.log.info("***************************************************")
self.log.info("****************** BALL STARTING ******************")
self.log.info("** **")
self.log.info("** Player: {} Ball: {} Score: {}".format(
self.player.number, self.player.ball,
self.player.score).ljust(49) + '**')
self.log.info("** **")
self.log.info("***************************************************")
self.log.info("***************************************************")
self.machine.events.post_queue('ball_starting',
callback=self.ball_started)
    def ball_started(self, ev_result=True):
        """Called when the other modules have approved a ball start.
        Mainly used to enable the AutoFire coil rules, like enabling the
        flippers and bumpers.
        """
        self.log.debug("Game Machine Mode ball_started()")
if ev_result is False:
return
# todo what happens if this fails? I mean it shouldn't, but if
# any ball_starting handler returns False, it will fail and we'll
# be in limbo?
self.log.debug("ball_started for Ball %s", self.player.ball)
# register handlers to watch for ball drain and live ball removed
self.add_mode_event_handler('ball_drain', self.ball_drained)
self.balls_in_play = 1
self.machine.events.post('ball_started', ball=self.player.ball,
player=self.player.number)
if self.num_players == 1:
self.machine.events.post('single_player_ball_started')
else:
self.machine.events.post('multi_player_ball_started')
self.machine.events.post(
'player_{}_ball_started'.format(self.player.number))
self.machine.playfield.add_ball(player_controlled=True)
def ball_drained(self, balls=0, **kwargs):
self.log.debug("Entering Game.ball_drained()")
if balls:
self.log.debug("Processing %s newly-drained ball(s)", balls)
self.balls_in_play -= balls
return {'balls': balls}
def ball_ending(self):
"""Starts the ball ending process.
This method posts the queue event *ball_ending*, giving other modules
an opportunity to finish up whatever they need to do before the ball
ends. Once all the registered handlers for that event have finished,
this method calls :meth:`ball_ended`.
Currently this method also disables the autofire_coils and flippers,
though that's temporary as we'll move those into config file options.
"""
# remove the handlers that were looking for ball drain since they'll
# be re-added on next ball start
self.machine.events.remove_handler(self.ball_drained)
# todo should clean up the above since they are removed from the
# active list of handlers but not the registered_handlers list.
# It doesn't really matter since the game ending can just remove them
# all, but technically it's not clean.
self._balls_in_play = 0
# todo everything below is hard coded temporary
self.log.debug("Entering Game.ball_ending()")
self.machine.events.post_queue('ball_ending',
callback=self._ball_ending_done)
def _ball_ending_done(self, **kwargs):
# Callback for when the ball_ending queue is clear. All this does is
# post ball_ended, but we do it this way so that ball_ended slots in
# properly after other existing events have been posted.
self.machine.events.post('ball_ended')
def ball_ended(self, ev_result=True, **kwargs):
"""Called when the ball has successfully ended.
This method is called after all the registered handlers of the queue
event *ball_ended* finish. (So typically this means that animations
have finished, etc.)
This method also decides if the same player should shoot again (if
there's an extra ball) or whether the machine controller should rotate
to the next player. It will also end the game if all players and balls
are done.
"""
self.log.debug("Entering Game.ball_ended()")
if ev_result is False:
return
if self.player.extra_balls:
self.shoot_again()
return
if (self.player.ball == self.machine.config['game']['balls_per_game']
and self.player.number == self.num_players):
self.game_ending()
else:
self.player_rotate()
self.player_turn_start()
def game_ending(self):
"""Called when the game decides it should end.
This method posts the queue event *game_ending*, giving other modules
an opportunity to finish up whatever they need to do before the game
ends. Once all the registered handlers for that event have finished,
this method calls :meth:`game_end`.
"""
self.log.debug("Entering Game.game_ending()")
self.machine.events.post_queue('game_ending',
callback=self._game_ending_done)
def _game_ending_done(self, **kwargs):
# Callback for when the game_ending queue is clear. All this does is
# post game_ended, but we do it this way so that game_ended slots in
# properly after other existing events have been posted.
self.player_turn_stop()
self.machine.events.post('game_ended')
def game_ended(self, **kwargs):
"""Actually ends the game once the *game_ending* event is clear.
Eventually this method will do lots of things. For now it just
advances the machine flow which ends the :class:`Game` mode and starts the
:class:`Attract` mode.
"""
self.log.debug("Entering Game.game_ended()")
def award_extra_ball(self, num=1, force=False):
"""Awards the player an extra ball.
Args:
num: Integer of the number of extra balls to award. Default is 1.
force: Boolean which allows you to force the extra ball even if it
means the player would go above the max extra balls specified
in the config files. Default is False.
TODO: The limit checking is not yet implemented
"""
self.log.debug("Entering Game.award_extra_ball()")
self.player.extra_balls += num
self.machine.events.post('extra_ball_awarded')
# todo add the limit checking
def shoot_again(self):
"""Called when the same player should shoot again."""
self.log.debug("Player %s Shoot Again", self.player.index + 1)
if self.player.extra_balls > 0:
self.player.extra_balls -= 1
self.ball_starting()
def set_balls_in_play(self, balls):
"""Sets the number of balls in play to the value passed.
Args:
balls: Int of the new value of balls in play.
This method does not actually eject any new balls onto the playfield,
rather, it just changes the game controller's count of the number of
balls in play.
The balls in play value cannot be lower than 0 or higher than
        the number of balls known. This method will automatically set the balls
in play to the nearest valid value if it's outside of this range.
If balls in play drops to zero, ``ball_ending()`` will be called.
"""
self.balls_in_play = balls
def add_balls_in_play(self, balls=1):
"""Adds one or more balls to the current balls in play value.
Args:
balls: Int of the balls to add.
This method does not actually eject any new balls onto the playfield,
rather, it just changes the game controller's count of the number of
balls in play.
Note that if the number of balls added exceeds the number of balls
known, it will be set to the number of balls known.
"""
self.balls_in_play += balls
def remove_balls_in_play(self, balls=1):
"""Removes one or more balls from the current balls in play value.
Args:
            balls: Int of the balls to remove.
Note that if the number of balls removed would take the current balls in
play count to less than zero, the number of balls in play will be set to
zero.
If balls in play drops to zero, ``ball_ending()`` will be called.
"""
self.balls_in_play -= balls
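    # Usage sketch (hypothetical calls from another game module):
    #   self.machine.game.add_balls_in_play(2)     # e.g. a multiball start
    #   self.machine.game.remove_balls_in_play()   # e.g. a ball gets locked
    # Both go through the balls_in_play setter above, so the same clamping
    # and ball_ending() behavior applies.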
def request_player_add(self, **kwargs):
"""Called by any module that wants to add a player to an active game.
This method contains the logic to verify whether it's ok to add a
player. (For example, the game must be on ball 1 and the current
number of players must be less than the max number allowed.)
Assuming this method believes it's ok to add a player, it posts the
boolean event *player_add_request* to give other modules the opportunity
to deny it. (For example, a credits module might deny the request if
there are not enough credits in the machine.)
If *player_add_request* comes back True, the event
*player_add_success* is posted with a reference to the new player
object as a *player* kwarg.
"""
self.log.debug("Received request to add player.")
        # There are a few things we have to check first. If this all passes,
# then we'll raise the event to ask other modules if it's ok to add a
# player
if len(self.player_list) >= self.machine.config['game']\
['max_players']:
self.log.debug("Game is at max players. Cannot add another.")
return False
if self.player and self.player.ball > 1: # todo config setting
self.log.debug("Current ball is after Ball 1. Cannot add player.")
return False
return self.machine.events.post_boolean('player_add_request',
callback=self._player_add)
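    # Flow sketch: request_player_add() -> checks pass -> the boolean event
    # 'player_add_request' is posted -> _player_add(ev_result) runs as the
    # callback -> on success a Player is created and, per the docstring
    # above, 'player_add_success' is posted with the new player object.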
def _player_add(self, ev_result=True):
# This is the callback from our request player add event.
# Don't call it directly.
if ev_result is False:
self.log.debug("Request to add player has been denied.")
return False
else:
player = Player(self.machine, self.player_list)
self.num_players = len(self.player_list)
self.machine.create_machine_var(
name='player_{}_score'.format(player.number),
value=player.score,
persist=True)
return player
def player_turn_start(self):
"""Called at the beginning of a player's turn.
Note this method is only called when a new player is first up. So if
the same player shoots again due to an extra ball, this method is not
called again.
"""
# If we get a request to start a turn but we haven't done a rotate to
# set the first player, do that now.
if not self.player:
self.player_rotate()
self.machine.events.post('player_turn_start', player=self.player,
number=self.player.number,
callback=self._player_turn_started)
def player_turn_stop(self):
if not self.player:
return
self.machine.events.post('player_turn_stop', player=self.player,
number=self.player.number)
self.machine.set_machine_var(name='player' + str(self.player.number) +
'_score', value=self.player.score)
if self.player.number < self.num_players:
self.player = self.player_list[self.player.number]
# Note the above line is kind of confusing but it works because
# the current player number is always 1 more than the index.
# i.e. "Player 1" has an index of 0, etc. So using the current
# player number as the next player's index works out.
else:
self.player = self.player_list[0]
def _player_turn_started(self, **kwargs):
self.player.ball += 1
self.ball_starting()
def player_rotate(self, player_num=None):
"""Rotates the game to the next player.
This method is called after a player's turn is over, so it's even used
in single-player games between balls.
All it does really is set :attr:`player` to the next player's number.
Args:
player_num : Int which lets you specify which player you want to
rotate to. If None, it just rotates to the next player in order.
"""
# todo do cool stuff in the future to change order, etc.
if self.player:
self.player_turn_stop()
else: # no current player, grab the first one
self.player = self.player_list[0]
self.log.debug("Player rotate: Now up is Player %s", self.player.number)
# todo player events should come next, including tracking inc/dec, other values
# The MIT License (MIT)
# Copyright (c) 2013-2015 Brian Madden and Gabe Knuth
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
|
qcapen/mpf
|
mpf/modes/game/code/game.py
|
Python
|
mit
| 20,544
|
[
"Brian"
] |
a634560e883ae214f835e2da1a30c60de99f0a7acb3337ccd0de51f562f09985
|
'''
Example script illustrating plotting of PLY data using Mayavi. Mayavi
is not a dependency of plyfile, but you will need to install it in order
to run this script. Failing to do so will immediately result in
ImportError.
'''
from argparse import ArgumentParser
import numpy
from mayavi import mlab
from plyfile import PlyData
def main():
parser = ArgumentParser()
parser.add_argument('ply_filename')
args = parser.parse_args()
mlab.figure(bgcolor=(0, 0, 0))
plot(PlyData.read(args.ply_filename))
mlab.show()
def plot(ply):
'''
Plot vertices and triangles from a PlyData instance. Assumptions:
`ply' has a 'vertex' element with 'x', 'y', and 'z'
properties;
`ply' has a 'face' element with an integral list property
'vertex_indices', all of whose elements have length 3.
'''
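    # For reference, a minimal ASCII PLY header matching these assumptions
    # (illustrative only, not shipped with this script):
    #   ply
    #   format ascii 1.0
    #   element vertex 3
    #   property float x
    #   property float y
    #   property float z
    #   element face 1
    #   property list uchar int vertex_indices
    #   end_header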
vertex = ply['vertex']
(x, y, z) = (vertex[t] for t in ('x', 'y', 'z'))
mlab.points3d(x, y, z, color=(1, 1, 1), mode='point')
if 'face' in ply:
tri_idx = ply['face']['vertex_indices']
triangles = numpy.vstack(tri_idx)
mlab.triangular_mesh(x, y, z, triangles,
color=(1, 0, 0.4), opacity=0.5)
if __name__ == '__main__':
    main()
|
dranjan/python-plyfile
|
examples/plot.py
|
Python
|
gpl-3.0
| 1,239
|
[
"Mayavi"
] |
bba95fb4b7bb2a7dad24282c97322a68d78d83eba74a275529e595894c80a8e7
|
#
# Copyright (C) 2013,2014 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Tests particle property setters/getters
import unittest as ut
import espressomd
import espressomd._system as es
import numpy as np
from espressomd.interactions import FeneBond
class ParticleProperties(ut.TestCase):
# def __init__(self,particleId):
# self.pid=particleId
# Particle id to work on
pid=17
# Error tolerance when comparing arrays/tuples...
tol=1E-9
def arraysNearlyEqual(self,a,b):
"""Test, if the magnitude of the difference between two arrays is smaller than the tolerance"""
# Check length
if len(a) != len(b):
return False
        # We have to use a loop, since we can't be sure we're getting numpy arrays
sum=0.
for i in range(len(a)):
sum+= abs(a[i]-b[i])
if sum >self.tol:
return False
return True
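    # e.g. with tol = 1E-9:
    #   arraysNearlyEqual([1.0, 2.0], [1.0, 2.0 + 1e-10])  -> True
    #   arraysNearlyEqual([1.0], [1.0, 2.0])               -> False (length)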
def setUp(self):
        bla = es.System
bla.bondedInter[0]=FeneBond(k=1,d_r_max=5)
es.System.bondedInter[0]=FeneBond(k=1,d_r_max=5)
es.System.bondedInter[1]=FeneBond(k=1,d_r_max=5)
def generateTestForVectorProperty(_propName,_value):
"""Generates test cases for vectorial particle properties such as
position, velocity...
1st arg: name of the property (e.g., "pos"),
2nd array: value to be used for testing. Has to be numpy.array of floats
"""
# This is executed, when generateTestForVectorProperty() is called
propName=_propName
value=_value
def func(self):
# This code is run at the execution of the generated function.
# It will use the state of the variables in the outer function,
# which was there, when the outer function was called
setattr(es.System.part[self.pid],propName,value)
print(propName,value,getattr(es.System.part[self.pid],propName))
self.assertTrue(self.arraysNearlyEqual(getattr(es.System.part[self.pid],propName), value),propName+": value set and value gotten back differ.")
return func
def generateTestForScalarProperty(_propName,_value):
"""Generates test cases for scalar particle properties such as
type, mass, charge...
1st arg: name of the property (e.g., "type"),
2nd array: value to be used for testing. int or float
"""
# This is executed, when generateTestForVectorProperty() is called
propName=_propName
value=_value
def func(self):
# This code is run at the execution of the generated function.
# It will use the state of the variables in the outer function,
# which was there, when the outer function was called
setattr(es.System.part[self.pid],propName,value)
print(propName,value,getattr(es.System.part[self.pid],propName))
self.assertTrue(getattr(es.System.part[self.pid],propName)==value,propName+": value set and value gotten back differ.")
return func
test_pos=generateTestForVectorProperty("pos",np.array([0.1,0.2,0.3]))
test_v=generateTestForVectorProperty("v",np.array([0.2,0.3,0.4]))
test_f=generateTestForVectorProperty("f",np.array([0.2,0.3,0.7]))
test_type=generateTestForScalarProperty("type",int(3))
test_bonds_property=generateTestForScalarProperty("bonds", ((0,1),(1,2)))
if "MASS" in es.code_info.features():
test_mass=generateTestForScalarProperty("mass",1.3)
if "ROTATION" in es.code_info.features():
test_omega_lab=generateTestForVectorProperty("omega_lab",np.array([4.,2.,1.]))
test_omega_body=generateTestForVectorProperty("omega_body",np.array([4.,72.,1.]))
test_torque_lab=generateTestForVectorProperty("torque_lab",np.array([4.,72.,3.7]))
        # The tested value has to be normalized!
test_quat=generateTestForVectorProperty("quat",np.array([0.5,0.5,0.5,0.5]))
# test_director=generateTestForVectorProperty("director",np.array([0.5,0.4,0.3]))
if "ELECTROSTATICS" in es.code_info.features():
test_charge=generateTestForScalarProperty("q",-19.7)
if "DIPOLES" in es.code_info.features():
test_dip=generateTestForVectorProperty("dip",np.array([0.5,-0.5,3]))
test_dipm=generateTestForScalarProperty("dipm",-9.7)
if "VIRTUAL_SITES" in es.code_info.features():
test_virtual=generateTestForScalarProperty("virtual",1)
if "VIRTUAL_SITES_RELATIVE" in es.code_info.features():
test_zz_vs_relative=generateTestForScalarProperty("vs_relative",((0,5.0)))
if __name__ == "__main__":
print("Features: ",es.code_info.features())
ut.main()
|
KKleinbeck/Espresso-Personal
|
testsuite/python/particle.py
|
Python
|
gpl-3.0
| 5,092
|
[
"ESPResSo"
] |
d43c161c1e49a21dae40620954924a00823924356901d4c97e599645a6f0790b
|
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from crystal_dashboard.api import policies as api
from crystal_dashboard.dashboards.crystal import common
from crystal_dashboard.dashboards.crystal import exceptions as sdsexception
from openstack_dashboard import api as api_keystone
class CreateAccessControlPolicy(forms.SelfHandlingForm):
project_choices = []
project_id = forms.ChoiceField(choices=project_choices,
label=_("Project"),
help_text=_("The project where the rule will be applied."),
required=True)
container_choices = [('', 'None')]
container_id = forms.CharField(label=_("Container"),
help_text=_("The container where the rule will be applied."),
required=False,
widget=forms.Select(choices=container_choices))
users_choices = [('', 'None')]
identity = forms.CharField(label=_("User/Group"),
help_text=_("The user or group where the rule will be applied."),
required=True,
widget=forms.Select(choices=users_choices))
access = forms.ChoiceField(
label=_('Level of access'),
choices=[('list', _('List')),
('read', _('Read-only')),
('read-write', _('Read and Write'))],
initial='list'
)
object_type_choices = []
object_type = forms.ChoiceField(choices=object_type_choices,
label=_("Read Condition: Object Type"),
help_text=_("The type of object the rule will be applied to."),
required=False)
object_tag = forms.CharField(max_length=255,
label=_("Read Condition: Object Tag"),
required=False,
help_text=_("The metadata tag of object the rule will be applied to."))
def __init__(self, request, *args, **kwargs):
# Obtain list of projects
self.project_choices = [('', 'Select one'), common.get_project_list_choices(request)]
self.object_type_choices = common.get_object_type_choices(request)
# Initialization
super(CreateAccessControlPolicy, self).__init__(request, *args, **kwargs)
# Overwrite project_id input form
self.fields['project_id'] = forms.ChoiceField(choices=self.project_choices,
label=_("Project"),
help_text=_("The project where the rule will be apply."),
required=True)
self.fields['object_type'] = forms.ChoiceField(choices=self.object_type_choices,
label=_("Read Condition: Object Type"),
help_text=_("The type of object the rule will be applied to."),
required=False)
@staticmethod
def handle(request, data):
try:
response = api.create_access_control_policy(request, data)
if 200 <= response.status_code < 300:
messages.success(request, _('Successfully created access control policy'))
return data
else:
raise ValueError(response.text)
except Exception as ex:
redirect = reverse("horizon:crystal:policies:index")
error_message = "Unable to create access control policy.\t %s" % ex.message
exceptions.handle(request, _(error_message), redirect=redirect)
class UpdateAccessControlPolicy(forms.SelfHandlingForm):
access = forms.ChoiceField()
object_type_choices = []
object_type = forms.ChoiceField(choices=object_type_choices,
label=_("Read Condition: Object Type"),
help_text=_("The type of object the rule will be applied to."),
required=False)
object_tag = forms.CharField(max_length=255,
label=_("Read Condition: Object Tag"),
required=False,
help_text=_("The metadata tag of object the rule will be applied to."))
def __init__(self, request, *args, **kwargs):
super(UpdateAccessControlPolicy, self).__init__(request, *args, **kwargs)
initial_value = ''
if self.initial['read'] and self.initial['write']:
initial_value = 'read-write'
elif self.initial['read']:
initial_value = 'read'
elif self.initial['list']:
initial_value = 'list'
self.fields['access'] = forms.ChoiceField(
label=_('Level of access'),
choices=[('list', _('List')),
('read', _('Read-only')),
('read-write', _('Read and Write'))],
initial=initial_value
)
self.object_type_choices = common.get_object_type_choices(request)
self.fields['object_type'] = forms.ChoiceField(choices=self.object_type_choices,
label=_("Read Condition: Object Type"),
help_text=_("The type of object the rule will be applied to."),
required=False)
def handle(self, request, data):
try:
acl_id = self.initial["policy_id"]
response = api.update_access_control_policy(request, data, acl_id)
            if not 200 <= response.status_code < 300:
raise sdsexception.SdsException(response)
else:
messages.success(request, _('Successfully updated policy: %s') % self.initial['policy_id'])
return data
except Exception as ex:
redirect = reverse("horizon:crystal:policies:index")
error_message = "Unable to update ACL.\t %s" % ex.message
exceptions.handle(request, _(error_message), redirect=redirect)
|
Crystal-SDS/dashboard
|
crystal_dashboard/dashboards/crystal/policies/access_control/forms.py
|
Python
|
gpl-3.0
| 6,503
|
[
"CRYSTAL"
] |
20aa044c4109611615ac516b7a07dd7dd05fa3a5ec220608fa1d0782d5888ffc
|
"""Test OpenBabel executables from Python
Note: Python bindings not used
On Windows or Linux, you can run these tests at the commandline
in the build folder with:
"C:\Program Files\CMake 2.6\bin\ctest.exe" -C CTestTestfile.cmake
-R pytest -VV
You could also "chdir" into build/test and run the test file directly:
python ../../../test/testsmartssym.py
In both cases, the test file is run directly from the source folder,
and so you can quickly develop the tests and try them out.
"""
import os
import unittest
import pdb
from testbabel import run_exec, executable, log, BaseTest
def checkmatch(query, molecules):
result = []
for smi in molecules:
output, error = run_exec("obabel -:%s -s%s -osmi" % (smi, query))
result.append(output.strip() != "")
return result
def fastcheckmatch(query, molecules):
"""May fail where Open Babel does not output the input query, e.g.
[C@@]([H])(Br)(Cl)I is output as [C@@H](Br)(Cl)I"""
output, error = run_exec("\n".join(molecules), "obabel -ismi -s%s -osmi" % query)
converted = [x.rstrip() for x in output.split("\n")]
results = [smi in converted for smi in molecules]
return results
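# Both helpers return one boolean per input SMILES, so e.g. (hypothetical,
# cf. testTetStereo below):
#   fastcheckmatch('[C@@](F)(Br)(Cl)I', ['[C@@](F)(Br)(Cl)I', 'FC(Br)(Cl)I'])
#   -> [True, False]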
class TestSmartsSym(BaseTest):
"""Base class for a series of tests relating to symmetry"""
def testSelfMatch(self):
"""Verify that a molecule matches itself"""
data = [
'[C@@](F)(Br)(Cl)I',
'[C@](F)(Br)(Cl)I',
'F[C@](Br)(Cl)I',
'[C@H](Br)(Cl)I',
'Br[C@H](Cl)I',
'[C@]1(Br)(Cl)NC1',
'[C@@]1(Br)(Cl)NC1',
'Br[C@]1(Cl)NC1',
'C1N[C@]1(Cl)Br',
'F[C@]1(Br)N[C@]1(Br)Cl',
'[C@H]1(Cl)NC1'
]
for smi in data:
output, error = run_exec("obabel -:%s -s%s -osmi" % (smi, smi))
self.assertEqual(output.rstrip(), smi)
def testTetStereo(self):
data = ['[C@@](F)(Br)(Cl)I',
'[C@](F)(Br)(Cl)I',
'F[C@](Br)(Cl)I',
'F[C@@](Br)(Cl)I',
'C(F)(Br)(Cl)I',
'FC(Br)(Cl)I']
self.assertEqual(fastcheckmatch(data[0], data[0:6]),
[True, False, False, True, False, False])
self.assertEqual(fastcheckmatch(data[2], data[0:6]),
[False, True, True, False, False, False])
self.assertEqual(fastcheckmatch(data[4], data[0:6]), [True]*6)
def testTetStereoImplicitH(self):
data = ['[C@H](Br)(Cl)I',
'[C@@H](Br)(Cl)I',
'Br[C@H](Cl)I',
'Br[C@@H](Cl)I',
'BrC(Cl)I',
'BrC([H])(Cl)I',
'Br[C@@]([H])(Cl)I'
]
self.assertEqual(checkmatch(data[0], data[0:7]),
[True, False, False, True, False, False, True])
self.assertEqual(checkmatch(data[2], data[0:7]),
[False, True, True, False, False, False, False])
self.assertEqual(checkmatch(data[4], data[0:7]), [True]*7)
self.assertEqual(checkmatch(data[6], data[0:7]),
[True, False, False, True, False, False, True])
def testRingClosures(self):
data = ['[C@]1(Br)(Cl)NC1',
'[C@@]1(Br)(Cl)NC1',
'Br[C@]1(Cl)NC1',
'Br[C@@]1(Cl)NC1',
'C1N[C@]1(Cl)Br',
'C1NC1(Cl)Br']
self.assertEqual(fastcheckmatch(data[0], data[0:6]),
[True, False, False, True, True, False])
self.assertEqual(fastcheckmatch(data[2], data[0:6]),
[False, True, True, False, False, False])
self.assertEqual(fastcheckmatch(data[5], data[0:6]), [True]*6)
if __name__ == "__main__":
testsuite = []
allclasses = [TestSmartsSym]
for myclass in allclasses:
suite = unittest.TestLoader().loadTestsFromTestCase(myclass)
testsuite.append(suite)
unittest.TextTestRunner().run(unittest.TestSuite(testsuite))
|
torcolvin/openbabel
|
test/testsmartssym.py
|
Python
|
gpl-2.0
| 4,251
|
[
"Open Babel"
] |
331995a719ce4280e71c3e3d56a9b2dd24281a4994da4c8f6568773f74cc1d97
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import time
# !! This is the configuration of Nikola. !! #
# !! You should edit it to your liking. !! #
# ! Some settings can be different in different languages.
# ! A comment stating (translatable) is used to denote those.
# ! There are two ways to specify a translatable setting:
# ! (a) BLOG_TITLE = "My Blog"
# ! (b) BLOG_TITLE = {"en": "My Blog", "es": "Mi Blog"}
# ! Option (a) is used when you don't want that setting translated.
# ! Option (b) is used for settings that are different in different languages.
# Data about this site
BLOG_AUTHOR = "dongweiming" # (translatable)
BLOG_TITLE = "Diving into IPython notebook" # (translatable)
# This is the main URL for your site. It will be used
# in a prominent link
SITE_URL = "divingintoipynb.github.io"
# This is the URL where Nikola's output will be deployed.
# If not set, defaults to SITE_URL
# BASE_URL = "divingintoipynb.github.io"
BLOG_EMAIL = "ciici123@gmail.com"
BLOG_DESCRIPTION = "Diving into IPython notebook" # (translatable)
# Nikola is multilingual!
#
# Currently supported languages are:
#
# en English
# ar Arabic
# bg Bulgarian
# ca Catalan
# cs Czech [ALTERNATIVELY cz]
# da Danish
# de German
# el Greek [NOT gr]
# eo Esperanto
# es Spanish
# et Estonian
# eu Basque
# fa Persian
# fi Finnish
# fr French
# hi Hindi
# hr Croatian
# id Indonesian
# it Italian
# ja Japanese [NOT jp]
# ko Korean
# nb Norwegian Bokmål
# nl Dutch
# pl Polish
# pt_br Portuguese (Brasil)
# ru Russian
# sk Slovak
# sl Slovene
# sr Serbian (Cyrillic)
# sv Swedish
# tr Turkish [NOT tr_TR]
# ur Urdu
# zh_cn Chinese (Simplified)
#
# If you want to use Nikola with a non-supported language you have to provide
# a module containing the necessary translations
# (cf. the modules at nikola/data/themes/base/messages/).
# If a specific post is not translated to a language, then the version
# in the default language will be shown instead.
# What is the default language?
DEFAULT_LANG = "en"
# What other languages do you have?
# The format is {"translationcode" : "path/to/translation" }
# the path will be used as a prefix for the generated pages location
TRANSLATIONS = {
DEFAULT_LANG: "",
# Example for another language:
# "es": "./es",
}
# What will translated input files be named like?
# If you have a page something.rst, then something.pl.rst will be considered
# its Polish translation.
# (in the above example: path == "something", ext == "rst", lang == "pl")
# this pattern is also used for metadata:
# something.meta -> something.pl.meta
TRANSLATIONS_PATTERN = "{path}.{lang}.{ext}"
# Links for the sidebar / navigation bar. (translatable)
# This is a dict. The keys are languages, and values are tuples.
#
# For regular links:
# ('http://getnikola.com/', 'Nikola Homepage')
#
# For submenus:
# (
# (
# ('http://apple.com/', 'Apple'),
# ('http://orange.com/', 'Orange'),
# ),
# 'Fruits'
# )
#
# WARNING: Support for submenus is theme-dependent.
# Only one level of submenus is supported.
# WARNING: Some themes, including the default Bootstrap 3 theme,
# may present issues if the menu is too large.
# (in bootstrap3, the navbar can grow too large and cover contents.)
# WARNING: If you link to directories, make sure to follow
# ``STRIP_INDEXES``. If it’s set to ``True``, end your links
# with a ``/``, otherwise end them with ``/index.html`` — or
# else they won’t be highlighted when active.
NAVIGATION_LINKS = {
DEFAULT_LANG: (
('/about/', 'About', 'icon-coffee'),
('/archive/', 'Archive', 'icon-book'),
('mailto:%s' % BLOG_EMAIL, 'Email', 'icon-envelope'),
('http://twitter.com/dongweiming',
'Twitter', 'icon-twitter'),
('http://github.com/dongweiming',
'Github', 'icon-github-alt'),
#('/categories/index.html', 'Tags'),
('/rss.xml', 'Rss', 'icon-rss'),
),
}
# Name of the theme to use.
THEME = "zen-ipython"
# Below this point, everything is optional
# Post's dates are considered in UTC by default, if you want to use
# another time zone, please set TIMEZONE to match. Check the available
# list from Wikipedia:
# http://en.wikipedia.org/wiki/List_of_tz_database_time_zones
# (eg. 'Europe/Zurich')
# Also, if you want to use a different time zone in some of your posts,
# you can use the ISO 8601/RFC 3339 format (ex. 2012-03-30T23:00:00+02:00)
TIMEZONE = "Asia/Harbin"
# If you want to use ISO 8601 (also valid RFC 3339) throughout Nikola
# (especially in new_post), set this to True.
# Note that this does not affect DATE_FORMAT.
# FORCE_ISO8601 = False
# Date format used to display post dates.
# (str used by datetime.datetime.strftime)
# DATE_FORMAT = '%Y-%m-%d %H:%M'
# Date format used to display post dates, if local dates are used.
# (str used by moment.js)
# JS_DATE_FORMAT = 'YYYY-MM-DD HH:mm'
# Date fanciness.
#
# 0 = using DATE_FORMAT and TIMEZONE
# 1 = using JS_DATE_FORMAT and local user time (via moment.js)
# 2 = using a string like “2 days ago”
#
# Your theme must support it, bootstrap and bootstrap3 already do.
# DATE_FANCINESS = 0
# While Nikola can select a sensible locale for each language,
# sometimes explicit control can come handy.
# In this file we express locales in the string form that
# python's locales will accept in your OS, by example
# "en_US.utf8" in unix-like OS, "English_United States" in Windows.
# LOCALES = dict mapping language --> explicit locale for the languages
# in TRANSLATIONS. You can omit one or more keys.
# LOCALE_FALLBACK = locale to use when an explicit locale is unavailable
# LOCALE_DEFAULT = locale to use for languages not mentioned in LOCALES; if
# not set the default Nikola mapping is used.
# POSTS and PAGES contains (wildcard, destination, template) tuples.
#
# The wildcard is used to generate a list of reSt source files
# (whatever/thing.txt).
#
# That fragment could have an associated metadata file (whatever/thing.meta),
# and optionally translated files (example for spanish, with code "es"):
# whatever/thing.es.txt and whatever/thing.es.meta
#
# This assumes you use the default TRANSLATIONS_PATTERN.
#
# From those files, a set of HTML fragment files will be generated:
# cache/whatever/thing.html (and maybe cache/whatever/thing.html.es)
#
# These files are combined with the template to produce rendered
# pages, which will be placed at
# output / TRANSLATIONS[lang] / destination / pagename.html
#
# where "pagename" is the "slug" specified in the metadata file.
#
# The difference between POSTS and PAGES is that POSTS are added
# to feeds and are considered part of a blog, while PAGES are
# just independent HTML pages.
#
POSTS = (
("posts/*.rst", "posts", "post.tmpl"),
("posts/*.txt", "posts", "post.tmpl"),
("posts/*.ipynb", "posts", "post.tmpl"),
)
PAGES = (
("stories/*.rst", "stories", "story.tmpl"),
("stories/*.txt", "stories", "story.tmpl"),
("stories/*.ipynb", "stories", "story.tmpl"),
)
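# Worked example (hypothetical file): with the POSTS entries above, a source
# file posts/hello.ipynb whose metadata sets slug "hello" is rendered with
# post.tmpl and placed at output / TRANSLATIONS[lang] / posts / hello.html.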
# One or more folders containing files to be copied as-is into the output.
# The format is a dictionary of {source: relative destination}.
# Default is:
# FILES_FOLDERS = {'files': ''}
# Which means copy 'files' into 'output'
# One or more folders containing listings to be processed and stored into
# the output. The format is a dictionary of {source: relative destination}.
# Default is:
# LISTINGS_FOLDERS = {'listings': 'listings'}
# Which means process listings from 'listings' into 'output/listings'
# A mapping of languages to file-extensions that represent that language.
# Feel free to add or delete extensions to any list, but don't add any new
# compilers unless you write the interface for it yourself.
#
# 'rest' is reStructuredText
# 'markdown' is MarkDown
# 'html' assumes the file is html and just copies it
COMPILERS = {
"rest": ('.rst', '.txt'),
"markdown": ('.md', '.mdown', '.markdown'),
"textile": ('.textile',),
"txt2tags": ('.t2t',),
"bbcode": ('.bb',),
"wiki": ('.wiki',),
"ipynb": ('.ipynb',),
"html": ('.html', '.htm'),
# PHP files are rendered the usual way (i.e. with the full templates).
# The resulting files have .php extensions, making it possible to run
# them without reconfiguring your server to recognize them.
"php": ('.php',),
# Pandoc detects the input from the source filename
# but is disabled by default as it would conflict
# with many of the others.
# "pandoc": ('.rst', '.md', '.txt'),
}
# Create by default posts in one file format?
# Set to False for two-file posts, with separate metadata.
# ONE_FILE_POSTS = True
# If this is set to True, the DEFAULT_LANG version will be displayed for
# untranslated posts.
# If this is set to False, then posts that are not translated to a language
# LANG will not be visible at all in the pages in that language.
# Formerly known as HIDE_UNTRANSLATED_POSTS (inverse)
# SHOW_UNTRANSLATED_POSTS = True
# Nikola supports logo display. If you have one, you can put the URL here.
# Final output is <img src="LOGO_URL" id="logo" alt="BLOG_TITLE">.
# The URL may be relative to the site root.
# LOGO_URL = ''
# If you want to hide the title of your website (for example, if your logo
# already contains the text), set this to False.
# SHOW_BLOG_TITLE = True
# Writes tag cloud data in form of tag_cloud_data.json.
# Warning: this option will change its default value to False in v8!
WRITE_TAG_CLOUD = True
# Paths for different autogenerated bits. These are combined with the
# translation paths.
# Final locations are:
# output / TRANSLATION[lang] / TAG_PATH / index.html (list of tags)
# output / TRANSLATION[lang] / TAG_PATH / tag.html (list of posts for a tag)
# output / TRANSLATION[lang] / TAG_PATH / tag.xml (RSS feed for a tag)
# TAG_PATH = "categories"
# If TAG_PAGES_ARE_INDEXES is set to True, each tag's page will contain
# the posts themselves. If set to False, it will be just a list of links.
# TAG_PAGES_ARE_INDEXES = False
# Set descriptions for tag pages to make them more interesting. The
# default is no description. The value is used in the meta description
# and displayed underneath the tag list or index page’s title.
# TAG_PAGES_DESCRIPTIONS = {
# DEFAULT_LANG: {
# "blogging": "Meta-blog posts about blogging about blogging.",
# "open source": "My contributions to my many, varied, ever-changing, and eternal libre software projects."
# },
#}
# Only include tags on the tag list/overview page if there are at least
# TAGLIST_MINIMUM_POSTS number of posts or more with every tag. Every tag
# page is still generated, linked from posts, and included in the sitemap.
# However, more obscure tags can be hidden from the tag index page.
# TAGLIST_MINIMUM_POSTS = 1
# Final locations are:
# output / TRANSLATION[lang] / CATEGORY_PATH / index.html (list of categories)
# output / TRANSLATION[lang] / CATEGORY_PATH / CATEGORY_PREFIX category.html (list of posts for a category)
# output / TRANSLATION[lang] / CATEGORY_PATH / CATEGORY_PREFIX category.xml (RSS feed for a category)
# CATEGORY_PATH = "categories"
# CATEGORY_PREFIX = "cat_"
# If CATEGORY_PAGES_ARE_INDEXES is set to True, each category's page will contain
# the posts themselves. If set to False, it will be just a list of links.
# CATEGORY_PAGES_ARE_INDEXES = False
# Set descriptions for category pages to make them more interesting. The
# default is no description. The value is used in the meta description
# and displayed underneath the category list or index page’s title.
# CATEGORY_PAGES_DESCRIPTIONS = {
# DEFAULT_LANG: {
# "blogging": "Meta-blog posts about blogging about blogging.",
# "open source": "My contributions to my many, varied, ever-changing, and eternal libre software projects."
# },
#}
# Final location for the main blog page and sibling paginated pages is
# output / TRANSLATION[lang] / INDEX_PATH / index-*.html
# INDEX_PATH = ""
# Create per-month archives instead of per-year
# CREATE_MONTHLY_ARCHIVE = False
# Create one large archive instead of per-year
# CREATE_SINGLE_ARCHIVE = False
# Create year, month, and day archives each with a (long) list of posts
# (overrides both CREATE_MONTHLY_ARCHIVE and CREATE_SINGLE_ARCHIVE)
# CREATE_FULL_ARCHIVES = False
# If monthly archives or full archives are created, adds also one archive per day
# CREATE_DAILY_ARCHIVE = False
# Final locations for the archives are:
# output / TRANSLATION[lang] / ARCHIVE_PATH / ARCHIVE_FILENAME
# output / TRANSLATION[lang] / ARCHIVE_PATH / YEAR / index.html
# output / TRANSLATION[lang] / ARCHIVE_PATH / YEAR / MONTH / index.html
# output / TRANSLATION[lang] / ARCHIVE_PATH / YEAR / MONTH / DAY / index.html
# ARCHIVE_PATH = ""
# ARCHIVE_FILENAME = "archive.html"
# If ARCHIVES_ARE_INDEXES is set to True, each archive page which contains a list
# of posts will contain the posts themselves. If set to False, it will be just a
# list of links.
# ARCHIVES_ARE_INDEXES = False
# URLs to other posts/pages can take 3 forms:
# rel_path: a relative URL to the current page/post (default)
# full_path: a URL with the full path from the root
# absolute: a complete URL (that includes the SITE_URL)
# URL_TYPE = 'rel_path'
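# As a hypothetical illustration (not part of the default conf): a link to
# /posts/foo.html rendered from the front page would come out roughly as
#   rel_path:  'posts/foo.html'
#   full_path: '/posts/foo.html'
#   absolute:  SITE_URL + 'posts/foo.html', e.g. 'http://example.com/posts/foo.html'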
# Final location for the blog main RSS feed is:
# output / TRANSLATION[lang] / RSS_PATH / rss.xml
# RSS_PATH = ""
# Number of posts in RSS feeds
# FEED_LENGTH = 10
# Slugify the tag URLs to make them easier for users to type; special
# characters are often removed or replaced as well.
# SLUG_TAG_PATH = True
# A list of redirection tuples, [("foo/from.html", "/bar/to.html")].
#
# An HTML file will be created in output/foo/from.html that redirects
# to the "/bar/to.html" URL. Notice that the "from" side MUST be a
# relative URL.
#
# If you don't need any of these, just set to []
REDIRECTIONS = []
# Presets of commands to execute to deploy. Can be anything, for
# example, you may use rsync:
# "rsync -rav --delete output/ joe@my.site:/srv/www/site"
# And then do a backup, or run `nikola ping` from the `ping`
# plugin (`nikola plugin -i ping`). Or run `nikola check -l`.
# You may also want to use github_deploy (see below).
# You can define multiple presets and specify them as arguments
# to `nikola deploy`. If no arguments are specified, a preset
# named `default` will be executed. You can use as many presets
# in a `nikola deploy` command as you like.
# DEPLOY_COMMANDS = {
# 'default': [
# "rsync -rav --delete output/ joe@my.site:/srv/www/site",
# ]
# }
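# A sketch of multiple presets (the preset names and hosts below are made
# up, assuming the same command syntax as the example above):
# DEPLOY_COMMANDS = {
#     'default': ["rsync -rav --delete output/ joe@my.site:/srv/www/site"],
#     'staging': ["rsync -rav --delete output/ joe@my.site:/srv/www/staging"],
# }
# With this, `nikola deploy staging` would run only the 'staging' preset.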
# For user.github.io OR organization.github.io pages, the DEPLOY branch
# MUST be 'master', and 'gh-pages' for other repositories.
GITHUB_SOURCE_BRANCH = 'master'
GITHUB_DEPLOY_BRANCH = 'gh-pages'
# The name of the remote where you wish to push to, using github_deploy.
GITHUB_REMOTE_NAME = 'origin'
# Where the output site should be located
# If you don't use an absolute path, it will be considered relative
# to the location of conf.py
# OUTPUT_FOLDER = 'output'
# where the "cache" of partial generated content should be located
# default: 'cache'
# CACHE_FOLDER = 'cache'
# Filters to apply to the output.
# A dictionary where the keys are either a file extension or
# a tuple of file extensions.
#
# And the value is a list of commands to be applied in order.
#
# Each command must be either:
#
# A string containing a '%s' which will
# be replaced with a filename. The command *must* produce output
# in place.
#
# Or:
#
# A python callable, which will be called with the filename as
# argument.
#
# By default, only .php files use filters, to inject PHP into
# Nikola’s templates. All other filters must be enabled through FILTERS.
#
# Many filters are shipped with Nikola. A list is available in the manual:
# <http://getnikola.com/handbook.html#post-processing-filters>
#
# from nikola import filters
# FILTERS = {
# ".html": [filters.typogrify],
# ".js": [filters.closure_compiler],
# ".jpg": ["jpegoptim --strip-all -m75 -v %s"],
# }
# Expert setting! Create a gzipped copy of each generated file. Cheap server-
# side optimization for very high traffic sites or low memory servers.
# GZIP_FILES = False
# File extensions that will be compressed
# GZIP_EXTENSIONS = ('.txt', '.htm', '.html', '.css', '.js', '.json', '.xml')
# Use an external gzip command? None means no.
# Example: GZIP_COMMAND = "pigz -k {filename}"
# GZIP_COMMAND = None
# Make sure the server does not return a "Accept-Ranges: bytes" header for
# files compressed by this option! OR make sure that a ranged request does not
# return partial content of another representation for these resources. Do not
# use this feature if you do not understand what this means.
# Compiler to process LESS files.
# LESS_COMPILER = 'lessc'
# A list of options to pass to the LESS compiler.
# Final command is: LESS_COMPILER LESS_OPTIONS file.less
# LESS_OPTIONS = []
# Compiler to process Sass files.
# SASS_COMPILER = 'sass'
# A list of options to pass to the Sass compiler.
# Final command is: SASS_COMPILER SASS_OPTIONS file.s(a|c)ss
# SASS_OPTIONS = []
# #############################################################################
# Image Gallery Options
# #############################################################################
# One or more folders containing galleries. The format is a dictionary of
# {"source": "relative_destination"}, where galleries are looked for in
# "source/" and the results will be located in
# "OUTPUT_PATH/relative_destination/gallery_name"
# Default is:
# GALLERY_FOLDERS = {"galleries": "galleries"}
# More gallery options:
# THUMBNAIL_SIZE = 180
# MAX_IMAGE_SIZE = 1280
# USE_FILENAME_AS_TITLE = True
# EXTRA_IMAGE_EXTENSIONS = []
#
# If set to False, it will sort by filename instead. Defaults to True
# GALLERY_SORT_BY_DATE = True
#
# Folders containing images to be used in normal posts or
# pages. Images will be scaled down according to THUMBNAIL_SIZE and
# MAX_IMAGE_SIZE options, but will have to be referenced manually to
# be visible on the site. The format is a dictionary of {source:
# relative destination}.
#
# IMAGE_FOLDERS = {'images': ''}
# #############################################################################
# HTML fragments and diverse things that are used by the templates
# #############################################################################
# Data about post-per-page indexes.
# INDEXES_PAGES defaults to ' old posts, page %d' or ' page %d' (translated),
# depending on the value of INDEXES_PAGES_MAIN.
# INDEXES_TITLE = "" # (translatable) If this is empty, defaults to BLOG_TITLE
# INDEXES_PAGES = "" # (translatable) If this is empty, defaults to ' [old posts,] page %d' (see above)
# INDEXES_PAGES_MAIN = False # If True, INDEXES_PAGES is also displayed on
# # the main (the newest) index page (index.html)
# INDEXES_STATIC = True # If True, index-1.html has the oldest posts,
# # index-2.html the second-oldest posts, etc., and
# # index.html has the newest posts. This ensures
# # that all posts on index-x.html will forever
# #                        stay on that page, no matter how many new
# # posts are added.
# # If False, index-1.html has the second-newest
# # posts, index-2.html the third-newest, and
# # index-n.html the oldest posts. When this is
# # active, old posts can be moved to other index
# # pages when new posts are added.
# Color scheme to be used for code blocks. If your theme provides
# "assets/css/code.css" this is ignored.
# Can be any of autumn borland bw colorful default emacs friendly fruity manni
# monokai murphy native pastie perldoc rrt tango trac vim vs
# CODE_COLOR_SCHEME = 'default'
# If you use 'site-reveal' theme you can select several subthemes
# THEME_REVEAL_CONFIG_SUBTHEME = 'sky'
# You can also use: beige/serif/simple/night/default
# Again, if you use 'site-reveal' theme you can select several transitions
# between the slides
# THEME_REVEAL_CONFIG_TRANSITION = 'cube'
# You can also use: page/concave/linear/none/default
# FAVICONS contains (name, file, size) tuples.
# Used to create favicon links like this:
# <link rel="name" href="file" sizes="size"/>
# FAVICONS = {
# ("icon", "/favicon.ico", "16x16"),
# ("icon", "/icon_128x128.png", "128x128"),
# }
# Show only teasers in the index pages? Defaults to False.
# INDEX_TEASERS = False
# HTML fragments with the Read more... links.
# The following tags exist and are replaced for you:
# {link} A link to the full post page.
# {read_more} The string “Read more” in the current language.
# {reading_time} An estimate of how long it will take to read the post.
# {remaining_reading_time} An estimate of how long it will take to read the post, sans the teaser.
# {min_remaining_read} The string “{remaining_reading_time} min remaining to read” in the current language.
# {paragraph_count} The number of paragraphs in the post.
# {remaining_paragraph_count} The number of paragraphs in the post, sans the teaser.
# {{ A literal { (U+007B LEFT CURLY BRACKET)
# }} A literal } (U+007D RIGHT CURLY BRACKET)
# 'Read more...' for the index page, if INDEX_TEASERS is True (translatable)
INDEX_READ_MORE_LINK = '<p class="more"><a href="{link}">{read_more}…</a></p>'
# 'Read more...' for the RSS_FEED, if RSS_TEASERS is True (translatable)
RSS_READ_MORE_LINK = '<p><a href="{link}">{read_more}…</a> ({min_remaining_read})</p>'
# Append a URL query to the RSS_READ_MORE_LINK and the //rss/item/link in
# RSS feeds. Minimum example for Piwik "pk_campaign=rss" and Google Analytics
# "utm_source=rss&utm_medium=rss&utm_campaign=rss". Advanced option used for
# traffic source tracking.
RSS_LINKS_APPEND_QUERY = False
# An HTML fragment describing the license, for the sidebar.
# (translatable)
LICENSE = ""
# I recommend using the Creative Commons' wizard:
# http://creativecommons.org/choose/
# LICENSE = """
# <a rel="license" href="http://creativecommons.org/licenses/by-nc-sa/2.5/ar/">
# <img alt="Creative Commons License BY-NC-SA"
# style="border-width:0; margin-bottom:12px;"
# src="http://i.creativecommons.org/l/by-nc-sa/2.5/ar/88x31.png"></a>"""
# A small copyright notice for the page footer (in HTML).
# (translatable)
CONTENT_FOOTER = 'Contents © {date} <a href="mailto:{email}">{author}</a> - Powered by <a href="http://getnikola.com" rel="nofollow">Nikola</a> {license}'
# Things that will be passed to CONTENT_FOOTER.format(). This is done
# for translatability, as dicts are not formattable. Nikola will
# intelligently format the setting properly.
# The setting takes a dict. The keys are languages. The values are
# tuples of tuples of positional arguments and dicts of keyword arguments
# to format(). For example, {'en': (('Hello',), {'target': 'World'})}
# results in CONTENT_FOOTER['en'].format('Hello', target='World').
# WARNING: If you do not use multiple languages with CONTENT_FOOTER, this
# still needs to be a dict of this format. (it can be empty if you
# do not need formatting)
# (translatable)
CONTENT_FOOTER_FORMATS = {
DEFAULT_LANG: (
(),
{
"email": BLOG_EMAIL,
"author": BLOG_AUTHOR,
"date": time.gmtime().tm_year,
"license": LICENSE
}
)
}
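# A hypothetical sketch of the same setting for a bilingual site; the 'es'
# entry simply reuses the same keyword arguments and is only illustrative:
# CONTENT_FOOTER_FORMATS = {
#     'en': ((), {"email": BLOG_EMAIL, "author": BLOG_AUTHOR,
#                 "date": time.gmtime().tm_year, "license": LICENSE}),
#     'es': ((), {"email": BLOG_EMAIL, "author": BLOG_AUTHOR,
#                 "date": time.gmtime().tm_year, "license": LICENSE}),
# }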
# To use comments, you can choose between different third party comment
# systems. The following comment systems are supported by Nikola:
# disqus, facebook, googleplus, intensedebate, isso, livefyre, muut
# You can leave this option blank to disable comments.
COMMENT_SYSTEM = ""
# And you also need to add your COMMENT_SYSTEM_ID which
# depends on what comment system you use. The default is
# "nikolademo" which is a test account for Disqus. More information
# is in the manual.
COMMENT_SYSTEM_ID = ""
# Enable annotations using annotateit.org?
# If set to False, you can still enable them for individual posts and pages
# by setting the "annotations" metadata.
# If set to True, you can disable them for individual posts and pages using
# the "noannotations" metadata.
# ANNOTATIONS = False
# Create index.html for page (story) folders?
# WARNING: if a page would conflict with the index file (usually
# caused by setting slug to `index`), the STORY_INDEX
# will not be generated for that directory.
# STORY_INDEX = False
# Enable comments on story pages?
# COMMENTS_IN_STORIES = False
# Enable comments on picture gallery pages?
# COMMENTS_IN_GALLERIES = False
# What file should be used for directory indexes?
# Defaults to index.html
# Common other alternatives: default.html for IIS, index.php
# INDEX_FILE = "index.html"
# If a link ends in /index.html, drop the index.html part.
# http://mysite/foo/bar/index.html => http://mysite/foo/bar/
# (Uses the INDEX_FILE setting, so if that is, say, default.html,
# it will instead strip /foo/default.html => /foo)
# (Note: This was briefly STRIP_INDEX_HTML in v 5.4.3 and 5.4.4)
# Default = False
# STRIP_INDEXES = False
# Should the sitemap list directories which only include other directories
# and no files?
# Defaults to True.
# If this is False:
# e.g. if /2012 includes only /01, /02, /03, /04, ...: don't add it to the sitemap;
# if /2012 includes any files (including index.html): add it to the sitemap.
# SITEMAP_INCLUDE_FILELESS_DIRS = True
# List of files relative to the server root (!) that will be asked to be excluded
# from indexing and other robotic spidering. * is supported. Will only be effective
# if SITE_URL points to server root. The list is used to exclude resources from
# /robots.txt and /sitemap.xml, and to inform search engines about /sitemapindex.xml.
# ROBOTS_EXCLUSIONS = ["/archive.html", "/category/*.html"]
# Instead of putting files in <slug>.html, put them in
# <slug>/index.html. Also enables STRIP_INDEXES
# This can be disabled on a per-page/post basis by adding
# .. pretty_url: False
# to the metadata
# PRETTY_URLS = False
# If True, publish future dated posts right away instead of scheduling them.
# Defaults to False.
# FUTURE_IS_NOW = False
# If True, future-dated posts are allowed in the deployed output.
# Only the individual posts are published/deployed; they are not included in indexes or the sitemap.
# Generally, you want FUTURE_IS_NOW and DEPLOY_FUTURE to be the same value.
# DEPLOY_FUTURE = False
# If False, draft posts will not be deployed
# DEPLOY_DRAFTS = True
# Allows scheduling of posts using the rule specified here (new_post -s)
# Specify an iCal Recurrence Rule: http://www.kanzaki.com/docs/ical/rrule.html
# SCHEDULE_RULE = ''
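# A hedged example (any valid iCal RRULE should work; this one is made up):
# SCHEDULE_RULE = 'RRULE:FREQ=DAILY;BYHOUR=12;BYMINUTE=0;BYSECOND=0'
# which, assuming the rule is applied from the last scheduled post, would
# place each new post in the next free 12:00 slot, one per day.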
# If True, apply the scheduling rule to all posts by default
# SCHEDULE_ALL = False
# Do you want to add a MathJax config file?
# MATHJAX_CONFIG = ""
# If you are using the compile-ipynb plugin, just add this one:
# MATHJAX_CONFIG = """
# <script type="text/x-mathjax-config">
# MathJax.Hub.Config({
# tex2jax: {
# inlineMath: [ ['$','$'], ["\\\(","\\\)"] ],
# displayMath: [ ['$$','$$'], ["\\\[","\\\]"] ],
# processEscapes: true
# },
# displayAlign: 'left', // Change this to 'center' to center equations.
# "HTML-CSS": {
# styles: {'.MathJax_Display': {"margin": 0}}
# }
# });
# </script>
# """
# Do you want to customize the nbconversion of your IPython notebook?
# IPYNB_CONFIG = {}
# With the following example configuration you can use a custom jinja template
# called `toggle.tpl` which has to be located in your site/blog main folder:
# IPYNB_CONFIG = {'Exporter':{'template_file': 'toggle'},
# 'Browser': {'connection_url': 'http://localhost:8888/',
# 'browser': 'safari'}}
# The available browser types are:
# mozilla/firefox/epiphany/konqueror/grail/lynx/w3m/macosx/safari
# For more details, please refer to https://docs.python.org/2/library/webbrowser.html
# What Markdown extensions to enable?
# You will also get gist, nikola and podcast because those are
# done in the code, hope you don't mind ;-)
# Note: most Nikola-specific extensions are done via the Nikola plugin system,
# with the MarkdownExtension class and should not be added here.
# MARKDOWN_EXTENSIONS = ['fenced_code', 'codehilite']
# Extra options to pass to the pandoc command.
# By default it's empty; it is a list of strings, for example:
# ['-F', 'pandoc-citeproc', '--bibliography=/Users/foo/references.bib']
# PANDOC_OPTIONS = []
# Social buttons. This is sample code for AddThis (which was the default for a
# long time). Insert anything you want here, or even make it empty.
# (translatable)
# SOCIAL_BUTTONS_CODE = """
# <!-- Social buttons -->
# <div id="addthisbox" class="addthis_toolbox addthis_peekaboo_style addthis_default_style addthis_label_style addthis_32x32_style">
# <a class="addthis_button_more">Share</a>
# <ul><li><a class="addthis_button_facebook"></a>
# <li><a class="addthis_button_google_plusone_share"></a>
# <li><a class="addthis_button_linkedin"></a>
# <li><a class="addthis_button_twitter"></a>
# </ul>
# </div>
# <script src="//s7.addthis.com/js/300/addthis_widget.js#pubid=ra-4f7088a56bb93798"></script>
# <!-- End of social buttons -->
# """
# Show link to source for the posts?
# Formerly known as HIDE_SOURCELINK (inverse)
# SHOW_SOURCELINK = True
# Copy the source files for your pages?
# Setting it to False implies SHOW_SOURCELINK = False
# COPY_SOURCES = True
# Modify the number of posts per index page.
# Defaults to 10
# INDEX_DISPLAY_POST_COUNT = 10
# By default, Nikola generates RSS files for the website and for tags, and
# links to it. Set this to False to disable everything RSS-related.
# GENERATE_RSS = True
# RSS_LINK is an HTML fragment to link the RSS or Atom feeds. If set to None,
# the base.tmpl will use the feed Nikola generates. However, you may want to
# change it for a feedburner feed or something else.
# RSS_LINK = None
# Show only teasers in the RSS feed? Defaults to True
# RSS_TEASERS = True
# Strip HTML in the RSS feed? Defaults to False
# RSS_PLAIN = False
# A search form to search this site, for the sidebar. You can use a Google
# custom search (http://www.google.com/cse/)
# Or a DuckDuckGo search: https://duckduckgo.com/search_box.html
# Default is no search form.
# (translatable)
# SEARCH_FORM = ""
#
# This search form works for any site and looks good in the "site" theme where
# it appears on the navigation bar:
#
# SEARCH_FORM = """
# <!-- Custom search -->
# <form method="get" id="search" action="//duckduckgo.com/"
# class="navbar-form pull-left">
# <input type="hidden" name="sites" value="%s"/>
# <input type="hidden" name="k8" value="#444444"/>
# <input type="hidden" name="k9" value="#D51920"/>
# <input type="hidden" name="kt" value="h"/>
# <input type="text" name="q" maxlength="255"
# placeholder="Search…" class="span2" style="margin-top: 4px;"/>
# <input type="submit" value="DuckDuckGo Search" style="visibility: hidden;" />
# </form>
# <!-- End of custom search -->
# """ % SITE_URL
#
# If you prefer a Google search form, here's an example that should just work:
# SEARCH_FORM = """
# <!-- Custom search with Google-->
# <form id="search" action="//www.google.com/search" method="get" class="navbar-form pull-left">
# <input type="hidden" name="q" value="site:%s" />
# <input type="text" name="q" maxlength="255" results="0" placeholder="Search"/>
# </form>
# <!-- End of custom search -->
#""" % SITE_URL
# Use content distribution networks for jquery, twitter-bootstrap css and js,
# and html5shiv (for older versions of Internet Explorer)
# If this is True, jquery and html5shiv are served from the Google CDN and
# Bootstrap is served from BootstrapCDN (provided by MaxCDN)
# Set this to False if you want to host your site without requiring access to
# external resources.
# USE_CDN = False
# Check for USE_CDN compatibility.
# If you are using custom themes, have configured the CSS properly and are
# receiving warnings about incompatibility but believe they are incorrect, you
# can set this to False.
# USE_CDN_WARNING = True
# Extra things you want in the pages HEAD tag. This will be added right
# before </head>
# (translatable)
# EXTRA_HEAD_DATA = ""
# Google Analytics or whatever else you use. Added to the bottom of <body>
# in the default template (base.tmpl).
# (translatable)
# BODY_END = ""
# The possibility to extract metadata from the filename by using a
# regular expression.
# To make it work you need to name parts of your regular expression.
# The following names will be used to extract metadata:
# - title
# - slug
# - date
# - tags
# - link
# - description
#
# An example re is the following:
# '(?P<date>\d{4}-\d{2}-\d{2})-(?P<slug>.*)-(?P<title>.*)\.md'
# FILE_METADATA_REGEXP = None
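# To illustrate with the example regexp above (a hypothetical filename,
# not from this site): '2015-03-14-hello-world-Hello World.md' would yield
# date='2015-03-14', slug='hello-world', title='Hello World'. Note that
# the slug group is greedy, so it consumes everything up to the last dash.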
# If you hate "Filenames with Capital Letters and Spaces.md", you should
# set this to True.
UNSLUGIFY_TITLES = True
# Additional metadata that is added to a post when creating a new_post
# ADDITIONAL_METADATA = {}
# Nikola supports Open Graph Protocol data for enhancing link sharing and
# discoverability of your site on Facebook, Google+, and other services.
# Open Graph is enabled by default.
# USE_OPEN_GRAPH = True
# Nikola supports Twitter Card summaries, but they are disabled by default.
# They make it possible for you to attach media to Tweets that link
# to your content.
#
# IMPORTANT:
# Please note that you need to opt in to use Twitter Cards!
# To do this please visit https://cards-dev.twitter.com/validator
#
# Uncomment and modify the following lines to match your accounts.
# Images displayed come from the `previewimage` meta tag.
# You can specify the card type by using the `card` parameter in TWITTER_CARD.
# TWITTER_CARD = {
# # 'use_twitter_cards': True, # enable Twitter Cards
# # 'card': 'summary', # Card type, you can also use 'summary_large_image',
# # see https://dev.twitter.com/cards/types
# # 'site': '@website', # twitter nick for the website
# # 'creator': '@username', # Username for the content creator / author.
# }
# If webassets is installed, bundle JS and CSS to make site loading faster
# USE_BUNDLES = True
# Plugins you don't want to use. Be careful :-)
# DISABLED_PLUGINS = ["render_galleries"]
# Add the absolute paths to directories containing plugins to use them.
# For example, the `plugins` directory of your clone of the Nikola plugins
# repository.
# EXTRA_PLUGINS_DIRS = []
# List of regular expressions, links matching them will always be considered
# valid by "nikola check -l"
# LINK_CHECK_WHITELIST = []
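# For example (hypothetical patterns, assuming standard Python regexes):
# LINK_CHECK_WHITELIST = [r"https://twitter\.com/.*", r".*\.example\.com.*"]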
# If set to True, enable optional hyphenation in your posts (requires pyphen)
# HYPHENATE = False
# The <hN> tags in HTML generated by certain compilers (reST/Markdown)
# will be demoted by that much (1 → h1 will become h2 and so on)
# This was a hidden feature of the Markdown and reST compilers in the
# past. Useful especially if your post titles are in <h1> tags too, for
# example.
# (defaults to 1.)
# DEMOTE_HEADERS = 1
# If you don’t like slugified file names ([a-z0-9] and a literal dash)
# and would prefer to use all the characters your file system allows,
# set this to False.
# USE WITH CARE! This is also not guaranteed to be perfect, and may
# sometimes crash Nikola, your web server, or eat your cat.
# USE_SLUGIFY = True
# You can configure the logging handlers installed as plugins or change the
# log level of the default stderr handler.
# WARNING: The stderr handler allows only the loglevels of 'INFO' and 'DEBUG'.
# This is done for safety reasons, as blocking out anything other
# than 'DEBUG' may hide important information and break the user
# experience!
LOGGING_HANDLERS = {
'stderr': {'loglevel': 'INFO', 'bubble': True},
# 'smtp': {
# 'from_addr': 'test-errors@example.com',
# 'recipients': ('test@example.com',),
# 'credentials':('testusername', 'password'),
# 'server_addr': ('127.0.0.1', 25),
# 'secure': (),
# 'level': 'DEBUG',
# 'bubble': True
# }
}
# Templates will use those filters, along with the defaults.
# Consult your engine's documentation on filters if you need help defining
# those.
# TEMPLATE_FILTERS = {}
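# A minimal hypothetical sketch (the exact filter signature depends on your
# template engine; this assumes a plain one-argument callable, as in Jinja2):
# TEMPLATE_FILTERS = {'shout': lambda s: s.upper()}
# A Jinja2 template could then use it as {{ title|shout }}.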
# Put in global_context things you want available on all your templates.
# It can be anything, data, functions, modules, etc.
GLOBAL_CONTEXT = {}
# Add functions here and they will be called with template
# GLOBAL_CONTEXT as parameter when the template is about to be
# rendered
GLOBAL_CONTEXT_FILLER = []
|
dongweiming/divingintoipynb_nikola
|
conf.py
|
Python
|
mit
| 37,016
|
[
"VisIt"
] |
efe2779745e030962577a975889ec734f913068b405b9d14f6630f21f933a79b
|
# -*- coding: utf-8 -*-
# This file is part of Invenio Demosite.
# Copyright (C) 2012, 2013 CERN.
#
# Invenio Demosite is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio Demosite is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
BibWorkflow Unit tests - functions to test workflows
"""
from invenio.ext.sqlalchemy import db
from invenio.testsuite import (make_test_suite,
run_test_suite,
InvenioTestCase)
from invenio.modules.workflows.config import CFG_OBJECT_VERSION
class TestWorkflowStart(InvenioTestCase):
"""Tests for BibWorkflow API."""
def setUp(self):
self.test_data = {}
self.id_workflows = []
self.recxml = """<?xml version="1.0" encoding="UTF-8"?>
<OAI-PMH xmlns="http://www.openarchives.org/OAI/2.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/ http://www.openarchives.org/OAI/2.0/OAI-PMH.xsd">
<responseDate>2013-04-03T13:56:49Z</responseDate>
<request verb="ListRecords" from="2013-03-25" metadataPrefix="arXiv" set="physics:astro-ph">http://export.arxiv.org/oai2</request>
<ListRecords>
<record>
<header>
<identifier>oai:arXiv.org:0801.3931</identifier>
<datestamp>2013-03-26</datestamp>
<setSpec>physics:astro-ph</setSpec>
</header>
<metadata>
<arXiv xmlns="http://arxiv.org/OAI/arXiv/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://arxiv.org/OAI/arXiv/ http://arxiv.org/OAI/arXiv.xsd">
<id>0801.3931</id><created>2008-01-25</created><authors><author><keyname>Manos</keyname><forenames>T.</forenames></author><author><keyname>Athanassoula</keyname><forenames>E.</forenames></author></authors><title>Dynamical study of 2D and 3D barred galaxy models</title><categories>astro-ph</categories><comments>8 pages, 3 figures, to appear in the proceedings of the international
conference "Chaos in Astronomy", Athens, Greece (talk contribution)</comments><journal-ref>Chaos in Astronomy Astrophysics and Space Science Proceedings
2009, pp 115-122</journal-ref><doi>10.1007/978-3-540-75826-6_11</doi><abstract> We study the dynamics of 2D and 3D barred galaxy analytical models, focusing
on the distinction between regular and chaotic orbits with the help of the
Smaller ALigment Index (SALI), a very powerful tool for this kind of problems.
We present briefly the method and we calculate the fraction of chaotic and
regular orbits in several cases. In the 2D model, taking initial conditions on
a Poincar\'{e} $(y,p_y)$ surface of section, we determine the fraction of
regular and chaotic orbits. In the 3D model, choosing initial conditions on a
cartesian grid in a region of the $(x, z, p_y)$ space, which in coordinate
space covers the inner disc, we find how the fraction of regular orbits changes
as a function of the Jacobi constant. Finally, we outline that regions near the
$(x,y)$ plane are populated mainly by regular orbits. The same is true for
regions that lie either near to the galactic center, or at larger relatively
distances from it.
</abstract></arXiv>
</metadata>
</record>
</ListRecords>
</OAI-PMH>
"""
def tearDown(self):
""" Clean up created objects """
from invenio.modules.workflows.models import (BibWorkflowObject,
Workflow,
BibWorkflowEngineLog,
BibWorkflowObjectLog)
from invenio.modules.workflows.utils import get_redis_keys, set_up_redis
workflows = Workflow.get(Workflow.module_name == "unit_tests").all()
for workflow in workflows:
BibWorkflowObject.query.filter(
BibWorkflowObject.id_workflow == workflow.uuid
).delete()
objects = BibWorkflowObjectLog.query.filter(
BibWorkflowObject.id_workflow == workflow.uuid
).all()
for obj in objects:
db.session.delete(obj)
db.session.delete(workflow)
objects = BibWorkflowObjectLog.query.filter(
BibWorkflowObject.id_workflow == workflow.uuid
).all()
for obj in objects:
BibWorkflowObjectLog.delete(id=obj.id)
BibWorkflowEngineLog.delete(uuid=workflow.uuid)
# Deleting dummy objects created in tests
db.session.query(BibWorkflowObject).filter(
BibWorkflowObject.id_workflow.in_([11, 123, 253])
).delete(synchronize_session='fetch')
Workflow.query.filter(Workflow.module_name == "unit_tests").delete()
db.session.commit()
rs = set_up_redis()
keys = get_redis_keys()
for key in keys:
keys2 = get_redis_keys(key)
for key2 in keys2:
rs.delete("holdingpen_sort:%s:%s" % (key, key2,))
rs.delete("holdingpen_sort:%s" % (key,))
rs.delete("holdingpen_sort")
def test_workflow_basic_run(self):
"""Tests running workflow with one data object"""
from invenio.modules.workflows.models import BibWorkflowObject
from invenio.modules.workflows.api import start
self.test_data = {'data': 20}
initial_data = self.test_data
final_data = {'data': 41}
workflow = start(workflow_name="test_workflow",
data=[self.test_data], module_name="unit_tests")
# Keep id for cleanup after
self.id_workflows.append(workflow.uuid)
# Get parent object of the workflow we just ran
initial_object = BibWorkflowObject.query.filter(
BibWorkflowObject.id_workflow == workflow.uuid,
BibWorkflowObject.id_parent == None) # noqa E711
all_objects = BibWorkflowObject.query.filter(
BibWorkflowObject.id_workflow == workflow.uuid)
# There should only be 2 objects (initial, final)
self.assertEqual(all_objects.count(), 2)
self._check_workflow_execution(initial_object,
initial_data, final_data)
def test_workflow_complex_run(self):
"""Tests running workflow with several data objects"""
from invenio.modules.workflows.models import BibWorkflowObject
from invenio.modules.workflows.api import start
self.test_data = [{'data': 1}, {'data': "wwww"}, {'data': 20}]
final_data = [{'data': 19}, {'data': "wwww"}, {'data': 38}]
workflow = start(workflow_name="test_workflow_2",
data=self.test_data, module_name="unit_tests")
# Keep id for cleanup after
self.id_workflows.append(workflow.uuid)
# Get parent objects of the workflow we just ran
objects = BibWorkflowObject.query.filter(
BibWorkflowObject.id_workflow == workflow.uuid,
BibWorkflowObject.id_parent == None) # noqa E711
# Let's check that we found something.
# There should only be three objects
self.assertEqual(objects.count(), 3)
all_objects = BibWorkflowObject.query.filter(
BibWorkflowObject.id_workflow == workflow.uuid)
self.assertEqual(all_objects.count(), 6)
for obj in objects.all():
# The child object should have the final or halted version
self.assertTrue(obj.child_objects[0].version
in (CFG_OBJECT_VERSION.FINAL,
CFG_OBJECT_VERSION.HALTED))
# Making sure the final data is correct
self.assertTrue(obj.child_objects[0].get_data()
in final_data)
def test_workflow_recordxml(self):
"""Tests runnning a record ingestion workflow"""
from invenio.modules.workflows.models import BibWorkflowObject
from invenio.modules.workflows.api import start
initial_data = self.recxml
workflow = start(workflow_name="marcxml_workflow",
data=[initial_data], module_name="unit_tests")
# Keep id for cleanup after
self.id_workflows.append(workflow.uuid)
# Get parent object of the workflow we just ran
objects = BibWorkflowObject.query.filter(
BibWorkflowObject.id_workflow == workflow.uuid,
BibWorkflowObject.id_parent == None) # noqa E711
all_objects = BibWorkflowObject.query.filter(
BibWorkflowObject.id_workflow == workflow.uuid)
self.assertEqual(all_objects.count(), 2)
self._check_workflow_execution(objects,
initial_data, None)
def test_workflow_for_halted_object(self):
"""Test starting workflow with halted object given"""
from invenio.modules.workflows.models import BibWorkflowObject
from invenio.modules.workflows.api import start_by_oids
initial_data = {'data': 1}
obj_init = BibWorkflowObject(id_workflow=123,
version=CFG_OBJECT_VERSION.INITIAL)
obj_init.set_data(initial_data)
obj_init._update_db()
halted_data = {'data': 1}
obj_halted = BibWorkflowObject(id_workflow=123,
id_parent=obj_init.id,
version=CFG_OBJECT_VERSION.HALTED)
obj_halted.set_data(halted_data)
obj_halted._update_db()
workflow = start_by_oids('test_workflow',
[obj_halted.id], module_name="unit_tests")
final_data = {'data': 2}
objects = BibWorkflowObject.query.filter(
BibWorkflowObject.id_workflow == workflow.uuid,
BibWorkflowObject.id_parent == None) # noqa E711
all_objects = BibWorkflowObject.query.filter(
BibWorkflowObject.id_workflow == workflow.uuid)
self.assertEqual(all_objects.count(), 2)
# Check the workflow execution
self._check_workflow_execution(objects,
halted_data,
final_data)
# Check copied INITIAL object
self.assertEqual(obj_halted.get_data(), objects[0].get_data())
# Check that the original objects were untouched
self.assertEqual(obj_init.id_workflow, "123")
self.assertEqual(obj_halted.id_workflow, "123")
def test_workflow_for_finished_object(self):
"""Test starting workflow with finished object given"""
from invenio.modules.workflows.models import BibWorkflowObject
from invenio.modules.workflows.api import start_by_oids
initial_data = {'data': 20}
obj_init = BibWorkflowObject(id_workflow=253,
version=CFG_OBJECT_VERSION.INITIAL)
obj_init.set_data(initial_data)
obj_init._update_db()
first_final_data = {u'data': 41}
obj_final = BibWorkflowObject(id_workflow=253,
id_parent=obj_init.id,
version=CFG_OBJECT_VERSION.FINAL)
obj_final.set_data(first_final_data)
obj_final._update_db()
workflow = start_by_oids('test_workflow',
[obj_final.id], module_name="unit_tests")
final_data = {u'data': 62}
objects = BibWorkflowObject.query.filter(
BibWorkflowObject.id_workflow == workflow.uuid,
BibWorkflowObject.id_parent == None) # noqa E711
all_objects = BibWorkflowObject.query.filter(
BibWorkflowObject.id_workflow == workflow.uuid)
self.assertEqual(all_objects.count(), 2)
# Check the workflow execution
self._check_workflow_execution(objects,
first_final_data,
final_data)
# Check copied INITIAL object
self.assertEqual(obj_final.get_data(), objects[0].get_data())
# Check that the original objects were untouched
self.assertEqual(obj_init.id_workflow, "253")
self.assertEqual(obj_final.id_workflow, "253")
def test_logging_for_workflow_objects_without_workflow(self):
"""This test run a virtual object out of a workflow for
test purpose, this object will log several things"""
from invenio.modules.workflows.models import (BibWorkflowObject,
BibWorkflowObjectLog)
initial_data = {'data': 20}
obj_init = BibWorkflowObject(id_workflow=11,
version=CFG_OBJECT_VERSION.INITIAL)
obj_init.set_data(initial_data)
obj_init._update_db()
obj_init.save()
obj_init.log.info("I am a test object")
obj_init.log.error("This is an error message")
# FIXME: loglevels are simply overwritten somewhere in Celery
# even if Celery is not being "used".
#
# This means loglevel.DEBUG is NOT working at the moment!
obj_init.log.debug("This is a debug message")
obj_init._update_db()
obj_test = BibWorkflowObjectLog.query.filter(
BibWorkflowObjectLog.id_object == obj_init.id).all()
messages_found = 0
for current_obj in obj_test:
if current_obj.message == "I am a test object" \
and messages_found == 0:
messages_found += 1
elif current_obj.message == "This is an error message" \
and messages_found == 1:
messages_found += 1
elif current_obj.message == "This is a debug message" \
and messages_found == 2:
messages_found += 1
self.assertEqual(messages_found, 2) # FIXME: should be 3 when debug works
def test_workflow_for_running_object(self):
"""Test starting workflow with running object given"""
from invenio.modules.workflows.models import BibWorkflowObject
from invenio.modules.workflows.api import start_by_oids
initial_data = {'data': 20}
obj_init = BibWorkflowObject(id_workflow=11,
version=CFG_OBJECT_VERSION.INITIAL)
obj_init.set_data(initial_data)
obj_init._update_db()
running_data = {'data': 26}
obj_running = BibWorkflowObject(id_workflow=11,
id_parent=obj_init.id,
version=CFG_OBJECT_VERSION.RUNNING)
obj_running.set_data(running_data)
obj_running._update_db()
workflow = start_by_oids('test_workflow',
[obj_running.id], module_name="unit_tests")
final_data = {u'data': 41}
objects = BibWorkflowObject.query.filter(
BibWorkflowObject.id_workflow == workflow.uuid,
BibWorkflowObject.id_parent == None) # noqa E711
all_objects = BibWorkflowObject.query.filter(
BibWorkflowObject.id_workflow == workflow.uuid)
self.assertEqual(all_objects.count(), 2)
# Check the workflow execution
self._check_workflow_execution(objects,
initial_data,
final_data)
# Check copied INITIAL object
self.assertEqual(obj_init.get_data(), objects[0].get_data())
# Check that the original object was untouched
self.assertEqual(obj_init.id_workflow, "11")
objects = BibWorkflowObject.query.filter(
BibWorkflowObject.id == obj_running.id)
self.assertEqual(objects.count(), 0)
def test_continue_execution_for_object(self):
"""Tests continuing execution of workflow for object
given object from prev, current and next task"""
from invenio.modules.workflows.models import BibWorkflowObject
from invenio.modules.workflows.api import (start,
continue_oid)
initial_data = {'data': 1}
final_data_prev = {'data': 3}
final_data_curr = {'data': 2}
final_data_next = {'data': 9}
# testing restarting from previous task
init_workflow = start("test_workflow",
data=[initial_data], module_name="unit_tests")
obj_halted = BibWorkflowObject.query.filter(
BibWorkflowObject.id_workflow == init_workflow.uuid,
BibWorkflowObject.version == CFG_OBJECT_VERSION.HALTED).first()
workflow = continue_oid(oid=obj_halted.id,
start_point="restart_prev", module_name="unit_tests")
new_object = BibWorkflowObject.query.filter(
BibWorkflowObject.id == obj_halted.id)
self.assertEqual(new_object.count(), 1)
self.assertEqual(new_object[0].get_data(), final_data_prev)
all_objects = BibWorkflowObject.query.filter(
BibWorkflowObject.id_workflow == workflow.uuid)
self.assertEqual(all_objects.count(), 2)
# testing restarting from current task
init_workflow2 = start(workflow_name="test_workflow",
data=[initial_data], module_name="unit_tests")
obj_halted2 = BibWorkflowObject.query.filter(
BibWorkflowObject.id_workflow == init_workflow2.uuid,
BibWorkflowObject.version == CFG_OBJECT_VERSION.HALTED).first()
workflow2 = continue_oid(oid=obj_halted2.id,
start_point="restart_task", module_name="unit_tests")
object2 = BibWorkflowObject.query.filter(
BibWorkflowObject.id == obj_halted2.id)
self.assertEqual(object2.count(), 1)
self.assertEqual(object2[0].get_data(), final_data_curr)
all_objects2 = BibWorkflowObject.query.filter(
BibWorkflowObject.id_workflow == workflow2.uuid)
self.assertEqual(all_objects2.count(), 2)
# testing continuing from next task
init_workflow3 = start(workflow_name="test_workflow",
data=[initial_data], module_name="unit_tests")
obj_halted3 = BibWorkflowObject.query.filter(
BibWorkflowObject.id_workflow == init_workflow3.uuid,
BibWorkflowObject.version == CFG_OBJECT_VERSION.HALTED).first()
workflow3 = continue_oid(oid=obj_halted3.id,
start_point="continue_next", module_name="unit_tests")
object3 = BibWorkflowObject.query.filter(
BibWorkflowObject.id == obj_halted3.id)
self.assertEqual(object3.count(), 1)
self.assertEqual(object3[0].get_data(), final_data_next)
all_objects3 = BibWorkflowObject.query.filter(
BibWorkflowObject.id_workflow == workflow3.uuid)
self.assertEqual(all_objects3.count(), 2)
def test_restart_workflow(self):
"""Tests restarting workflow for given workflow id"""
from invenio.modules.workflows.models import BibWorkflowObject
from invenio.modules.workflows.api import (start, start_by_wid)
initial_data = {'data': 1}
# testing restarting from previous task
init_workflow = start(workflow_name="test_workflow",
data=[initial_data], module_name="unit_tests")
init_objects = BibWorkflowObject.query.filter(
BibWorkflowObject.id_workflow == init_workflow.uuid)
restarted_workflow = start_by_wid(wid=init_workflow.uuid, module_name="unit_tests")
restarted_objects = BibWorkflowObject.query.filter(
BibWorkflowObject.id_workflow == restarted_workflow.uuid)
self.assertEqual(restarted_objects.count(), 1)
self.assertEqual(restarted_objects[0].version, init_objects[1].version)
self.assertEqual(restarted_objects[0].id_parent, init_objects[0].id)
self.assertEqual(restarted_objects[0].get_data(), init_objects[1].get_data())
def test_simplified_data(self):
"""Tests running workflow with simplified data."""
from invenio.modules.workflows.models import BibWorkflowObject
from invenio.modules.workflows.api import start
self.test_data = 20
initial_data = self.test_data
final_data = 41
workflow = start(workflow_name="simplified_data_test_workflow",
data=[self.test_data], module_name="unit_tests")
# Keep id for cleanup after
self.id_workflows.append(workflow.uuid)
# Get parent object of the workflow we just ran
# NOTE: ignore PEP8 here for None
objects = BibWorkflowObject.query.filter(
BibWorkflowObject.id_workflow == workflow.uuid,
BibWorkflowObject.id_parent == None) # noqa E711
all_objects = BibWorkflowObject.query.filter(
BibWorkflowObject.id_workflow == workflow.uuid)
self.assertEqual(all_objects.count(), 2)
self._check_workflow_execution(objects,
initial_data, final_data)
def test_redis_for_halted(self):
from invenio.modules.workflows.models import BibWorkflowObject
from invenio.modules.workflows.api import start
from invenio.modules.workflows.utils import set_up_redis
initial_data = {'data': 1}
workflow = start(workflow_name="test_workflow",
data=[initial_data], module_name="unit_tests")
obj = BibWorkflowObject.query.filter(
BibWorkflowObject.id_workflow == workflow.uuid,
BibWorkflowObject.id_parent != None).one()
rs = set_up_redis()
entry1 = rs.smembers("holdingpen_sort:publisher:Desy")
entry2 = rs.smembers("holdingpen_sort:category:lower_than_20")
self.assertTrue(str(obj.id) in entry1)
self.assertTrue(str(obj.id) in entry2)
def test_redis_for_finished(self):
pass
def test_data_object_created_outside(self):
from invenio.modules.workflows.models import BibWorkflowObject
from invenio.modules.workflows.api import start
obj = BibWorkflowObject()
initial_data = {'data': 20}
obj.set_data(initial_data)
obj._update_db()
final_data = {'data': 41}
workflow = start(workflow_name="test_workflow",
data=[obj], module_name="unit_tests")
# Keep id for cleanup after
self.id_workflows.append(workflow.uuid)
# Get parent object of the workflow we just ran
initial_object = BibWorkflowObject.query.filter(
BibWorkflowObject.id_workflow == workflow.uuid,
BibWorkflowObject.id_parent == None) # noqa E711
all_objects = BibWorkflowObject.query.filter(
BibWorkflowObject.id_workflow == workflow.uuid)
# There should only be 2 objects (initial, final)
self.assertEqual(all_objects.count(), 2)
self.assertEqual(obj.get_data(), final_data)
self.assertEqual(obj.version, CFG_OBJECT_VERSION.FINAL)
self.assertEqual(obj.id_parent, initial_object[0].id)
self.assertEqual(initial_object[0].get_data(), initial_data)
def _check_workflow_execution(self, objects, initial_data, final_data):
# Let's check that we found something. There should only be one object
self.assertEqual(objects.count(), 1)
parent_object = objects[0]
# The object should be the initial version
self.assertEqual(parent_object.version, CFG_OBJECT_VERSION.INITIAL)
# The object should have the initial data
self.assertEqual(parent_object.get_data(), initial_data)
# Fetch final object which should exist
final_object = objects[0].child_objects[0]
self.assertTrue(final_object)
if final_data:
# Check that final data is correct
self.assertEqual(final_object.get_data(), final_data)
TEST_SUITE = make_test_suite(TestWorkflowStart)
if __name__ == "__main__":
run_test_suite(TEST_SUITE)
|
mvesper/invenio-demosite
|
invenio_demosite/testsuite/regression/test_workflows.py
|
Python
|
gpl-2.0
| 24,610
|
[
"Galaxy"
] |
3d0809d722a852ef3555e9b7a73906bb5eb7e7805a14a40bee4349e0bbfe3d94
|
# encoding:utf-8
"""
:synopsis: views displaying and processing main content post forms
This module contains views that allow adding, editing, and deleting main textual content.
"""
import datetime
import logging
import os
import os.path
import random
import sys
import tempfile
import time
from django.shortcuts import get_object_or_404
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.http import HttpResponse
from django.http import HttpResponseBadRequest
from django.http import HttpResponseForbidden
from django.http import HttpResponseRedirect
from django.http import Http404
from django.utils import simplejson
from django.utils.html import strip_tags, escape
from django.utils.translation import get_language
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_lazy
from django.core.urlresolvers import reverse
from django.core import exceptions
from django.conf import settings
from django.views.decorators import csrf
from askbot import exceptions as askbot_exceptions
from askbot import forms
from askbot import models
from askbot.models import signals
from askbot.conf import settings as askbot_settings
from askbot.utils import decorators
from askbot.utils.forms import format_errors
from askbot.utils.functions import diff_date
from askbot.utils import url_utils
from askbot.utils.file_utils import store_file
from askbot.utils.loading import load_module
from askbot.views import context
from askbot.templatetags import extra_filters_jinja as template_filters
from askbot.importers.stackexchange import management as stackexchange#todo: may change
from askbot.utils.slug import slugify
# used in index page
INDEX_PAGE_SIZE = 20
INDEX_AWARD_SIZE = 15
INDEX_TAGS_SIZE = 100
# used in tags list
DEFAULT_PAGE_SIZE = 60
# used in questions
QUESTIONS_PAGE_SIZE = 10
# used in answers
ANSWERS_PAGE_SIZE = 10
@csrf.csrf_exempt
def upload(request):#ajax upload file to a question or answer
"""view that handles file upload via Ajax
"""
# check upload permission
result = ''
error = ''
new_file_name = ''
try:
#may raise exceptions.PermissionDenied
result, error, file_url, orig_file_name = None, '', None, None
if request.user.is_anonymous():
msg = _('Sorry, anonymous users cannot upload files')
raise exceptions.PermissionDenied(msg)
request.user.assert_can_upload_file()
#todo: build proper form validation
file_name_prefix = request.POST.get('file_name_prefix', '')
if file_name_prefix not in ('', 'group_logo_'):
raise exceptions.PermissionDenied('invalid upload file name prefix')
#todo: check file type
uploaded_file = request.FILES['file-upload']#take first file
orig_file_name = uploaded_file.name
#todo: extension checking should be replaced with mimetype checking
#and this must be part of the form validation
file_extension = os.path.splitext(orig_file_name)[1].lower()
if not file_extension in settings.ASKBOT_ALLOWED_UPLOAD_FILE_TYPES:
file_types = "', '".join(settings.ASKBOT_ALLOWED_UPLOAD_FILE_TYPES)
msg = _("allowed file types are '%(file_types)s'") % \
{'file_types': file_types}
raise exceptions.PermissionDenied(msg)
# generate new file name and storage object
file_storage, new_file_name, file_url = store_file(
uploaded_file, file_name_prefix
)
# check file size (in bytes)
size = file_storage.size(new_file_name)
if size > settings.ASKBOT_MAX_UPLOAD_FILE_SIZE:
file_storage.delete(new_file_name)
msg = _("maximum upload file size is %(file_size)sK") % \
{'file_size': settings.ASKBOT_MAX_UPLOAD_FILE_SIZE}
raise exceptions.PermissionDenied(msg)
except exceptions.PermissionDenied, e:
error = unicode(e)
except Exception, e:
logging.critical(unicode(e))
error = _('Error uploading file. Please contact the site administrator. Thank you.')
if error == '':
result = 'Good'
else:
result = ''
file_url = ''
#data = simplejson.dumps({
# 'result': result,
# 'error': error,
# 'file_url': file_url
#})
#return HttpResponse(data, mimetype = 'application/json')
xml_template = "<result><msg><![CDATA[%s]]></msg><error><![CDATA[%s]]></error><file_url>%s</file_url><orig_file_name><![CDATA[%s]]></orig_file_name></result>"
xml = xml_template % (result, error, file_url, orig_file_name)
return HttpResponse(xml, mimetype="application/xml")
def __import_se_data(dump_file):
"""non-view function that imports the SE data
in the future may import other formats as well
In this function stdout is temporarily
redirected, so that the underlying importer management
command could stream the output to the browser
todo: maybe need to add try/except clauses to restore
the redirects in the exceptional situations
"""
fake_stdout = tempfile.NamedTemporaryFile()
real_stdout = sys.stdout
sys.stdout = fake_stdout
importer = stackexchange.ImporterThread(dump_file = dump_file.name)
importer.start()
#run a loop where we'll be reading output of the
#importer thread and yielding it to the caller
read_stdout = open(fake_stdout.name, 'r')
file_pos = 0
fd = read_stdout.fileno()
yield '<html><body><style>* {font-family: sans;} p {font-size: 12px; line-height: 16px; margin: 0; padding: 0;}</style><h1>Importing your data. This may take a few minutes...</h1>'
while importer.isAlive():
c_size = os.fstat(fd).st_size
if c_size > file_pos:
line = read_stdout.readline()
yield '<p>' + line + '</p>'
file_pos = read_stdout.tell()
fake_stdout.close()
read_stdout.close()
dump_file.close()
sys.stdout = real_stdout
yield '<p>Done. Please, <a href="%s">Visit Your Forum</a></p></body></html>' % reverse('index')
@csrf.csrf_protect
def import_data(request):
"""a view allowing the site administrator
upload stackexchange data
"""
#allow this view only to site admins
#or to anyone when the forum is completely empty
if request.user.is_anonymous() or (not request.user.is_administrator()):
if models.Post.objects.get_questions().exists():
raise Http404
if request.method == 'POST':
#if not request.is_ajax():
# raise Http404
form = forms.DumpUploadForm(request.POST, request.FILES)
if form.is_valid():
dump_file = form.cleaned_data['dump_file']
dump_storage = tempfile.NamedTemporaryFile()
#save the temp file
for chunk in dump_file.chunks():
dump_storage.write(chunk)
dump_storage.flush()
return HttpResponse(__import_se_data(dump_storage))
#yield HttpResponse(_('StackExchange import complete.'), mimetype='text/plain')
#dump_storage.close()
else:
form = forms.DumpUploadForm()
data = {
'dump_upload_form': form,
'need_configuration': (not stackexchange.is_ready())
}
return render(request, 'import_data.html', data)
#@login_required #actually you can post anonymously, but then must register
@csrf.csrf_protect
@decorators.check_authorization_to_post(ugettext_lazy('Please log in to make posts'))
@decorators.check_spam('text')
def ask(request):#view used to ask a new question
"""a view to ask a new question
gives space for question title, body, tags and a checkbox to post as wiki
user can start posting a question anonymously but then
must login/register in order for the question to be shown
"""
if request.user.is_authenticated():
if request.user.is_read_only():
referer = request.META.get("HTTP_REFERER", reverse('questions'))
request.user.message_set.create(message=_('Sorry, but you have only read access'))
return HttpResponseRedirect(referer)
form = forms.AskForm(request.REQUEST, user=request.user)
if request.method == 'POST':
if form.is_valid():
timestamp = datetime.datetime.now()
title = form.cleaned_data['title']
wiki = form.cleaned_data['wiki']
tagnames = form.cleaned_data['tags']
text = form.cleaned_data['text']
ask_anonymously = form.cleaned_data['ask_anonymously']
post_privately = form.cleaned_data['post_privately']
group_id = form.cleaned_data.get('group_id', None)
language = form.cleaned_data.get('language', None)
if request.user.is_authenticated():
drafts = models.DraftQuestion.objects.filter(
author=request.user
)
drafts.delete()
user = form.get_post_user(request.user)
try:
question = user.post_question(
title=title,
body_text=text,
tags=tagnames,
wiki=wiki,
is_anonymous=ask_anonymously,
is_private=post_privately,
timestamp=timestamp,
group_id=group_id,
language=language
)
signals.new_question_posted.send(None,
question=question,
user=user,
form_data=form.cleaned_data
)
return HttpResponseRedirect(question.get_absolute_url())
except exceptions.PermissionDenied, e:
request.user.message_set.create(message = unicode(e))
return HttpResponseRedirect(reverse('index'))
else:
request.session.flush()
session_key = request.session.session_key
models.AnonymousQuestion.objects.create(
session_key = session_key,
title = title,
tagnames = tagnames,
wiki = wiki,
is_anonymous = ask_anonymously,
text = text,
added_at = timestamp,
ip_addr = request.META['REMOTE_ADDR'],
)
return HttpResponseRedirect(url_utils.get_login_url())
if request.method == 'GET':
form = forms.AskForm(user=request.user)
draft_title = ''
draft_text = ''
draft_tagnames = ''
if request.user.is_authenticated():
drafts = models.DraftQuestion.objects.filter(author=request.user)
if len(drafts) > 0:
draft = drafts[0]
draft_title = draft.title
draft_text = draft.text
draft_tagnames = draft.tagnames
form.initial = {
'ask_anonymously': request.REQUEST.get('ask_anonymously', False),
'tags': request.REQUEST.get('tags', draft_tagnames),
'text': request.REQUEST.get('text', draft_text),
'title': request.REQUEST.get('title', draft_title),
'post_privately': request.REQUEST.get('post_privately', False),
'language': get_language(),
'wiki': request.REQUEST.get('wiki', False),
}
if 'group_id' in request.REQUEST:
try:
group_id = int(request.GET.get('group_id', None))
form.initial['group_id'] = group_id
except Exception:
pass
data = {
'active_tab': 'ask',
'page_class': 'ask-page',
'form' : form,
'mandatory_tags': models.tag.get_mandatory_tags(),
'email_validation_faq_url':reverse('faq') + '#validate',
'category_tree_data': askbot_settings.CATEGORY_TREE,
'tag_names': list()#need to keep context in sync with edit_question for tag editor
}
data.update(context.get_for_tag_editor())
return render(request, 'ask.html', data)
@login_required
@csrf.csrf_exempt
def retag_question(request, id):
"""retag question view
"""
question = get_object_or_404(models.Post, id=id)
try:
request.user.assert_can_retag_question(question)
if request.method == 'POST':
form = forms.RetagQuestionForm(question, request.POST)
if form.is_valid():
if form.has_changed():
request.user.retag_question(question=question, tags=form.cleaned_data['tags'])
if request.is_ajax():
response_data = {
'success': True,
'new_tags': question.thread.tagnames
}
if request.user.message_set.count() > 0:
#todo: here we will possibly junk messages
message = request.user.get_and_delete_messages()[-1]
response_data['message'] = message
data = simplejson.dumps(response_data)
return HttpResponse(data, mimetype="application/json")
else:
return HttpResponseRedirect(question.get_absolute_url())
elif request.is_ajax():
response_data = {
'message': format_errors(form.errors['tags']),
'success': False
}
data = simplejson.dumps(response_data)
return HttpResponse(data, mimetype="application/json")
else:
form = forms.RetagQuestionForm(question)
data = {
'active_tab': 'questions',
'question': question,
'form' : form,
}
return render(request, 'question_retag.html', data)
except exceptions.PermissionDenied, e:
if request.is_ajax():
response_data = {
'message': unicode(e),
'success': False
}
data = simplejson.dumps(response_data)
return HttpResponse(data, mimetype="application/json")
else:
request.user.message_set.create(message = unicode(e))
return HttpResponseRedirect(question.get_absolute_url())
@login_required
@csrf.csrf_protect
@decorators.check_spam('text')
def edit_question(request, id):
"""edit question view
"""
question = get_object_or_404(models.Post, id=id)
revision = question.get_latest_revision()
revision_form = None
try:
request.user.assert_can_edit_question(question)
if request.method == 'POST':
if request.POST['select_revision'] == 'true':
#revert-type edit - user selected previous revision
revision_form = forms.RevisionForm(
question,
revision,
request.POST
)
if revision_form.is_valid():
# Replace with those from the selected revision
rev_id = revision_form.cleaned_data['revision']
revision = question.revisions.get(revision = rev_id)
form = forms.EditQuestionForm(
question=question,
user=request.user,
revision=revision
)
else:
form = forms.EditQuestionForm(
request.POST,
question=question,
user=request.user,
revision=revision
)
else:  # new content edit
# Always check modifications against the latest revision
form = forms.EditQuestionForm(
request.POST,
question=question,
revision=revision,
user=request.user,
)
revision_form = forms.RevisionForm(question, revision)
if form.is_valid():
if form.has_changed():
if form.cleaned_data['reveal_identity']:
question.thread.remove_author_anonymity()
if 'language' in form.cleaned_data:
question.thread.language_code = form.cleaned_data['language']
is_anon_edit = form.cleaned_data['stay_anonymous']
is_wiki = form.cleaned_data.get('wiki', question.wiki)
post_privately = form.cleaned_data['post_privately']
suppress_email = form.cleaned_data['suppress_email']
user = form.get_post_user(request.user)
user.edit_question(
question=question,
title=form.cleaned_data['title'],
body_text=form.cleaned_data['text'],
revision_comment = form.cleaned_data['summary'],
tags = form.cleaned_data['tags'],
wiki = is_wiki,
edit_anonymously = is_anon_edit,
is_private = post_privately,
suppress_email=suppress_email
)
return HttpResponseRedirect(question.get_absolute_url())
else:
#request type was "GET"
revision_form = forms.RevisionForm(question, revision)
initial = {
'language': question.thread.language_code,
'post_privately': question.is_private(),
'wiki': question.wiki
}
form = forms.EditQuestionForm(
question=question,
revision=revision,
user=request.user,
initial=initial
)
data = {
'page_class': 'edit-question-page',
'active_tab': 'questions',
'question': question,
'revision': revision,
'revision_form': revision_form,
'mandatory_tags': models.tag.get_mandatory_tags(),
'form' : form,
'tag_names': question.thread.get_tag_names(),
'category_tree_data': askbot_settings.CATEGORY_TREE
}
data.update(context.get_for_tag_editor())
return render(request, 'question_edit.html', data)
except exceptions.PermissionDenied, e:
request.user.message_set.create(message = unicode(e))
return HttpResponseRedirect(question.get_absolute_url())
@login_required
@csrf.csrf_protect
@decorators.check_spam('text')
def edit_answer(request, id):
answer = get_object_or_404(models.Post, id=id)
revision = answer.get_latest_revision()
class_path = getattr(settings, 'ASKBOT_EDIT_ANSWER_FORM', None)
if class_path:
edit_answer_form_class = load_module(class_path)
else:
edit_answer_form_class = forms.EditAnswerForm
try:
request.user.assert_can_edit_answer(answer)
if request.method == "POST":
if request.POST['select_revision'] == 'true':
# user has changed the revision number
revision_form = forms.RevisionForm(
answer,
revision,
request.POST
)
if revision_form.is_valid():
# Replace with those from the selected revision
rev = revision_form.cleaned_data['revision']
revision = answer.revisions.get(revision = rev)
form = edit_answer_form_class(
answer, revision, user=request.user
)
else:
form = edit_answer_form_class(
answer,
revision,
request.POST,
user=request.user
)
else:
form = edit_answer_form_class(
answer, revision, request.POST, user=request.user
)
revision_form = forms.RevisionForm(answer, revision)
if form.is_valid():
if form.has_changed():
user = form.get_post_user(request.user)
suppress_email = form.cleaned_data['suppress_email']
is_private = form.cleaned_data.get('post_privately', False)
user.edit_answer(
answer=answer,
body_text=form.cleaned_data['text'],
revision_comment=form.cleaned_data['summary'],
wiki=form.cleaned_data.get('wiki', answer.wiki),
is_private=is_private,
suppress_email=suppress_email
)
signals.answer_edited.send(None,
answer=answer,
user=user,
form_data=form.cleaned_data
)
return HttpResponseRedirect(answer.get_absolute_url())
else:
revision_form = forms.RevisionForm(answer, revision)
form = edit_answer_form_class(answer, revision, user=request.user)
if request.user.can_make_group_private_posts():
form.initial['post_privately'] = answer.is_private()
data = {
'page_class': 'edit-answer-page',
'active_tab': 'questions',
'answer': answer,
'revision': revision,
'revision_form': revision_form,
'form': form,
}
extra_context = context.get_extra(
'ASKBOT_EDIT_ANSWER_PAGE_EXTRA_CONTEXT',
request,
data
)
data.update(extra_context)
return render(request, 'answer_edit.html', data)
except exceptions.PermissionDenied, e:
request.user.message_set.create(message = unicode(e))
return HttpResponseRedirect(answer.get_absolute_url())
#todo: rename this function to post_new_answer
@decorators.check_authorization_to_post(ugettext_lazy('Please log in to make posts'))
@decorators.check_spam('text')
def answer(request, id, form_class=forms.AnswerForm):#process a new answer
"""view that posts new answer
anonymous users post into anonymous storage
and are redirected to the login page;
authenticated users post directly
"""
question = get_object_or_404(models.Post, post_type='question', id=id)
if request.method == "POST":
#this check preserves backward compatibility
if form_class == forms.AnswerForm:
custom_class_path = getattr(settings, 'ASKBOT_NEW_ANSWER_FORM', None)
if custom_class_path:
form_class = load_module(custom_class_path)
else:
form_class = forms.AnswerForm
form = form_class(request.POST, user=request.user)
if form.is_valid():
if request.user.is_authenticated():
drafts = models.DraftAnswer.objects.filter(
author=request.user,
thread=question.thread
)
drafts.delete()
user = form.get_post_user(request.user)
try:
answer = form.save(question, user)
signals.new_answer_posted.send(None,
answer=answer,
user=user,
form_data=form.cleaned_data
)
return HttpResponseRedirect(answer.get_absolute_url())
except askbot_exceptions.AnswerAlreadyGiven, e:
request.user.message_set.create(message = unicode(e))
answer = question.thread.get_answers_by_user(user)[0]
return HttpResponseRedirect(answer.get_absolute_url())
except exceptions.PermissionDenied, e:
request.user.message_set.create(message = unicode(e))
else:
request.session.flush()
models.AnonymousAnswer.objects.create(
question=question,
wiki=form.cleaned_data['wiki'],
text=form.cleaned_data['text'],
session_key=request.session.session_key,
ip_addr=request.META['REMOTE_ADDR'],
)
return HttpResponseRedirect(url_utils.get_login_url())
return HttpResponseRedirect(question.get_absolute_url())
def __generate_comments_json(obj, user):
"""non-view generates json data for the post comments
"""
models.Post.objects.precache_comments(for_posts=[obj], visitor=user)
comments = obj._cached_comments
# {"Id":6,"PostId":38589,"CreationDate":"an hour ago","Text":"hello there!","UserDisplayName":"Jarrod Dixon","UserUrl":"/users/3/jarrod-dixon","DeleteUrl":null}
json_comments = []
for comment in comments:
if user and user.is_authenticated():
try:
user.assert_can_delete_comment(comment)
#/posts/392845/comments/219852/delete
#todo translate this url
is_deletable = True
except exceptions.PermissionDenied:
is_deletable = False
is_editable = template_filters.can_edit_comment(user, comment)
else:
is_deletable = False
is_editable = False
comment_owner = comment.author
tz = ' ' + template_filters.TIMEZONE_STR
comment_data = {'id' : comment.id,
'object_id': obj.id,
'comment_added_at': str(comment.added_at.replace(microsecond = 0)) + tz,
'html': comment.html,
'user_display_name': escape(comment_owner.username),
'user_url': comment_owner.get_profile_url(),
'user_id': comment_owner.id,
'is_deletable': is_deletable,
'is_editable': is_editable,
'points': comment.points,
'score': comment.points, #to support js
'upvoted_by_user': getattr(comment, 'upvoted_by_user', False)
}
json_comments.append(comment_data)
data = simplejson.dumps(json_comments)
return HttpResponse(data, mimetype="application/json")
@csrf.csrf_exempt
@decorators.check_spam('comment')
def post_comments(request):#generic ajax handler to load comments to an object
"""todo: fixme: post_comments is ambigous:
means either get comments for post or
add a new comment to post
"""
# currently comments can only be fetched or posted via ajax
post_type = request.REQUEST.get('post_type', '')
if not request.is_ajax() or post_type not in ('question', 'answer'):
raise Http404 # TODO: Shouldn't be 404! More like 400, 403 or sth more specific
user = request.user
if request.method == 'POST':
form = forms.NewCommentForm(request.POST)
elif request.method == 'GET':
form = forms.GetDataForPostForm(request.GET)
if not form.is_valid():
return HttpResponseBadRequest(
_('This content is forbidden'),
mimetype='application/json'
)
post_id = form.cleaned_data['post_id']
try:
post = models.Post.objects.get(id=post_id)
except models.Post.DoesNotExist:
return HttpResponseBadRequest(
_('Post not found'), mimetype='application/json'
)
if request.method == "GET":
response = __generate_comments_json(post, user)
elif request.method == "POST":
try:
if user.is_anonymous():
msg = _('Sorry, you appear to be logged out and '
'cannot post comments. Please '
'<a href="%(sign_in_url)s">sign in</a>.') % \
{'sign_in_url': url_utils.get_login_url()}
raise exceptions.PermissionDenied(msg)
comment = user.post_comment(
parent_post=post, body_text=form.cleaned_data['comment']
)
signals.new_comment_posted.send(None,
comment=comment,
user=user,
form_data=form.cleaned_data
)
response = __generate_comments_json(post, user)
except exceptions.PermissionDenied, e:
response = HttpResponseForbidden(unicode(e), mimetype="application/json")
return response
@csrf.csrf_exempt
@decorators.ajax_only
#@decorators.check_spam('comment')
def edit_comment(request):
if request.user.is_anonymous():
raise exceptions.PermissionDenied(_('Sorry, anonymous users cannot edit comments'))
form = forms.EditCommentForm(request.POST)
if not form.is_valid():
raise exceptions.PermissionDenied('This content is forbidden')
comment_post = models.Post.objects.get(
post_type='comment',
id=form.cleaned_data['comment_id']
)
request.user.edit_comment(
comment_post=comment_post,
body_text=form.cleaned_data['comment'],
suppress_email=form.cleaned_data['suppress_email']
)
is_deletable = template_filters.can_delete_comment(
comment_post.author, comment_post)
is_editable = template_filters.can_edit_comment(
comment_post.author, comment_post)
tz = ' ' + template_filters.TIMEZONE_STR
timestamp = str(comment_post.added_at.replace(microsecond=0)) + tz
return {
'id' : comment_post.id,
'object_id': comment_post.parent.id,
'comment_added_at': timestamp,
'html': comment_post.html,
'user_display_name': escape(comment_post.author.username),
'user_url': comment_post.author.get_profile_url(),
'user_id': comment_post.author.id,
'is_deletable': is_deletable,
'is_editable': is_editable,
'score': comment_post.points, #to support unchanged js
'points': comment_post.points,
'voted': comment_post.is_upvoted_by(request.user),
}
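# --- Illustrative note (editor's note, not part of askbot) ---
# edit_comment() returns a plain dict rather than an HttpResponse; the
# @decorators.ajax_only wrapper is presumably responsible for serializing it
# to a JSON response, matching the payload shape produced by
# __generate_comments_json() above.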
@csrf.csrf_exempt
def delete_comment(request):
"""ajax handler to delete comment
"""
try:
if request.user.is_anonymous():
msg = _('Sorry, you appear to be logged out and '
'cannot delete comments. Please '
'<a href="%(sign_in_url)s">sign in</a>.') % \
{'sign_in_url': url_utils.get_login_url()}
raise exceptions.PermissionDenied(msg)
if request.is_ajax():
form = forms.DeleteCommentForm(request.POST)
if not form.is_valid():
return HttpResponseBadRequest()
comment_id = form.cleaned_data['comment_id']
comment = get_object_or_404(models.Post, post_type='comment', id=comment_id)
request.user.assert_can_delete_comment(comment)
parent = comment.parent
comment.delete()
#attn: recalc denormalized field
parent.comment_count = parent.comments.count()
parent.save()
parent.thread.invalidate_cached_data()
return __generate_comments_json(parent, request.user)
raise exceptions.PermissionDenied(
_('sorry, we seem to have some technical difficulties')
)
except exceptions.PermissionDenied, e:
return HttpResponseForbidden(
unicode(e),
mimetype = 'application/json'
)
@decorators.post_only
def comment_to_answer(request):
try:
comment_id = int(request.POST.get('comment_id'))
except (ValueError, TypeError):
#TypeError or ValueError is raised if int() fails
raise Http404
comment = get_object_or_404(
models.Post,
post_type='comment',
id=comment_id
)
request.user.repost_comment_as_answer(comment)
return HttpResponseRedirect(comment.get_absolute_url())
@decorators.post_only
@csrf.csrf_protect
#todo: change the urls config for this
def repost_answer_as_comment(request, destination=None):
assert(
destination in (
'comment_under_question',
'comment_under_previous_answer'
)
)
answer_id = request.POST.get('answer_id')
if answer_id:
answer_id = int(answer_id)
answer = get_object_or_404(models.Post,
post_type = 'answer', id=answer_id)
if destination == 'comment_under_question':
destination_post = answer.thread._question_post()
else:
#comment_under_previous_answer
destination_post = answer.get_previous_answer(user=request.user)
#todo: implement for comment under other answer
if destination_post is None:
message = _('Error - could not find the destination post')
request.user.message_set.create(message=message)
return HttpResponseRedirect(answer.get_absolute_url())
if len(answer.text) <= askbot_settings.MAX_COMMENT_LENGTH:
answer.post_type = 'comment'
answer.parent = destination_post
new_comment_count = answer.comments.count() + 1
answer.comment_count = 0
answer_comments = models.Post.objects.get_comments().filter(parent=answer)
answer_comments.update(parent=destination_post)
#why this and not just "save"?
answer.parse_and_save(author=answer.author)
answer.thread.update_answer_count()
answer.parent.comment_count += new_comment_count
answer.parent.save()
answer.thread.invalidate_cached_data()
else:
message = _(
'Cannot convert, because text has more characters than '
'%(max_chars)s - maximum allowed for comments'
) % {'max_chars': askbot_settings.MAX_COMMENT_LENGTH}
request.user.message_set.create(message=message)
return HttpResponseRedirect(answer.get_absolute_url())
else:
raise Http404
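# --- Illustrative sketch (editor's note, not part of askbot) ---
# The bookkeeping in repost_answer_as_comment moves an answer with N comments
# under a new parent: the destination gains the N re-parented comments plus
# the converted answer itself, hence new_comment_count = N + 1 above. A
# hypothetical helper modelling only the counter arithmetic:
def _comments_gained_by_destination(answer_comment_count):
    # the answer's own comments are re-parented, and the answer becomes a comment too
    return answer_comment_count + 1

assert _comments_gained_by_destination(3) == 4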
|
PearsonIOKI/compose-forum
|
askbot/views/writers.py
|
Python
|
gpl-3.0
| 35,666
|
[
"VisIt"
] |
81ba7a180f9f91966d1d0085edf1cdc1d705eb6ad82eb5ec6ec3fbea548344bf
|
''' LogStatusAction
'''
__RCSID__ = '$Id$'
from DIRAC import S_ERROR
from DIRAC.ResourceStatusSystem.Client.ResourceStatusClient import ResourceStatusClient
from DIRAC.ResourceStatusSystem.PolicySystem.Actions.BaseAction import BaseAction
class LogStatusAction(BaseAction):
'''
Action that registers a new entry in the <element>Status table of the database.
It adds the record, or modifies it if it already exists in the table.
'''
def __init__(self, name, decisionParams, enforcementResult, singlePolicyResults,
clients=None):
super(LogStatusAction, self).__init__(name, decisionParams, enforcementResult,
singlePolicyResults, clients)
if clients is not None and 'ResourceStatusClient' in clients:
self.rsClient = clients['ResourceStatusClient']
else:
self.rsClient = ResourceStatusClient()
def run(self):
'''
Checks that it has the parameters it needs and tries to addOrModify the
entry in the database.
'''
# Minor security checks
element = self.decisionParams['element']
if element is None:
return S_ERROR('element should not be None')
name = self.decisionParams['name']
if name is None:
return S_ERROR('name should not be None')
statusType = self.decisionParams['statusType']
if statusType is None:
return S_ERROR('statusType should not be None')
status = self.enforcementResult['Status']
if status is None:
return S_ERROR('status should not be None')
elementType = self.decisionParams['elementType']
if elementType is None:
return S_ERROR('elementType should not be None')
reason = self.enforcementResult['Reason']
if reason is None:
return S_ERROR('reason should not be None')
# Truncate reason to fit in database column
reason = (reason[:508] + '..') if len(reason) > 508 else reason
resLogUpdate = self.rsClient.addOrModifyStatusElement(element, 'Status',
name=name, statusType=statusType,
status=status, elementType=elementType,
reason=reason
)
return resLogUpdate
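# --- Illustrative sketch (editor's note, not part of DIRAC) ---
# run() truncates `reason` so it fits the status table column: strings longer
# than 508 characters are cut and suffixed with '..', giving at most 510
# characters. The same expression as a standalone, hypothetical helper:
def _truncate_reason(reason, limit=508):
    # keep the string within the database column width
    return (reason[:limit] + '..') if len(reason) > limit else reason

assert len(_truncate_reason('x' * 1000)) == 510
assert _truncate_reason('short') == 'short'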
################################################################################
# EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
|
chaen/DIRAC
|
ResourceStatusSystem/PolicySystem/Actions/LogStatusAction.py
|
Python
|
gpl-3.0
| 2,480
|
[
"DIRAC"
] |
f70b7f36d542a400d3773272783467205086e34b49489c466f87c078dcd9d6eb
|
########################################################################
# File : ComputingElement.py
# Author : Stuart Paterson, A.T.
########################################################################
""" The Computing Element class is a base class for all the various
types of CEs. It serves several purposes:
- collects general CE related parameters to generate CE description
for the job matching
- provides logic for evaluation of the number of available CPU slots
- provides logic for the proxy renewal while executing jobs
The CE parameters are collected from the following sources, in descending
order of priority:
- parameters provided through setParameters() method of the class
- parameters in /LocalSite configuration section
- parameters in /LocalSite/<ceName>/ResourceDict configuration section
- parameters in /LocalSite/ResourceDict configuration section
- parameters in /LocalSite/<ceName> configuration section
- parameters in /Resources/Computing/<ceName> configuration section
- parameters in /Resources/Computing/CEDefaults configuration section
The ComputingElement objects are usually instantiated with the help of
ComputingElementFactory.
"""
from __future__ import print_function
import os
import multiprocessing
from DIRAC.ConfigurationSystem.Client.Config import gConfig
from DIRAC.Core.Security.ProxyFile import writeToProxyFile
from DIRAC.Core.Security.ProxyInfo import getProxyInfoAsString
from DIRAC.Core.Security.ProxyInfo import formatProxyInfoAsString
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.FrameworkSystem.Client.ProxyManagerClient import gProxyManager
from DIRAC.Core.Security.VOMS import VOMS
from DIRAC.ConfigurationSystem.Client.Helpers import Registry
from DIRAC.Core.Security import Properties
from DIRAC.Core.Utilities.Time import dateTime, second
from DIRAC import S_OK, S_ERROR, gLogger, version
from DIRAC.Core.Utilities.ObjectLoader import ObjectLoader
__RCSID__ = "$Id$"
INTEGER_PARAMETERS = ['CPUTime', 'NumberOfProcessors']
FLOAT_PARAMETERS = []
LIST_PARAMETERS = ['Tag', 'RequiredTag']
WAITING_TO_RUNNING_RATIO = 0.5
MAX_WAITING_JOBS = 1
MAX_TOTAL_JOBS = 1
class ComputingElement(object):
""" ComputingElement base class """
#############################################################################
def __init__(self, ceName):
""" Standard constructor
"""
self.log = gLogger.getSubLogger(ceName)
self.ceName = ceName
self.ceType = ''
self.ceParameters = {}
self.proxy = ''
self.valid = None
self.mandatoryParameters = []
self.batch = None
self.batchSystem = None
self.batchModuleFile = None
self.minProxyTime = gConfig.getValue('/Registry/MinProxyLifeTime', 10800) # secs
self.defaultProxyTime = gConfig.getValue('/Registry/DefaultProxyLifeTime', 43200) # secs
self.proxyCheckPeriod = gConfig.getValue('/Registry/ProxyCheckingPeriod', 3600) # secs
self.initializeParameters()
def setProxy(self, proxy, valid=0):
""" Set proxy for this instance
"""
self.proxy = proxy
self.valid = dateTime() + second * valid
def _prepareProxy(self):
""" Set the environment variable X509_USER_PROXY
"""
if not self.proxy:
result = getProxyInfo()
if not result['OK']:
return S_ERROR("No proxy available")
if "path" in result['Value']:
os.environ['X509_USER_PROXY'] = result['Value']['path']
return S_OK()
else:
result = gProxyManager.dumpProxyToFile(self.proxy, requiredTimeLeft=self.minProxyTime)
if not result['OK']:
return result
os.environ['X509_USER_PROXY'] = result['Value']
gLogger.debug("Set proxy variable X509_USER_PROXY to %s" % os.environ['X509_USER_PROXY'])
return S_OK()
def isProxyValid(self, valid=1000):
""" Check if the stored proxy is valid
"""
if not self.valid:
result = S_ERROR('Proxy is not valid for the requested length')
result['Value'] = 0
return result
delta = self.valid - dateTime()
totalSeconds = delta.days * 86400 + delta.seconds
if totalSeconds > valid:
return S_OK(totalSeconds - valid)
result = S_ERROR('Proxy is not valid for the requested length')
result['Value'] = totalSeconds - valid
return result
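# --- Illustrative note (editor's note, not part of DIRAC) ---
# isProxyValid() converts the remaining timedelta to whole seconds by hand
# (days * 86400 + seconds). For example, with the stdlib datetime module:
#   >>> from datetime import timedelta
#   >>> delta = timedelta(days=1, seconds=30)
#   >>> delta.days * 86400 + delta.seconds
#   86430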
def initializeParameters(self):
""" Initialize the CE parameters after they are collected from various sources
"""
# Collect global defaults first
for section in ['/Resources/Computing/CEDefaults', '/Resources/Computing/%s' % self.ceName]:
result = gConfig.getOptionsDict(section)
if result['OK']:
ceOptions = result['Value']
for key in ceOptions:
if key in INTEGER_PARAMETERS:
ceOptions[key] = int(ceOptions[key])
if key in FLOAT_PARAMETERS:
ceOptions[key] = float(ceOptions[key])
if key in LIST_PARAMETERS:
ceOptions[key] = gConfig.getValue(os.path.join(section, key), [])
self.ceParameters.update(ceOptions)
# Get local CE configuration
localConfigDict = getCEConfigDict(self.ceName)
self.ceParameters.update(localConfigDict)
# Adds site level parameters
section = '/LocalSite'
result = gConfig.getOptionsDict(section)
if result['OK'] and result['Value']:
localSiteParameters = result['Value']
self.log.debug('Local site parameters are: %s' % (localSiteParameters))
for option, value in localSiteParameters.iteritems():
if option == 'Architecture':
self.ceParameters['Platform'] = value
self.ceParameters['Architecture'] = value
elif option == 'LocalSE':
self.ceParameters['LocalSE'] = value.split(', ')
else:
self.ceParameters[option] = value
self._addCEConfigDefaults()
def isValid(self):
""" Check the sanity of the Computing Element definition
"""
for par in self.mandatoryParameters:
if par not in self.ceParameters:
return S_ERROR('Missing Mandatory Parameter in Configuration: %s' % par)
return S_OK()
#############################################################################
def _addCEConfigDefaults(self):
"""Method to make sure all necessary Configuration Parameters are defined
"""
self.ceParameters['WaitingToRunningRatio'] = float(
self.ceParameters.get('WaitingToRunningRatio', WAITING_TO_RUNNING_RATIO))
self.ceParameters['MaxWaitingJobs'] = int(self.ceParameters.get('MaxWaitingJobs', MAX_WAITING_JOBS))
self.ceParameters['MaxTotalJobs'] = int(self.ceParameters.get('MaxTotalJobs', MAX_TOTAL_JOBS))
def _reset(self):
""" Make specific CE parameter adjustments after they are collected or added
"""
pass
def loadBatchSystem(self):
""" Instantiate object representing the backend batch system
"""
if self.batchSystem is None:
self.batchSystem = self.ceParameters['BatchSystem']
objectLoader = ObjectLoader()
result = objectLoader.loadObject('Resources.Computing.BatchSystems.%s' % self.batchSystem, self.batchSystem)
if not result['OK']:
gLogger.error('Failed to load batch object: %s' % result['Message'])
return result
batchClass = result['Value']
self.batchModuleFile = result['ModuleFile']
self.batch = batchClass()
self.log.info("Batch system class from module: ", self.batchModuleFile)
def setParameters(self, ceOptions):
""" Add parameters from the given dictionary overriding the previous values
:param dict ceOptions: CE parameters dictionary to update already defined ones
"""
self.ceParameters.update(ceOptions)
# At this point we can know the exact type of CE,
# try to get generic parameters for this type
ceType = self.ceParameters.get('CEType')
if ceType:
result = gConfig.getOptionsDict('/Resources/Computing/%s' % ceType)
if result['OK']:
generalCEDict = result['Value']
generalCEDict.update(self.ceParameters)
self.ceParameters = generalCEDict
# If NumberOfProcessors is present in the description but is equal to zero
# interpret it as needing local evaluation
if self.ceParameters.get("NumberOfProcessors", -1) == 0:
self.ceParameters["NumberOfProcessors"] = multiprocessing.cpu_count()
for key in ceOptions:
if key in INTEGER_PARAMETERS:
self.ceParameters[key] = int(self.ceParameters[key])
if key in FLOAT_PARAMETERS:
self.ceParameters[key] = float(self.ceParameters[key])
self._reset()
return S_OK()
def getParameterDict(self):
""" Get the CE complete parameter dictionary
"""
return self.ceParameters
#############################################################################
def setCPUTimeLeft(self, cpuTimeLeft=None):
"""Update the CPUTime parameter of the CE classAd, necessary for running in filling mode
"""
if not cpuTimeLeft:
# do nothing
return S_OK()
try:
intCPUTimeLeft = int(cpuTimeLeft)
except ValueError:
return S_ERROR('Wrong type for setCPUTimeLeft argument')
self.ceParameters['CPUTime'] = intCPUTimeLeft
return S_OK(intCPUTimeLeft)
#############################################################################
def available(self, jobIDList=None):
"""This method returns the number of available slots in the target CE. The CE
instance polls for waiting and running jobs and compares to the limits
in the CE parameters.
:param jobIDList: list of already existing job IDs to be checked against
:type jobIDList: python:list
"""
# If there are no registered jobs yet
if jobIDList is not None and not jobIDList:
result = S_OK()
result['RunningJobs'] = 0
result['WaitingJobs'] = 0
result['SubmittedJobs'] = 0
else:
ceType = self.ceParameters.get('CEType')
if ceType == 'CREAM':
result = self.getCEStatus(jobIDList)
else:
result = self.getCEStatus()
if not result['OK']:
return result
runningJobs = result['RunningJobs']
waitingJobs = result['WaitingJobs']
submittedJobs = result['SubmittedJobs']
availableProcessors = result.get('AvailableProcessors')
ceInfoDict = dict(result)
maxTotalJobs = int(self.ceParameters.get('MaxTotalJobs', 0))
ceInfoDict['MaxTotalJobs'] = maxTotalJobs
waitingToRunningRatio = float(self.ceParameters.get('WaitingToRunningRatio', 0.0))
# if there are no Running jobs we can submit at most 'MaxWaitingJobs' jobs;
# if there are Running jobs this limit grows up to the W / R ratio 'WaitingToRunningRatio'
maxWaitingJobs = int(max(int(self.ceParameters.get('MaxWaitingJobs', 0)),
runningJobs * waitingToRunningRatio))
self.log.verbose('Max Number of Jobs:', maxTotalJobs)
self.log.verbose('Max W/R Ratio:', waitingToRunningRatio)
self.log.verbose('Max Waiting Jobs:', maxWaitingJobs)
# Determine how many more jobs can be submitted
message = '%s CE: SubmittedJobs=%s' % (self.ceName, submittedJobs)
message += ', WaitingJobs=%s, RunningJobs=%s' % (waitingJobs, runningJobs)
totalJobs = runningJobs + waitingJobs
message += ', MaxTotalJobs=%s' % (maxTotalJobs)
if totalJobs >= maxTotalJobs:
self.log.verbose('Max Number of Jobs reached:', maxTotalJobs)
result['Value'] = 0
message = 'There are %s waiting jobs and total jobs %s >= %s max total jobs' % (
waitingJobs, totalJobs, maxTotalJobs)
else:
additionalJobs = 0
if waitingJobs < maxWaitingJobs:
additionalJobs = maxWaitingJobs - waitingJobs
if totalJobs + additionalJobs >= maxTotalJobs:
additionalJobs = maxTotalJobs - totalJobs
# For SSH CE case
if int(self.ceParameters.get('MaxWaitingJobs', 0)) == 0:
additionalJobs = maxTotalJobs - runningJobs
if availableProcessors is not None:
additionalJobs = min(additionalJobs, availableProcessors)
result['Value'] = additionalJobs
result['Message'] = message
result['CEInfoDict'] = ceInfoDict
return result
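# --- Illustrative note (editor's note, not part of DIRAC) ---
# Worked example of the slot arithmetic above, assuming MaxTotalJobs=100,
# MaxWaitingJobs=10, WaitingToRunningRatio=0.5, RunningJobs=40, WaitingJobs=5:
#   maxWaitingJobs = max(10, 40 * 0.5) = 20
#   additionalJobs = 20 - 5 = 15        (waitingJobs < maxWaitingJobs)
#   totalJobs = 40 + 5 = 45; 45 + 15 < 100, so no capping applies
# and the CE reports 15 available slots in result['Value'].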
#############################################################################
def writeProxyToFile(self, proxy):
"""CE helper function to write a CE proxy string to a file.
"""
result = writeToProxyFile(proxy)
if not result['OK']:
self.log.error('Could not write proxy to file', result['Message'])
return result
proxyLocation = result['Value']
result = getProxyInfoAsString(proxyLocation)
if not result['OK']:
self.log.error('Could not get proxy info', result)
return result
else:
self.log.info('Payload proxy information:')
print(result['Value'])
return S_OK(proxyLocation)
#############################################################################
def _monitorProxy(self, pilotProxy, payloadProxy):
"""Base class for the monitor and update of the payload proxy, to be used in
derived classes for the basic renewal of the proxy, if further actions are
necessary they should be implemented there
"""
retVal = getProxyInfo(payloadProxy)
if not retVal['OK']:
self.log.error('Could not get payload proxy info', retVal)
return retVal
self.log.verbose('Payload Proxy information:\n%s' % formatProxyInfoAsString(retVal['Value']))
payloadProxyDict = retVal['Value']
payloadSecs = payloadProxyDict['chain'].getRemainingSecs()['Value']
if payloadSecs > self.minProxyTime:
self.log.verbose('No need to renew payload Proxy')
return S_OK()
# if there is no pilot proxy, assume there is a certificate and try a renewal
if not pilotProxy:
self.log.info('Using default credentials to get a new payload Proxy')
return gProxyManager.renewProxy(proxyToBeRenewed=payloadProxy, minLifeTime=self.minProxyTime,
newProxyLifeTime=self.defaultProxyTime,
proxyToConnect=pilotProxy)
# if there is pilot proxy
retVal = getProxyInfo(pilotProxy)
if not retVal['OK']:
return retVal
pilotProxyDict = retVal['Value']
if 'groupProperties' not in pilotProxyDict:
self.log.error('Invalid Pilot Proxy', 'Group has no properties defined')
return S_ERROR('Proxy has no group properties defined')
pilotProps = pilotProxyDict['groupProperties']
# if running with a pilot proxy, use it to renew the proxy of the payload
if Properties.PILOT in pilotProps or Properties.GENERIC_PILOT in pilotProps:
self.log.info('Using Pilot credentials to get a new payload Proxy')
return gProxyManager.renewProxy(proxyToBeRenewed=payloadProxy, minLifeTime=self.minProxyTime,
newProxyLifeTime=self.defaultProxyTime,
proxyToConnect=pilotProxy)
# if we are running with other type of proxy check if they are for the same user and group
# and copy the pilot proxy if necessary
self.log.info('Trying to copy pilot Proxy to get a new payload Proxy')
pilotProxySecs = pilotProxyDict['chain'].getRemainingSecs()['Value']
if pilotProxySecs <= payloadSecs:
errorStr = 'Pilot Proxy is not longer than payload Proxy'
self.log.error(errorStr)
return S_ERROR('Can not renew by copy: %s' % errorStr)
# check if both proxies belong to the same user and group
pilotDN = pilotProxyDict['chain'].getIssuerCert()['Value'].getSubjectDN()['Value']
retVal = pilotProxyDict['chain'].getDIRACGroup()
if not retVal['OK']:
return retVal
pilotGroup = retVal['Value']
payloadDN = payloadProxyDict['chain'].getIssuerCert()['Value'].getSubjectDN()['Value']
retVal = payloadProxyDict['chain'].getDIRACGroup()
if not retVal['OK']:
return retVal
payloadGroup = retVal['Value']
if pilotDN != payloadDN or pilotGroup != payloadGroup:
errorStr = 'Pilot Proxy and payload Proxy do not have same DN and Group'
self.log.error(errorStr)
return S_ERROR('Can not renew by copy: %s' % errorStr)
if pilotProxyDict.get('hasVOMS', False):
return pilotProxyDict['chain'].dumpAllToFile(payloadProxy)
attribute = Registry.getVOMSAttributeForGroup(payloadGroup)
vo = Registry.getVOMSVOForGroup(payloadGroup)
retVal = VOMS().setVOMSAttributes(pilotProxyDict['chain'], attribute=attribute, vo=vo)
if not retVal['OK']:
return retVal
chain = retVal['Value']
return chain.dumpAllToFile(payloadProxy)
def getDescription(self):
""" Get CE description as a dictionary
"""
ceDict = {}
for option, value in self.ceParameters.iteritems():
if isinstance(value, list):
ceDict[option] = value
elif isinstance(value, basestring):
try:
ceDict[option] = int(value)
except ValueError:
ceDict[option] = value
elif isinstance(value, (int, long, float)):
ceDict[option] = value
else:
self.log.warn('Type of option %s = %s not determined' % (option, value))
release = gConfig.getValue('/LocalSite/ReleaseVersion', version)
ceDict['DIRACVersion'] = release
ceDict['ReleaseVersion'] = release
project = gConfig.getValue("/LocalSite/ReleaseProject", "")
if project:
ceDict['ReleaseProject'] = project
result = self.getCEStatus()
if result['OK']:
if 'AvailableProcessors' in result:
cores = result['AvailableProcessors']
ceDict['NumberOfProcessors'] = cores
return S_OK(ceDict)
#############################################################################
def sendOutput(self, stdid, line): # pylint: disable=unused-argument, no-self-use
""" Callback function such that the results from the CE may be returned.
"""
print(line)
#############################################################################
def submitJob(self, executableFile, proxy, dummy=None, processors=1): # pylint: disable=unused-argument
""" Method to submit job, should be overridden in sub-class.
"""
name = 'submitJob()'
self.log.error('ComputingElement should be implemented in a subclass', name)
return S_ERROR('ComputingElement: %s should be implemented in a subclass' % (name))
#############################################################################
def getCEStatus(self, jobIDList=None): # pylint: disable=unused-argument
""" Method to get dynamic job information, can be overridden in sub-class.
"""
name = 'getCEStatus()'
self.log.error('ComputingElement should be implemented in a subclass', name)
return S_ERROR('ComputingElement: %s should be implemented in a subclass' % (name))
def getCEConfigDict(ceName):
"""Look into LocalSite for configuration Parameters for this CE
"""
ceConfigDict = {}
if ceName:
result = gConfig.getOptionsDict('/LocalSite/%s' % ceName)
if result['OK']:
ceConfigDict = result['Value']
return ceConfigDict
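# --- Illustrative sketch (editor's note, not part of DIRAC) ---
# ceParameters is assembled by successive dict.update() calls in
# initializeParameters()/setParameters(), so sources applied later override
# earlier ones (CEDefaults first, more specific sections last). A minimal
# model of that precedence with plain dicts:
defaults = {'MaxTotalJobs': 1, 'CPUTime': 3600}
overrides = {'MaxTotalJobs': 50}
params = {}
params.update(defaults)
params.update(overrides)  # the later source wins
assert params == {'MaxTotalJobs': 50, 'CPUTime': 3600}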
|
chaen/DIRAC
|
Resources/Computing/ComputingElement.py
|
Python
|
gpl-3.0
| 19,132
|
[
"DIRAC"
] |
80974408ea3db1c6ab39e8be09cf2d862d014ed3038f6b350fd34ae27656b672
|
"""
KeepNote
Editor widget in main window
"""
#
# KeepNote
# Copyright (c) 2008-2009 Matt Rasmussen
# Author: Matt Rasmussen <rasmus@mit.edu>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
#
# python imports
import gettext
import sys, os
# pygtk imports
import pygtk
pygtk.require('2.0')
from gtk import gdk
import gtk.glade
import gobject
# keepnote imports
import keepnote
from keepnote import \
KeepNoteError, is_url, unicode_gtk
from keepnote.notebook import \
NoteBookError, \
get_node_url, \
parse_node_url, \
is_node_url
from keepnote import notebook as notebooklib
from keepnote import safefile
from keepnote.gui import richtext
from keepnote.gui.richtext import \
RichTextView, RichTextBuffer, \
RichTextIO, RichTextError
from keepnote.gui import \
CONTEXT_MENU_ACCEL_PATH, \
FileChooserDialog, \
get_resource, \
Action, \
ToggleAction, \
add_actions, \
dialog_find
from keepnote.gui.editor import KeepNoteEditor
_ = keepnote.translate
class TextEditor (KeepNoteEditor):
def __init__(self, app):
KeepNoteEditor.__init__(self, app)
self._app = app
self._notebook = None
# state
self._page = None # current NoteBookPage
self._page_scrolls = {} # remember scroll in each page
self._page_cursors = {}
self._textview_io = RichTextIO()
# textview and its callbacks
self._textview = RichTextView(RichTextBuffer(
self._app.get_richtext_tag_table())) # textview
self._textview.disable()
self._textview.connect("modified", self._on_modified_callback)
self._textview.connect("visit-url", self._on_visit_url)
# scrollbars
self._sw = gtk.ScrolledWindow()
self._sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
self._sw.set_shadow_type(gtk.SHADOW_IN)
self._sw.add(self._textview)
self.pack_start(self._sw)
#self._socket = gtk.Socket()
#self.pack_start(self._socket)
# menus
self.editor_menus = EditorMenus(self._app, self)
# find dialog
self.find_dialog = dialog_find.KeepNoteFindDialog(self)
self.show_all()
def set_notebook(self, notebook):
"""Set notebook for editor"""
# set new notebook
self._notebook = notebook
if self._notebook:
# read default font
pass
else:
# no new notebook, clear the view
self.clear_view()
def load_preferences(self, app_pref, first_open=False):
"""Load application preferences"""
self.editor_menus.enable_spell_check(
self._app.pref.get("editors", "general", "spell_check",
default=True))
self._textview.set_default_font("Monospace 10")
def save_preferences(self, app_pref):
"""Save application preferences"""
# record state in preferences
app_pref.set("editors", "general", "spell_check",
self._textview.is_spell_check_enabled())
def get_textview(self):
"""Return the textview"""
return self._textview
def is_focus(self):
"""Return True if text editor has focus"""
return self._textview.is_focus()
def grab_focus(self):
"""Pass focus to textview"""
self._textview.grab_focus()
def clear_view(self):
"""Clear editor view"""
self._page = None
self._textview.disable()
def undo(self):
"""Undo the last action in the viewer"""
self._textview.undo()
def redo(self):
"""Redo the last action in the viewer"""
self._textview.redo()
def view_pages(self, pages):
"""View a page in the editor"""
# editor cannot view multiple pages at once
# if asked to, it will view none
if len(pages) > 1:
pages = []
# save current page before changing pages
self.save()
self._save_cursor()
if len(pages) == 0:
self.clear_view()
else:
page = pages[0]
self._page = page
self._textview.enable()
try:
if page.has_attr("payload_filename"):
infile = page.open_file(
page.get_attr("payload_filename"))
text = infile.read()
infile.close()
self._textview.get_buffer().set_text(text)
self._load_cursor()
else:
self.clear_view()
except UnicodeDecodeError, e:
self.clear_view()
except RichTextError, e:
self.clear_view()
self.emit("error", e.msg, e)
except Exception, e:
keepnote.log_error()
self.clear_view()
self.emit("error", "Unknown error", e)
if len(pages) > 0:
self.emit("view-node", pages[0])
def _save_cursor(self):
if self._page is not None:
it = self._textview.get_buffer().get_iter_at_mark(
self._textview.get_buffer().get_insert())
self._page_cursors[self._page] = it.get_offset()
x, y = self._textview.window_to_buffer_coords(
gtk.TEXT_WINDOW_TEXT, 0, 0)
it = self._textview.get_iter_at_location(x, y)
self._page_scrolls[self._page] = it.get_offset()
def _load_cursor(self):
# place cursor in last location
if self._page in self._page_cursors:
offset = self._page_cursors[self._page]
it = self._textview.get_buffer().get_iter_at_offset(offset)
self._textview.get_buffer().place_cursor(it)
# place scroll in last position
if self._page in self._page_scrolls:
offset = self._page_scrolls[self._page]
buf = self._textview.get_buffer()
it = buf.get_iter_at_offset(offset)
mark = buf.create_mark(None, it, True)
self._textview.scroll_to_mark(mark,
0.49, use_align=True, xalign=0.0)
buf.delete_mark(mark)
def save(self):
"""Save the loaded page"""
if self._page is not None and \
self._page.is_valid() and \
self._textview.is_modified():
try:
# save text data
buf = self._textview.get_buffer()
text = unicode_gtk(buf.get_text(buf.get_start_iter(),
buf.get_end_iter()))
out = self._page.open_file(
self._page.get_attr("payload_filename"), "w")
out.write(text)
out.close()
# save meta data
self._page.set_attr_timestamp("modified_time")
self._page.save()
except RichTextError, e:
self.emit("error", e.msg, e)
except NoteBookError, e:
self.emit("error", e.msg, e)
except Exception, e:
self.emit("error", str(e), e)
def save_needed(self):
"""Returns True if textview is modified"""
return self._textview.is_modified()
def add_ui(self, window):
self._textview.set_accel_group(window.get_accel_group())
self._textview.set_accel_path(CONTEXT_MENU_ACCEL_PATH)
self.editor_menus.add_ui(window)
def remove_ui(self, window):
self.editor_menus.remove_ui(window)
#===========================================
# callbacks for textview
def _on_modified_callback(self, textview, modified):
"""Callback for textview modification"""
self.emit("modified", self._page, modified)
# make notebook node modified
if modified:
self._page.mark_modified()
self._page.notify_change(False)
def _on_visit_url(self, textview, url):
"""Callback for textview visiting a URL"""
if is_node_url(url):
host, nodeid = parse_node_url(url)
node = self._notebook.get_node_by_id(nodeid)
if node:
self.emit("visit-node", node)
else:
try:
self._app.open_webpage(url)
except KeepNoteError, e:
self.emit("error", e.msg, e)
class EditorMenus (gobject.GObject):
def __init__(self, app, editor):
gobject.GObject.__init__(self)
self._app = app
self._editor = editor
self._action_group = None
self._uis = []
self.spell_check_toggle = None
self._removed_widgets = []
#=======================================================
# spellcheck
def enable_spell_check(self, enabled):
"""Spell check"""
self._editor.get_textview().enable_spell_check(enabled)
# see if spell check became enabled
enabled = self._editor.get_textview().is_spell_check_enabled()
# update UI to match
if self.spell_check_toggle:
self.spell_check_toggle.set_active(enabled)
return enabled
def on_spell_check_toggle(self, widget):
"""Toggle spell checker"""
self.enable_spell_check(widget.get_active())
#=====================================================
# toolbar and menus
def add_ui(self, window):
self._action_group = gtk.ActionGroup("Editor")
self._uis = []
add_actions(self._action_group, self.get_actions())
window.get_uimanager().insert_action_group(
self._action_group, 0)
for s in self.get_ui():
self._uis.append(window.get_uimanager().add_ui_from_string(s))
window.get_uimanager().ensure_update()
self.setup_menu(window, window.get_uimanager())
def remove_ui(self, window):
# remove ui
for ui in reversed(self._uis):
window.get_uimanager().remove_ui(ui)
self._uis = []
window.get_uimanager().ensure_update()
# remove action group
window.get_uimanager().remove_action_group(self._action_group)
self._action_group = None
def get_actions(self):
def BothAction(name1, *args):
return [Action(name1, *args), ToggleAction(name1 + " Tool", *args)]
return (map(lambda x: Action(*x), [
# finding
("Find In Page", gtk.STOCK_FIND, _("_Find In Page..."),
"<control>F", None,
lambda w: self._editor.find_dialog.on_find(False)),
("Find Next In Page", gtk.STOCK_FIND, _("Find _Next In Page..."),
"<control>G", None,
lambda w: self._editor.find_dialog.on_find(False, forward=True)),
("Find Previous In Page", gtk.STOCK_FIND,
_("Find Pre_vious In Page..."),
"<control><shift>G", None,
lambda w: self._editor.find_dialog.on_find(False, forward=False)),
("Replace In Page", gtk.STOCK_FIND_AND_REPLACE,
_("_Replace In Page..."),
"<control>R", None,
lambda w: self._editor.find_dialog.on_find(True)),
]) +
[ToggleAction("Spell Check", None, _("_Spell Check"),
"", None,
self.on_spell_check_toggle)]
)
def get_ui(self):
ui = ["""
<ui>
<menubar name="main_menu_bar">
<menu action="Edit">
<placeholder name="Viewer">
<placeholder name="Editor">
<placeholder name="Extension"/>
</placeholder>
</placeholder>
</menu>
<menu action="Search">
<placeholder name="Viewer">
<placeholder name="Editor">
<menuitem action="Find In Page"/>
<menuitem action="Find Next In Page"/>
<menuitem action="Find Previous In Page"/>
<menuitem action="Replace In Page"/>
</placeholder>
</placeholder>
</menu>
<placeholder name="Viewer">
<placeholder name="Editor">
</placeholder>
</placeholder>
<menu action="Go">
<placeholder name="Viewer">
<placeholder name="Editor">
</placeholder>
</placeholder>
</menu>
<menu action="Tools">
<placeholder name="Viewer">
<menuitem action="Spell Check"/>
</placeholder>
</menu>
</menubar>
</ui>
"""]
ui.append("""
<ui>
<toolbar name="main_tool_bar">
<placeholder name="Viewer">
<placeholder name="Editor">
</placeholder>
</placeholder>
</toolbar>
</ui>
""")
return ui
def setup_menu(self, window, uimanager):
u = uimanager
# get spell check toggle
self.spell_check_toggle = \
uimanager.get_widget("/main_menu_bar/Tools/Viewer/Spell Check")
self.spell_check_toggle.set_sensitive(
self._editor.get_textview().can_spell_check())
self.spell_check_toggle.set_active(window.get_app().pref.get(
"editors", "general", "spell_check", default=True))
|
gemagomez/keepnote
|
keepnote/gui/editor_text.py
|
Python
|
gpl-2.0
| 14,584
|
[
"VisIt"
] |
5a7aa2625d316c735427c78e9d5fbd68c9dd906178f2f07001b096bcd233c756
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import unittest
import pickle
from pymatgen.util.testing import PymatgenTest
from pymatgen.core.periodic_table import Element, Specie, DummySpecie, get_el_sp
from pymatgen.core.composition import Composition
from copy import deepcopy
class ElementTestCase(PymatgenTest):
def test_init(self):
self.assertEqual("Fe", Element("Fe").symbol, "Fe test failed")
fictional_symbols = ["D", "T", "Zebra"]
for sym in fictional_symbols:
self.assertRaises(ValueError, Element, sym)
# Test caching
self.assertEqual(id(Element("Fe")), id(Element("Fe")))
def test_dict(self):
fe = Element.Fe
d = fe.as_dict()
self.assertEqual(fe, Element.from_dict(d))
def test_block(self):
testsets = {"O": "p", "Fe": "d", "Li": "s", "U": "f", "Er": "f",
"Lu": "d", "Lr": "d"}
for k, v in testsets.items():
self.assertEqual(Element(k).block, v)
def test_full_electronic_structure(self):
testsets = {"O": [(1, "s", 2), (2, "s", 2), (2, "p", 4)],
"Fe": [(1, "s", 2), (2, "s", 2), (2, "p", 6), (3, "s", 2),
(3, "p", 6), (3, "d", 6), (4, "s", 2)],
"Li": [(1, "s", 2), (2, "s", 1)],
"U": [(1, "s", 2), (2, "s", 2), (2, "p", 6), (3, "s", 2),
(3, "p", 6), (3, "d", 10), (4, "s", 2), (4, "p", 6),
(4, "d", 10), (5, "s", 2), (5, "p", 6), (4, "f", 14),
(5, "d", 10), (6, "s", 2), (6, "p", 6), (5, "f", 3),
(6, "d", 1), (7, "s", 2)]}
for k, v in testsets.items():
self.assertEqual(Element(k).full_electronic_structure, v)
def test_attributes(self):
is_true = {("Xe", "Kr"): "is_noble_gas",
("Fe", "Ni"): "is_transition_metal",
("Li", "Cs"): "is_alkali",
("Ca", "Mg"): "is_alkaline",
("F", "Br", "I"): "is_halogen",
("La",): "is_lanthanoid",
("U", "Pu"): "is_actinoid",
("Si", "Ge"): "is_metalloid",
("O", "Te"): "is_chalcogen"}
for k, v in is_true.items():
for sym in k:
self.assertTrue(getattr(Element(sym), v), sym + " is false")
keys = ["name", "mendeleev_no", "atomic_mass",
"electronic_structure", "atomic_radius",
"min_oxidation_state", "max_oxidation_state",
"electrical_resistivity", "velocity_of_sound", "reflectivity",
"refractive_index", "poissons_ratio", "molar_volume",
"thermal_conductivity", "melting_point", "boiling_point",
"liquid_range", "critical_temperature",
"superconduction_temperature",
"bulk_modulus", "youngs_modulus", "brinell_hardness",
"rigidity_modulus", "mineral_hardness",
"vickers_hardness", "density_of_solid",
"coefficient_of_linear_thermal_expansion", "oxidation_states",
"common_oxidation_states", "average_ionic_radius",
"ionic_radii"]
# Test all elements up to Lawrencium (Z = 103)
for i in range(1, 104):
el = Element.from_Z(i)
d = el.data
for k in keys:
k_str = k.capitalize().replace("_", " ")
if k_str in d and (not str(d[k_str]).startswith("no data")):
self.assertIsNotNone(getattr(el, k))
el = Element.from_Z(i)
if len(el.oxidation_states) > 0:
self.assertEqual(max(el.oxidation_states),
el.max_oxidation_state)
self.assertEqual(min(el.oxidation_states),
el.min_oxidation_state)
if el.symbol not in ["He", "Ne", "Ar"]:
self.assertTrue(el.X > 0, "No electroneg for %s" % el)
self.assertRaises(ValueError, Element.from_Z, 1000)
def test_oxidation_states(self):
el = Element.Fe
self.assertEqual(el.oxidation_states, (-2, -1, 1, 2, 3, 4, 5, 6))
self.assertEqual(el.common_oxidation_states, (2, 3))
def test_deepcopy(self):
el1 = Element.Fe
el2 = Element.Na
ellist = [el1, el2]
self.assertEqual(ellist, deepcopy(ellist),
"Deepcopy operation doesn't produce exact copy")
def test_radii(self):
el = Element.Pd
self.assertEqual(el.atomic_radius, 1.40)
self.assertEqual(el.atomic_radius_calculated, 1.69)
self.assertEqual(el.van_der_waals_radius, 1.63)
def test_data(self):
self.assertEqual(Element.Pd.data["Atomic radius"], 1.4)
al = Element.Al
val = al.thermal_conductivity
self.assertEqual(val, 235)
self.assertEqual(str(val.unit), "W K^-1 m^-1")
val = al.electrical_resistivity
self.assertEqual(val, 2.7e-08)
self.assertEqual(str(val.unit), "m ohm")
def test_sort(self):
els = [Element.Se, Element.C]
self.assertEqual(sorted(els), [Element.C, Element.Se])
def test_pickle(self):
el1 = Element.Fe
o = pickle.dumps(el1)
self.assertEqual(el1, pickle.loads(o))
#Test all elements up to Uranium
for i in range(1, 93):
self.serialize_with_pickle(Element.from_Z(i), test_eq=True)
def test_print_periodic_table(self):
Element.print_periodic_table()
class SpecieTestCase(PymatgenTest):
def setUp(self):
self.specie1 = Specie.from_string("Fe2+")
self.specie2 = Specie("Fe", 3)
self.specie3 = Specie("Fe", 2)
self.specie4 = Specie("Fe", 2, {"spin": 5})
def test_init(self):
self.assertRaises(ValueError, Specie, "Fe", 2, {"magmom": 5})
def test_cached(self):
specie5 = Specie("Fe", 2)
self.assertEqual(id(specie5), id(self.specie3))
def test_ionic_radius(self):
self.assertEqual(self.specie2.ionic_radius, 78.5 / 100)
self.assertEqual(self.specie3.ionic_radius, 92 / 100)
self.assertAlmostEqual(Specie("Mn", 4).ionic_radius, 0.67)
def test_eq(self):
self.assertEqual(self.specie1, self.specie3,
"Static and actual constructor gives unequal result!")
self.assertNotEqual(self.specie1, self.specie2,
"Fe2+ should not be equal to Fe3+")
self.assertNotEqual(self.specie4, self.specie3)
self.assertFalse(self.specie1 == Element("Fe"))
self.assertFalse(Element("Fe") == self.specie1)
def test_cmp(self):
self.assertLess(self.specie1, self.specie2, "Fe2+ should be < Fe3+")
self.assertLess(Specie("C", 1), Specie("Se", 1))
def test_attr(self):
self.assertEqual(self.specie1.Z, 26,
"Z attribute for Fe2+ should be = Element Fe.")
self.assertEqual(self.specie4.spin, 5)
def test_deepcopy(self):
el1 = Specie("Fe", 4)
el2 = Specie("Na", 1)
ellist = [el1, el2]
self.assertEqual(ellist, deepcopy(ellist),
"Deepcopy operation doesn't produce exact copy.")
def test_pickle(self):
self.assertEqual(self.specie1, pickle.loads(pickle.dumps(self.specie1)))
for i in range(1, 5):
self.serialize_with_pickle(getattr(self, "specie%d" % i) , test_eq=True)
def test_get_crystal_field_spin(self):
self.assertEqual(Specie("Fe", 2).get_crystal_field_spin(), 4)
self.assertEqual(Specie("Fe", 3).get_crystal_field_spin(), 5)
self.assertEqual(Specie("Fe", 4).get_crystal_field_spin(), 4)
self.assertEqual(Specie("Co", 3).get_crystal_field_spin(
spin_config="low"), 0)
self.assertEqual(Specie("Co", 4).get_crystal_field_spin(
spin_config="low"), 1)
self.assertEqual(Specie("Ni", 3).get_crystal_field_spin(
spin_config="low"), 1)
self.assertEqual(Specie("Ni", 4).get_crystal_field_spin(
spin_config="low"), 0)
self.assertRaises(AttributeError,
Specie("Li", 1).get_crystal_field_spin)
self.assertRaises(AttributeError,
Specie("Ge", 4).get_crystal_field_spin)
self.assertRaises(AttributeError,
Specie("H", 1).get_crystal_field_spin)
self.assertRaises(AttributeError,
Specie("Fe", 10).get_crystal_field_spin)
self.assertRaises(ValueError, Specie("Fe", 2).get_crystal_field_spin,
"hex")
s = Specie("Co", 3).get_crystal_field_spin("tet", spin_config="low")
self.assertEqual(s, 2)
def test_sort(self):
els = map(get_el_sp, ["N3-", "Si4+", "Si3+"])
self.assertEqual(sorted(els), [Specie("Si", 3), Specie("Si", 4),
Specie("N", -3)])
def test_to_from_string(self):
fe3 = Specie("Fe", 3, {"spin": 5})
self.assertEqual(str(fe3), "Fe3+spin=5")
fe = Specie.from_string("Fe3+spin=5")
self.assertEqual(fe.spin, 5)
mo0 = Specie("Mo", 0, {"spin": 5})
self.assertEqual(str(mo0), "Mo0+spin=5")
mo = Specie.from_string("Mo0+spin=4")
self.assertEqual(mo.spin, 4)
class DummySpecieTestCase(unittest.TestCase):
def test_init(self):
self.specie1 = DummySpecie("X")
self.assertRaises(ValueError, DummySpecie, "Xe")
self.assertRaises(ValueError, DummySpecie, "Xec")
self.assertRaises(ValueError, DummySpecie, "Vac")
self.specie2 = DummySpecie("X", 2, {"spin": 3})
self.assertEqual(self.specie2.spin, 3)
def test_cached(self):
sp1 = DummySpecie("X", 2)
sp2 = DummySpecie("X", 2)
self.assertEqual(id(sp1), id(sp2))
def test_eq(self):
self.assertFalse(DummySpecie("Xg") == DummySpecie("Xh"))
self.assertFalse(DummySpecie("Xg") == DummySpecie("Xg", 3))
self.assertTrue(DummySpecie("Xg", 3) == DummySpecie("Xg", 3))
def test_from_string(self):
sp = DummySpecie.from_string("X")
self.assertEqual(sp.oxi_state, 0)
sp = DummySpecie.from_string("X2+")
self.assertEqual(sp.oxi_state, 2)
sp = DummySpecie.from_string("X2+spin=5")
self.assertEqual(sp.oxi_state, 2)
self.assertEqual(sp.spin, 5)
def test_pickle(self):
el1 = DummySpecie("X", 3)
o = pickle.dumps(el1)
self.assertEqual(el1, pickle.loads(o))
def test_sort(self):
r = sorted([Element.Fe, DummySpecie("X")])
self.assertEqual(r, [DummySpecie("X"), Element.Fe])
self.assertTrue(DummySpecie("X", 3) < DummySpecie("X", 4))
def test_safe_from_composition(self):
c = Composition({'Xa': 1, 'Fe': 1})
self.assertEqual(DummySpecie.safe_from_composition(c).symbol, 'Xb')
self.assertEqual(DummySpecie.safe_from_composition(c, 1).symbol, 'Xb')
class FuncTest(unittest.TestCase):
def test_get_el_sp(self):
self.assertEqual(get_el_sp("Fe2+"), Specie("Fe", 2))
self.assertEqual(get_el_sp("3"), Element.Li)
self.assertEqual(get_el_sp("3.0"), Element.Li)
self.assertEqual(get_el_sp("U"), Element.U)
self.assertEqual(get_el_sp("X2+"), DummySpecie("X", 2))
self.assertEqual(get_el_sp("Mn3+"), Specie("Mn", 3))
if __name__ == "__main__":
unittest.main()
|
xhqu1981/pymatgen
|
pymatgen/core/tests/test_periodic_table.py
|
Python
|
mit
| 11,711
|
[
"pymatgen"
] |
a00d8d0df99328b5d62f015516f030eddfc23c174aaf44985e3367b79a836618
|
#
# Copyright (C) 2013,2014 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function
import ctypes
import sys
sys.setdlopenflags((sys.getdlopenflags() | ctypes.RTLD_GLOBAL ))
import espresso as es
import global_variables as g
try:
es.glob.time_step=-0.01
except ValueError:
print("Espresso does not like negative timesteps")
|
ohickey/espresso
|
samples/python/error_checking.py
|
Python
|
gpl-3.0
| 1,011
|
[
"ESPResSo"
] |
c666294267c2a696cc82984b5371b128d4f1f8140bc0d6e8ad000a1ec4a14745
|
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
import doctest
import unittest
from rdkit.Chem import inchi
from rdkit.TestRunner import redirect_stderr
import io
if inchi.INCHI_AVAILABLE:
from rdkit.Chem.MolKey.InchiInfo import InchiInfo
try:
from rdkit.Avalon import pyAvalonTools
from rdkit.Chem.MolKey import MolKey
_testMolKey = True
except ImportError:
_testMolKey = False
def load_tests(loader, tests, ignore):
""" Add the Doctests from the module """
if _testMolKey:
tests.addTests(doctest.DocTestSuite(MolKey, optionflags=doctest.ELLIPSIS))
return tests
@unittest.skipUnless(_testMolKey, 'Avalon tools and Inchi required')
class TestMolKey(unittest.TestCase):
def test_GetKeyForCTAB(self):
f = io.StringIO()
with redirect_stderr(f):
res = MolKey.GetKeyForCTAB('IncorrectCTAB')
self.assertNotEqual(res.error, 0)
s = f.getvalue()
self.assertIn('WARNING:', s)
def test_CheckCTAB(self):
self.assertRaises(MolKey.BadMoleculeException, MolKey.CheckCTAB, None)
self.assertRaises(MolKey.BadMoleculeException, MolKey.CheckCTAB, '')
ok, _ = MolKey.CheckCTAB('CCincorrect', isSmiles=True)
self.assertEqual(ok, 1)
ok, _ = MolKey.CheckCTAB('NO_STRUCTURE', isSmiles=True)
self.assertEqual(ok, MolKey.ERROR_DICT['NULL_MOL'])
ok, ctab = MolKey.CheckCTAB('CC', isSmiles=True)
self.assertEqual(ok, 0)
ok, ctab2 = MolKey.CheckCTAB(ctab, isSmiles=False)
self.assertEqual(ok, 0)
self.assertEqual(ctab, ctab2)
def test_GetInchiForCTAB(self):
self.assertRaises(MolKey.BadMoleculeException, MolKey.GetInchiForCTAB, 'IncorrectCTAB')
def test_ErrorBitsToText(self):
errors = MolKey.ErrorBitsToText(3)
self.assertIn('BAD_MOLECULE', errors)
self.assertIn('ALIAS_CONVERSION_FAILED', errors)
for k, v in MolKey.ERROR_DICT.items():
errors = MolKey.ErrorBitsToText(v)
self.assertEqual(len(errors), 1)
self.assertIn(k, errors)
def test_get_chiral_identification_string(self):
cases = [((0, 0), 'S_ACHIR'), # No stereo centers
((0, 1), 'R_ONE'), # One undefined stereo center
((0, 2), 'S_UNKN'), # More than one undefined stereo center
((0, 3), 'S_UNKN'), # More than one undefined stereo center
((1, 0), 'S_ABS'), # Fully defined stereo center
((2, 0), 'S_ABS'), # Fully defined stereo centers
((1, 1), 'S_PART'), # Partially defined stereo centers
((2, 1), 'S_PART'), # Partially defined stereo centers
]
for (nDefined, nUndefined), expected in cases:
self.assertEqual(MolKey._get_chiral_identification_string(nDefined, nUndefined), expected)
GUANINE = 'InChI=1S/C5H5N5O/c6-5-9-3-2(4(11)10-5)7-1-8-3/h1H0,(H4,6,7,8,9,10,11)'
# 'N=C(-O)N', '/FixedH /SUU'
UREA1 = 'InChI=1/CH4N2O/c2-1(3)4/h(H4,2,3,4)/f/h2,4H,3H2/b2-1?'
# 'NC(=O)N', '/FixedH /SUU'
UREA2 = 'InChI=1/CH4N2O/c2-1(3)4/h(H4,2,3,4)/f/h2-3H2'
TRITIATED_UREA = 'InChI=1S/CH4N2O/c2-1(3)4/h(H4,2,3,4)/i/hT3'
DEUTERATED_UREA = 'InChI=1S/CH4N2O/c2-1(3)4/h(H4,2,3,4)/i/hD2'
ACETIC_ACID = 'InChI=1S/C3H6O2/c1-2-3(4)5/h2H2,1H3,(H,4,5)'
ACETATE = 'InChI=1S/C3H6O2/c1-2-3(4)5/h2H2,1H3,(H,4,5)/p-1'
mobile1 = 'InChI=1S/C5H5N3O2/c6-4(9)3-1-7-2-8-5(3)10/h1-2H,(H2,6,9)(H,7,8,10)' # invented
mobile2 = 'InChI=1S/C7H10N4O/c1-4-2-5(3-6(8)12)11-7(9)10-4/h2H,3H2,1H3,(H2,8,12)(H2,9,10,11)'
# sp3 stereo
sugar1 = 'InChI=1S/C14H20O9/c1-6-11(20-7(2)15)12(21-8(3)16)13(22-9(4)17)14(19-6)23-10(5)18/h6,11-14H,1-5H3/t6-,11-,12+,13+,14?/m0/s1' # L-rhamnopyranose (source: chemspider)
sugar2 = 'InChI=1S/C12H20O6/c1-11(2)14-5-6(16-11)8-7(13)9-10(15-8)18-12(3,4)17-9/h6-10,13H,5H2,1-4H3/t6-,7-,8-,9-,10-/m1/s1' # MFCD00135634 (Diacetone-D-Glucose, source: chemspider)
sp3_unk = 'InChI=1S/C12H21NO4/c1-8(2)10(12(15)16-3)13-11(14)9-5-4-6-17-7-9/h8-10H,4-7H2,1-3H3,(H,13,14)/t9?,10-/m0/s1' # derived from ChemSpider 34044335
@unittest.skipUnless(inchi.INCHI_AVAILABLE, 'Inchi required')
class TestInchiInfo(unittest.TestCase):
def doTest(self, inchi, numSp3=0, numUndefSp3=0, numMobileHGroups=0, layer='non-isotopic'):
ii = InchiInfo(inchi)
nSp3, nUndefSp3, _, _ = ii.get_sp3_stereo()['main'][layer]
self.assertEqual(nSp3, numSp3)
self.assertEqual(nUndefSp3, numUndefSp3)
nMobileHGroups, _ = ii.get_mobile_h()['main'][layer]
self.assertEqual(nMobileHGroups, numMobileHGroups)
def testGuanine(self):
self.doTest(GUANINE, 0, 0, 1)
def testTritiatedUrea(self):
self.doTest(TRITIATED_UREA, 0, 0, 1)
def testDeuteratedUrea(self):
self.doTest(DEUTERATED_UREA, 0, 0, 1)
def testAceticAcid(self):
self.doTest(ACETIC_ACID, 0, 0, 1)
def testAcetate(self):
self.doTest(ACETATE, 0, 0, 1)
def testMobile1(self):
self.doTest(mobile1, 0, 0, 2)
def testMobile2(self):
self.doTest(mobile2, 0, 0, 2)
# sp3 stereo
def testSugar1(self):
self.doTest(sugar1, 5, 1, 0)
def testSugar2(self):
self.doTest(sugar2, 5, 0, 0)
def testSP3_unk(self):
self.doTest(sp3_unk, 2, 1, 1)
if __name__ == '__main__': # pragma: nocover
unittest.main()
|
rvianello/rdkit
|
rdkit/Chem/MolKey/UnitTestMolKey.py
|
Python
|
bsd-3-clause
| 5,286
|
[
"RDKit"
] |
bc03dfa456be96c5d155652e4553aba5944cf4ab4b2192cb9ddd52b871650bd7
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ctypes
import unittest
import numpy
from pyscf import lib
from pyscf import gto
from pyscf.gto import ft_ao
libpbc = lib.load_library('libpbc')
mol = gto.Mole()
mol.atom = '''
C 1.3 .2 .3
C .1 -.1 1.1 '''
mol.basis = 'ccpvdz'
mol.build()
mesh = (7,9,11)
numpy.random.seed(12)
invh = numpy.diag(numpy.random.random(3))
b = 2*numpy.pi * invh
Gvbase = (numpy.fft.fftfreq(mesh[0], 1./mesh[0]),
numpy.fft.fftfreq(mesh[1], 1./mesh[1]),
numpy.fft.fftfreq(mesh[2], 1./mesh[2]))
Gv = numpy.dot(lib.cartesian_prod(Gvbase), b)
gxyz = lib.cartesian_prod([numpy.arange(len(x)) for x in Gvbase])
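# Reciprocal-space sampling: Gvbase holds the FFT frequency indices per axis,
# Gv the corresponding G-vectors (cartesian product of Gvbase times b), and
# gxyz the integer mesh indices matching each row of Gv.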
def tearDownModule():
global mol, Gvbase, Gv, gxyz
del mol, Gvbase, Gv, gxyz
def ft_ao_o0(mol, Gv):
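    # Reference (unoptimized) evaluation of the analytical Fourier transform
    # of the AO basis, aoG[i,G] = \int exp(-i G.r) phi_i(r) d^3r: for each
    # shell, the Cartesian-Gaussian FT is built with a two-term recursion in
    # gx/gy/gz, contracted over primitives, then mapped to spherical
    # harmonics via c2s_bra.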
nao = mol.nao_nr()
ngrids = Gv.shape[0]
aoG = numpy.zeros((nao,ngrids), dtype=numpy.complex128)
gx = numpy.empty((12,ngrids), dtype=numpy.complex128)
gy = numpy.empty((12,ngrids), dtype=numpy.complex128)
gz = numpy.empty((12,ngrids), dtype=numpy.complex128)
buf = numpy.empty((64,ngrids), dtype=numpy.complex128)
kk = numpy.einsum('ki,ki->k', Gv, Gv)
i0 = 0
for ib in range(mol.nbas):
ci = mol._libcint_ctr_coeff(ib)
ei = mol.bas_exp(ib)
li = mol.bas_angular(ib)
ri = mol.bas_coord(ib)
ni = ci.shape[1]
di = (li*2+1) * ni
nfi = (li+1)*(li+2)//2
kr = numpy.dot(Gv,ri)
cs = numpy.exp(-1j*kr)
buf[:nfi*ni] = 0
for ip in range(ci.shape[0]):
ai = ei[ip]
fac = (numpy.pi/ai)**1.5 * numpy.exp(-.25/ai*kk)
gx[0] = 1
gy[0] = 1
gz[0] = cs * fac
if li > 0:
gx[1] = -1j*Gv[:,0]/(2*ai) * gx[0]
gy[1] = -1j*Gv[:,1]/(2*ai) * gy[0]
gz[1] = -1j*Gv[:,2]/(2*ai) * gz[0]
for m in range(1, li):
gx[m+1] = m/(2*ai) * gx[m-1] - 1j*Gv[:,0]/(2*ai) * gx[m]
gy[m+1] = m/(2*ai) * gy[m-1] - 1j*Gv[:,1]/(2*ai) * gy[m]
gz[m+1] = m/(2*ai) * gz[m-1] - 1j*Gv[:,2]/(2*ai) * gz[m]
for m,(ix,iy,iz) in enumerate(loop_cart(li)):
val = gx[ix] * gy[iy] * gz[iz]
for i, cip in enumerate(ci[ip]):
buf[i*nfi+m] += cip*val
ti = c2s_bra(li, numpy.eye(nfi)).T
tmp1 = numpy.empty((di,ngrids), dtype=numpy.complex128)
for i in range(ni):
tmp1[i*(li*2+1):(i+1)*(li*2+1)] = \
numpy.einsum('pi,px->ix', ti, buf[i*nfi:(i+1)*nfi])
aoG[i0:i0+di] += tmp1
i0 += di
return aoG.T
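# Enumerate Cartesian monomial exponents (ix, iy, iz) with ix+iy+iz == l,
# x powers descending first (the conventional Cartesian AO ordering).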
def loop_cart(l):
for ix in reversed(range(l+1)):
for iy in reversed(range(l-ix+1)):
iz = l - ix - iy
yield ix, iy, iz
def c2s_bra(l, gcart):
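    # Cartesian-to-spherical transformation of the bra index: the s and p
    # prefactors are the normalization constants 1/sqrt(4*pi) and
    # sqrt(3/(4*pi)); higher angular momenta are delegated to the libcgto
    # C routine.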
if l == 0:
return gcart * 0.282094791773878143
elif l == 1:
return gcart * 0.488602511902919921
else:
m = gcart.shape[1]
gsph = numpy.empty((l*2+1,m))
fc2s = gto.moleintor.libcgto.CINTc2s_ket_sph
fc2s(gsph.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(m),
gcart.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(l))
return gsph
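# Deterministic scalar "fingerprint" of an array, compared against
# hard-coded reference values in the regression tests below.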
def finger(a):
return numpy.dot(a.ravel(), numpy.cos(numpy.arange(a.size)))
class KnownValues(unittest.TestCase):
def test_ft_ao1(self):
ref = ft_ao_o0(mol, Gv)
dat = ft_ao.ft_ao(mol, Gv)
self.assertTrue(numpy.allclose(ref, dat))
dat = ft_ao.ft_ao(mol, Gv, b=b, gxyz=gxyz, Gvbase=Gvbase)
self.assertTrue(numpy.allclose(ref, dat))
def test_ft_ao2(self):
numpy.random.seed(12)
invh = numpy.random.random(3) + numpy.eye(3) * 2.5
b = 2*numpy.pi * invh
Gv = numpy.dot(lib.cartesian_prod(Gvbase), b)
ref = ft_ao_o0(mol, Gv)
dat = ft_ao.ft_ao(mol, Gv)
self.assertTrue(numpy.allclose(ref, dat))
mol1 = mol.copy()
mol1.cart = True
ref = ft_ao.ft_ao(mol1, Gv)
dat = ft_ao.ft_ao(mol1, Gv, b=b, gxyz=gxyz, Gvbase=Gvbase)
self.assertTrue(numpy.allclose(ref, dat))
def test_ft_aopair1(self):
dat = ft_ao.ft_aopair(mol, Gv)
self.assertAlmostEqual(finger(dat), (-5.9794759129252348+8.07254562525371j), 9)
dat_s2 = ft_ao.ft_aopair(mol, Gv, aosym='s2')
nao = dat.shape[-1]
for i in range(nao):
for j in range(i+1):
dat[:,i,j] = dat[:,j,i] = dat_s2[:,i*(i+1)//2+j]
self.assertAlmostEqual(finger(dat), (-5.9794759129252348+8.07254562525371j), 9)
dat1 = ft_ao.ft_aopair(mol, Gv, b=b, gxyz=gxyz, Gvbase=Gvbase)
self.assertAlmostEqual(finger(dat1), (-5.9794759129252348+8.07254562525371j), 9)
def test_ft_aopair2(self):
numpy.random.seed(12)
invh = numpy.random.random(3) + numpy.eye(3) * 2.5
b = 2*numpy.pi * invh
Gv = numpy.dot(lib.cartesian_prod(Gvbase), b)
dat = ft_ao.ft_aopair(mol, Gv)
self.assertAlmostEqual(finger(dat), (-3.1468496579780125-0.019209667673850885j), 9)
dat1 = ft_ao.ft_aopair(mol, Gv, b=b, gxyz=gxyz, Gvbase=Gvbase)
self.assertAlmostEqual(finger(dat1), (-3.1468496579780125-0.019209667673850885j), 9)
def test_ft_aopair_pdotp(self):
dat = ft_ao.ft_aopair(mol, Gv, intor='GTO_ft_pdotp_sph')
self.assertAlmostEqual(finger(dat), (-80.69687735727976+69.239798150854909j), 9)
def test_ft_aopair_pxp(self):
dat = ft_ao.ft_aopair(mol, Gv, intor='GTO_ft_pxp_sph', comp=3)
self.assertAlmostEqual(finger(dat), (3.7490985032017079+43.665863070814687j), 8)
def test_ft_aopair_overlap0(self):
G = numpy.asarray([[-1.679872, 1.679872, 2.937055],
                   [-1.425679, 1.425679, 2.492629],
                   [-1.187609, 1.187609, 2.076392]])
mol = gto.M(atom='Ne 7 0.0 0.0; Ne 7 0.0 0.0', basis='3-21g')
dat = ft_ao.ft_aopair(mol, G)
self.assertAlmostEqual(lib.finger(dat), (-1.4150713647161861-0.8020058716859948j), 12)
if __name__ == '__main__':
print('Full Tests for ft_ao')
unittest.main()
|
gkc1000/pyscf
|
pyscf/gto/test/test_ft_ao.py
|
Python
|
apache-2.0
| 6,757
|
[
"PySCF"
] |
991c29eec1a9d626702edfcbf91a5d40af0f8bcfec3a9fc932b8dad799663146
|
"""
Example of a Gaussian distribution
----------------------------------
Figure 3.8.
This shows an example of a Gaussian distribution with various parameters.
We'll generate the distribution using::
dist = scipy.stats.norm(...)
Where ... should be filled in with the desired distribution parameters
Once we have defined the distribution parameters in this way, these
distribution objects have many useful methods; for example:
* ``dist.pmf(x)`` computes the Probability Mass Function at values ``x``
in the case of discrete distributions
* ``dist.pdf(x)`` computes the Probability Density Function at values ``x``
in the case of continuous distributions
* ``dist.rvs(N)`` computes ``N`` random variables distributed according
to the given distribution
Many further options exist; refer to the documentation of ``scipy.stats``
for more details.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from scipy.stats import norm
from matplotlib import pyplot as plt
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Define the distributions to be plotted
sigma_values = [0.5, 1.0, 2.0]
linestyles = ['-', '--', ':']
mu = 0
x = np.linspace(-10, 10, 1000)
#------------------------------------------------------------
# plot the distributions
fig, ax = plt.subplots(figsize=(5, 3.75))
for sigma, ls in zip(sigma_values, linestyles):
# create a gaussian / normal distribution
dist = norm(mu, sigma)
plt.plot(x, dist.pdf(x), ls=ls, c='black',
label=r'$\mu=%i,\ \sigma=%.1f$' % (mu, sigma))
plt.xlim(-5, 5)
plt.ylim(0, 0.85)
plt.xlabel('$x$')
plt.ylabel(r'$p(x|\mu,\sigma)$')
plt.title('Gaussian Distribution')
plt.legend()
plt.show()
|
gtrichards/PHYS_T480
|
code/fig_gaussian_distribution.py
|
Python
|
mit
| 2,433
|
[
"Gaussian"
] |
732e471adfcae68d79a4e49f9dc79f6766bbcf7c53644610dbf552bbc42f9c41
|
# -*- coding: utf-8 -*-
{
'(Recipient)': '(Empfänger)',
"'Cancel' will indicate an asset log entry did not occur": "'Abbrechen' zeigt an, dass ein Asset Log Eintrag nicht eingetreten ist",
"A location that specifies the geographic area for this region. This can be a location from the location hierarchy, or a 'group location', or a location that has a boundary for the area.": 'Eine Position, die den geografischen Bereich für diese Region definiert. Dies kann ein Standort aus der Standorthierarchie, oder ein Gruppenstandort, oder ein Standort mit Grenzbereich sein.',
"Acronym of the organization's name, eg. IFRC.": 'Abkürzung des Organisationsnamen, z. B. IFRC.',
"Authenticate system's Twitter account": 'Authentifizierung für den Twitter Account des Systems',
"Can't import tweepy": 'Tweepy kann nicht importiert werden',
"Caution: doesn't respect the framework rules!": 'Achtung: Die Rahmenbedingungen des Frameworks werden nicht beachtet!',
"Format the list of attribute values & the RGB value to use for these as a JSON object, e.g.: {Red: '#FF0000', Green: '#00FF00', Yellow: '#FFFF00'}": "Formatieren Sie die Liste der Attributwerte und die RGB-Wert zur Verwendung dieser als ein JSON-Objekt, z. B.: {Rot: '#FF0000 ', grün: '#00FF00 ', gelb: '#FFFF00 '}",
"If selected, then this Asset's Location will be updated whenever the Person's Location is updated.": 'Wenn ausgewählt, wird der Ort dieser Anlage immer aktualisiert, sobald der Standort der Person aktualisiert wird.',
"If this configuration represents a region for the Regions menu, give it a name to use in the menu. The name for a personal map configuration will be set to the user's name.": 'Wenn diese Konfiguration einen Bereich für die Regionenauswahl repräsentiert, geben Sie einen Namen für die Verwendung in der Auswahl. Der Name für eine persönliche Kartenkonfiguration wird mit dem Namen des Benutzers festgelegt.',
"If this field is populated then a user who specifies this Organization when signing up will be assigned as a Staff of this Organization unless their domain doesn't match the domain field.": 'Wenn dieses Feld ausgefüllt ist, dann wird ein Benutzer, der diese Organisation definiert, automatisch als Mitarbeiter dieser Organisation zugeordnet sobald er sich anmeldet, ausgenommen die Domäne stimmt nicht mit dem Domänenfeld überein.',
"If this is ticked, then this will become the user's Base Location & hence where the user is shown on the Map": 'Wenn dies angekreuzt ist, wird es die Basisposition des Benutzers und dadurch gesteuert wo der Benutzer auf der Karte angezeigt wird.',
"If you don't see the Hospital in the list, you can add a new one by clicking link 'Create Hospital'.": "Wenn sie das Krankenhaus nicht in der Liste finden, können Sie ein neues hinzufügen, indem sie den Link 'Krankenhaus hinzufügen' anklicken.",
"If you don't see the Office in the list, you can add a new one by clicking link 'Create Office'.": "Wenn sie das Büro nicht in der Liste finden, können Sie ein neues hinzufügen, indem sie den Link 'Büro hinzufügen' anklicken.",
"If you don't see the Organization in the list, you can add a new one by clicking link 'Create Organization'.": 'Wenn sie die Organisation nicht in der Liste sehen, dann können sie eine neue hinzufügen indem sie auf den Link "Organisation hinzufügen" klicken.',
"Instead of automatically syncing from other peers over the network, you can also sync from files, which is necessary where there's no network. You can use this page to import sync data from files and also export data to sync files. Click the link on the right to go to this page.": 'Anstelle der automatischen Synchronisation von anderen Peers über das Netz, können sie auch über Dateien synchronisieren, was nötig ist, wenn kein Netzwerk vorhanden ist. Sie können diese Seite verwenden um Sync Daten aus Dateien zu importieren and auch um Daten in Form von Sync Dateien zu exportieren. Ein Klick auf den Link rechts bringt Sie zu dieser Seite.',
"Level is higher than parent's": 'Die Stufe ist höher als das übergeordnete Element',
"Need a 'url' argument!": "Braucht eine 'url' als Argument!",
"Optional. The name of the geometry column. In PostGIS this defaults to 'the_geom'.": "Optional. Der Name der Geometrie-Spalte. In PostGIS ist der Standardwert 'the_geom'.",
"Parent level should be higher than this record's level. Parent level is": 'Übergeordnete Ebene muss höher als dieser Eintrag. Die Stufe seines Eltern Elements ist',
"Password fields don't match": 'Kennwortfelder stimmer nicht überein',
"Phone number to donate to this organization's relief efforts.": 'Telefonnummer für Spenden an diese Nothilfeorganisation.',
"Please come back after sometime if that doesn't help.": 'Wenn das nicht hilft, kommen Sie nach einiger Zeit bitte wieder.',
"Quantity in %s's Inventory": "Menge in %s's Bestand",
"Select a Room from the list or click 'Create Room'": "Wählen Sie einen Raum aus der Liste oder klicken Sie auf 'Raum hinzufügen'",
"Select a person in charge for status 'assigned'": 'Wählen Sie eine verantwortliche Person aus für den Status "zugeordnet"',
"Select this if all specific locations need a parent at the deepest level of the location hierarchy. For example, if 'district' is the smallest division in the hierarchy, then all specific locations would be required to have a district as a parent.": "Wählen Sie diese Option, wenn alle speziellen administrativen Zuständigkeitsbereiche auf der untersten Hierarchieebene einen übergeordneten Zuständigkeitsbereich brauchen. Beispiel: Wenn 'district' der kleinste Bereich in der Hierarchie ist, dann müssen alle speziellen Bereiche einen 'district' als übergeordnetes Element haben.",
"Select this if all specific locations need a parent location in the location hierarchy. This can assist in setting up a 'region' representing an affected area.": 'Wählen Sie diese Option, wenn alle speziellen administrativen Zuständigkeitsbereiche einen übergeordneten Zuständigkeitsbereich in der Gebietshierarchie brauchen. Es kann dabei hilfreich sein eine "region" festzulegen, die den betroffenen Bereich repräsentiert.',
"Sorry, things didn't get done on time.": 'Leider konnten die Aufgaben nicht rechtzeitig ausgeführt werden.',
"Sorry, we couldn't find that page.": 'Leider konnte diese Seite nicht gefunden werden.',
"System's Twitter account updated": 'Der Twitter Account des Systems wurde aktualisiert',
"The Donor(s) for this project. Multiple values can be selected by holding down the 'Control' key.": "Die Spender für dieses Projekt. Mehrere Werte können durch Halten der 'Steuerungstaste' (Strg / Ctrl) ausgewählt werden.",
"The URL of the image file. If you don't upload an image file, then you must specify its location here.": 'Die URL der Bilddatei. Wenn Sie keine Grafikdatei hochladen, dann müssen Sie hier eine URL angeben.',
"To search by person name, enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": "Um nach einem Namen zu suchen, geben Sie durch Leerzeichen getrennt beliebig den Vor-, Mittel- oder Nachnamen ein. Sie können % als Wildcard verwenden. Die Auswahl von 'Suchen' ohne eine Eingabe führt zur Auflistung aller Personen.",
"To search for a body, enter the ID tag number of the body. You may use % as wildcard. Press 'Search' without input to list all bodies.": "Um nach einem Körper zu suchen, geben Sie die Identifikationsmarken-Nummer des Körpers ein. Sie können % als Wildcard verwenden. Die Auswahl von 'Suchen' ohne Eingabe führt zur Auflistung aller Körper.",
"To search for a hospital, enter any of the names or IDs of the hospital, or the organization name or acronym, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.": "Für die Suche nach einem Krankenhaus, geben sie entweder den Namen, die ID, den Organisationsnamen oder ein Acronym jeweils getrennt durch Leerzeichen ein. Sie können % als Wildcard verwenden. Die Auswahl von 'Suchen' ohne Eingabe führt zur Auflistung aller Krankenhäuser.",
"To search for a hospital, enter any of the names or IDs of the hospital, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.": "Für die Suche nach einem Krankenhaus, geben Sie Namen oder die ID des Krankenhauses getrennt durch Leerzeichen ein. Sie können % als Wildcard verwenden. Die Auswahl von 'Suchen' ohne Eingabe führt zur Auflistung aller Krankenhäuser.",
"To search for a location, enter the name. You may use % as wildcard. Press 'Search' without input to list all locations.": "Um einen Ort zu suchen, geben Sie den Namen ein. Sie können % als Wildcard verwenden. Die Auswahl von Drücken 'Suchen' ohne Eingabe führt zur Auflistung aller Orte.",
"To search for a person, enter any of the first, middle or last names and/or an ID number of a person, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": "Um nach einer Person zu suchen, geben Sie durch Leerzeichen getrennt beliebig den Vor-, Mittel- oder Nachnamen ein. Sie können % als Wildcard verwenden. Die Auswahl von 'Suchen' ohne eine Eingabe führt zur Auflistung aller Personen.",
"To search for an assessment, enter any portion the ticket number of the assessment. You may use % as wildcard. Press 'Search' without input to list all assessments.": "Für die Suche nach einer Bewertung, geben Sie einen beliebigen Teil der Ticketnummer der Bewertung ein. Sie können % als Wildcard verwenden. Die Auswahl von 'Suchen' ohne Eingabe führt zur Auflistung aller Bewertungen.",
"Type the first few characters of one of the Person's names.": 'Geben Sie die ersten paar Zeichen des Namens einer Person ein.',
"Upload an image file here. If you don't upload an image file, then you must specify its location in the URL field.": 'Laden Sie hier die Grafikdatei hoch. Wenn sie keine Grafikdatei hochladen, dann müssen Sie im Feld eine URL auf eine im Web verfügbare Grafikdatei angeben.',
"When syncing data with others, conflicts happen in cases when two (or more) parties want to sync information which both of them have modified, i.e. conflicting information. Sync module tries to resolve such conflicts automatically but in some cases it can't. In those cases, it is up to you to resolve those conflicts manually, click on the link on the right to go to this page.": 'Beim Synchronisieren der Daten mit anderen Installationen, können Konflikte auftreten wenn beide (oder mehrere) Parteien die gleichen Daten geändert haben, d. h. widersprüchliche Informationen vorliegen. Das Synchronisationsmodul versucht solche Konflikte automatisch zu beheben, was jedoch in manchen Fällen nicht möglich ist. In solchen Fällen ist es Ihre Aufgabe, diese Konflikte manuell zu beheben; klicken Sie auf den rechten Link, um auf diese Seite zu gelangen.',
"You haven't made any calculations": 'Sie haben keine Brechnungen gemacht',
"couldn't be parsed so NetworkLinks not followed.": 'konnte nicht interpretiert so dass Netzwerklinks nicht verfolgt werden.',
"includes a GroundOverlay or ScreenOverlay which aren't supported in OpenLayers yet, so it may not work properly.": 'Enthält ein GroundOverlay oder ScreenOverlay die in OpenLayers noch nicht unterstützt werden, es wird möglicherweise nicht richtig funktionieren.',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" ist ein optionaler Ausdruck wie "field1=\'newvalue\'\\ ". Sie können die Ergebnisse eines JOINs nicht aktualisieren oder löschen.',
'# of International Staff': '# der internationalen Mitarbeiter',
'# of National Staff': '# der nationalen Mitarbeiter',
'# of Vehicles': '# der Fahrzeuge',
'%(event)s on': '%(event)s am',
'%(msg)s\nIf the request type is "%(type)s", please enter the %(type)s on the next screen.': '%(msg)s\n Wenn der Typ des Requests "%(type)s" ist, geben Sie die %(type)s bitte auf der nächsten Seite ein.',
'%(number)s transferable cases found': '%(number)s transferierbare Fälle gefunden',
'%(number)s payment(s) registered': '%(number)s Auszahlung(en) registriert',
'%(number)s payment(s) not found': '%(number)s Auszahlung(en) nicht gefunden',
'%(system_name)s - Verify Email': '%(system_name)s - Email überprüfen',
'%s rows deleted': '%s gelöschte Zeilen',
'%s rows updated': '%s Zeilen aktualisiert',
'& then click on the map below to adjust the Lat/Lon fields': '& anschließend klicken Sie auf die Karte weiter unten um die Längen- und Breitengradwerte zu korrigieren',
'* Required Fields': '* erforderliche Felder',
'0-15 minutes': '0 - 15 Minuten',
'1 Assessment': '1 Bewertung',
'1 location, shorter time, can contain multiple Tasks': '1 Position, kürzere Zeit, kann mehrere Aufgaben beinhalten',
'1-3 days': '1-3 Tage',
'15-30 minutes': '15-30 Minuten',
'2 different options are provided here currently:': '2 verschiedene Optionen stehen hier derzeit zur Verfügung:',
'2x4 Car': 'Fahrzeug mit einer Antriebsachse',
'30-60 minutes': '30-60 Minuten',
'4-7 days': '4-7 Tage',
'4x4 Car': 'Allradfahrzeug',
'8-14 days': '8-14 Tage',
'3W': 'Wer? Was? Wo?',
'A Marker assigned to an individual Location is set if there is a need to override the Marker assigned to the Feature Class.': 'Es kann eine Zuordnung eines Symbol zu einer individuellen Position erfolgen, um damit die Symbolisierung der Objektklasse zu überschreiben.',
'A Reference Document such as a file, URL or contact person to verify this data. You can type the 1st few characters of the document name to link to an existing document.': 'Ein Referenzdokument wie z. B. eine Datei, URL oder Ansprechpartner zur Überprüfung dieser Daten. Sie können die ersten Zeichen eines vorhandenen Dokumentnamens eingeben um dieses zu referenzieren.',
'A brief description of the group (optional)': 'Eine kurze Beschreibung der Gruppe (optional)',
'A catalog of different Assessment Templates including summary information': 'Ein Katalog von verschiedenen Beurteilungsvorlagen inklusive einer Zusammenfassung',
'A file downloaded from a GPS containing a series of geographic points in XML format.': 'Eine Datei von einem GPS Gerät das eine Reihe von geographischen Positionen im XML-Format enthält.',
'A file in GPX format taken from a GPS whose timestamps can be correlated with the timestamps on the photos to locate them on the map.': 'Eine Datei im GPX-Format aus einem GPS Gerät deren Zeitstempel genutzt werden können, um sie mit den Zeitstempeln von Fotos zu verknüpfen und diese dann auf einer Karte darzustellen.',
'A library of digital resources, such as photos, documents and reports': 'Eine Bibliothek von digitalen Ressourcen, wie z. B. Fotos, Dokumente und Berichte',
'A location group can be used to define the extent of an affected area, if it does not fall within one administrative region.': 'Eine Gebietsgruppe kann verwendet werden, um den Bereich eines betroffenen Gebietes zu definieren, falls dieses nicht mit einer vorhandenen administrativen Einheit zusammenfällt.',
'A location group is a set of locations (often, a set of administrative regions representing a combined area).': 'Eine Gebietsgruppe besteht aus mehreren Gebieten (häufig eine Gruppe von Verwaltungsregionen, die einen eigenen Zuständigkeitsbereich bilden).',
'A location group must have at least one member.': 'Eine Gebietsgruppe muss mindestens ein Element beinhalten.',
'A unique code to identify the status': 'Ein eindeutiger Code um den Status zu identifizieren',
'ABOUT THIS MODULE': 'ÜBER DIESES MODUL',
'ACCESS DATA': 'ZUGRIFFSDATEN',
'Actioning officer': 'Verantwortliche Person',
'ANY': 'Irgendwelche',
'API is documented here': 'Die API ist hier dokumentiert',
'ATC-20 Rapid Evaluation modified for New Zealand': 'ATC-20 Schnelle Evaluierung - angepasst für Neuseeland',
'Abbreviation': 'Abkürzung',
'Ability to Fill Out Surveys': 'Möglichkeit Umfragen auszufüllen',
'Ability to customize the list of details tracked at a Shelter': 'Möglichkeit die Liste der Detailangaben zu einer Unterkunft anzupassen',
'Ability to customize the list of human resource tracked at a Shelter': 'Möglichkeit die Liste der menschlichen Ressourcen einer Unterkunft anzupassen',
'Ability to customize the list of important facilities needed at a Shelter': 'Möglichkeit die Liste mit den wichtigen Einrichtungen, die in einer Unterkunft benötigt werden, anzupassen',
'Ability to view Results of Completed and/or partially filled out Surveys': 'Möglichkeit die Ergebnisse von abgeschlossenen und/oder teilweise ausgefüllten Umfragen einzusehen',
'About': 'Über',
'About Us': 'Über uns',
'Accept Push': 'Akzeptiert Push',
'Access denied': 'Zugriff verweigert',
'Access to Shelter': 'Zugang zu Unterkünften',
'Access to education services': 'Zugang zu Ausbildungsdienstleistungen',
'Accessibility of Affected Location': 'Erreichbarkeit der betroffenen Region',
'Accompanied Child': 'Begleitetes Kind',
'Account Registered - Please Check Your Email': 'Benutzerkonto registriert - Bitte überprüfen Sie Ihre E-Mail',
'Account SID': 'SID des Accounts',
'Acronym': 'Abkürzung',
'Actionable by all targeted recipients': 'Bearbeitbar von allen adressierten Empfängern',
'Actionable only by designated exercise participants; exercise identifier SHOULD appear in <note>': 'Bearbeitbar nur von bestimmten Übungsteilnehmern; Übungsidentifikator sollte unter <note> auftauchen',
'Actioned?': 'Bearbeitet?',
'Actions taken as a result of this request.': 'Als Ergebnis auf diese Anfrage gestartete Aktionen.',
'Actions': 'Aktionen',
'Activate Events from Scenario templates for allocation of appropriate Resources (Human, Assets & Facilities).': 'Aktivieren Sie Ereignisse aus den SZENARIO Vorlagen um die passenden Ressourcen zuzuordnen (Menschen, Anlagen und Einrichtungen).',
'Active': 'Aktiv',
'Active Appointment': 'Aktiver Termin',
'Active Problems': 'Aktive Probleme',
'Activities matching Assessments': 'Aktivitäten passend zur Beurteilung',
'Activities of boys 13-17yrs before disaster': 'Aktivitäten von Jungen im Alter zwischen 13-17 Jahren vor der Katastrophe',
'Activities of boys 13-17yrs now': 'Aktivitäten von Jungen im Alter zwischen 13-17 Jahren heute',
'Activities of boys <12yrs before disaster': 'Aktivitäten von Jungen unter 12 Jahren vor der Katastrophe',
'Activities of boys <12yrs now': 'Aktivitäten von Jungen unter 12 Jahren heute',
'Activities of children': 'Aktivitäten von Kindern',
'Activities of girls 13-17yrs before disaster': 'Aktivitäten von Mädchen im Alter von 13-17 Jahren vor der Katastrophe',
'Activities of girls 13-17yrs now': 'Aktivitäten von Mädchen im Alter von 13-17 Jahren heute',
'Activities of girls <12yrs before disaster': 'Aktivitäten von Mädchen unter 12 Jahren vor der Katastrophe',
'Activities of girls <12yrs now': 'Aktivitäten von Mädchen unter 12 Jahre heute',
'Activities': 'Aktivitäten',
'Activities to follow up': 'Fällige Wiedervorlagen',
'Activity Added': 'Aktivität hinzugefügt',
'Activity Deleted': 'Aktivität gelöscht',
'Activity Details': 'Details zur Aktivität',
'Activity Report': 'Bericht zur Aktivität',
'Activity Reports': 'Berichte zu Aktivitäten',
'Activity Type': 'Typ der Aktivität',
'Activity Types': 'Typen von Aktivität',
'Activity Updated': 'Aktivität aktualisiert',
'Activity': 'Aktivität',
'Add Activity Type': 'Aktivitätstyp hinzufügen',
'Add Address': 'Adresse hinzufügen',
'Add Alternative Item': 'Alternativen Artikel hinzufügen',
'Add Assessment Summary': 'Zusammenfassung der Beurteilung hinzufügen',
'Add Assessment': 'Beurteilung hinzufügen',
'Add Asset Log Entry - Change Label': 'Bestandsprotokoll Eintrag hinzufügen - Beschriftung verändern',
'Add Availability': 'Verfügbarkeit hinzufügen',
'Add Baseline Type': 'Basislinien-Typ hinzufügen',
'Add Baseline': 'Basislinie hinzufügen',
'Add Bundle': 'Paket hinzufügen',
'Add Camp Service': 'Camp-Dienst hinzufügen',
'Add Camp Type': 'Camp Art hinzufügen',
'Add Camp': 'Camp hinzufügen',
'Add Certificate for Course': 'Zertifikat für Kurs hinzufügen',
'Add Certification': 'Zertifizierung hinzufügen',
'Add Competency': 'Qualifikation hinzufügen',
'Add Contact': 'Kontaktperson hinzufügen',
'Add Contact Information': 'Kontaktinformation hinzufügen',
'Add Credential': 'Qualifikation hinzufügen',
'Add Credentials': 'Qualifikationen hinzufügen',
'Add Disaster Victims': 'Katastrophenopfer hinzufügen',
'Add Distribution.': 'Verteilung hinzufügen.',
'Add Donor': 'Spender hinzufügen',
'Add Family Member': 'Familienmitglied hinzufügen',
'Add Flood Report': 'Flut Bericht hinzufügen',
'Add Group Member': 'Gruppenmitglied hinzufügen',
'Add Human Resource': 'Personal hinzufügen',
'Add Identity': 'Identität hinzufügen',
'Add Image': 'Bild hinzufügen',
'Add Impact Type': 'Auswirkungstyp Hinzufügen',
'Add Impact': 'Auswirkung hinzufügen',
'Add Item to Catalog': 'Artikel zu Katalog hinzufügen',
'Add Item to Commitment': 'Eintrag zur Zusage hinzufügen',
'Add Item to Inventory': 'Artikel zu Inventar hinzufügen',
'Add Item to Request': 'Artikel zur Anforderung hinzufügen',
'Add Item to Shipment': 'Artikel der Lieferung hinzufügen',
'Add Item': 'Artikel hinzufügen',
'Add Job Role': 'Tätigkeit hinzufügen',
'Add Key': 'Schlüssel hinzufügen',
'Add Kit': 'Ausstattung (Kit) hinzufügen',
'Add Layer to this Profile': 'Kartenebene zu diesem Profil hinzufügen',
'Add Level 1 Assessment': 'Stufe 1 Beurteilung hinzufügen',
'Add Level 2 Assessment': 'Stufe 2 Beurteilung hinzufügen',
'Add Location': 'Standort hinzufügen',
'Add Log Entry': 'Protokolleintrag hinzufügen',
'Add Member': 'Mitglied hinzufügen',
'Add Membership': 'Mitgliedschaft hinzufügen',
'Add Message': 'Nachricht hinzufügen',
'Add Mission': 'Auftrag hinzufügen',
'Add Mobile Commons Settings': 'Mobile Commons Einstellungen hinzufügen',
'Add Need Type': 'Bedarfstyp hinzufügen',
'Add Need': 'Bedarf hinzufügen',
'Add New Assessment Summary': 'Neue Beurteilungsbeschreibung hinzufügen',
'Add New Baseline Type': 'Einen neuen Grundlinientyp hinzufügen',
'Add New Baseline': 'Eine neue Grundlinie hinzufügen',
'Add New Budget': 'Ein neues Budget hinzufügen',
'Add New Bundle': 'Ein neues Paket hinzufügen',
'Add New Camp Service': 'Neuen Camp Service hinzufügen',
'Add New Camp Type': 'Neuen Camp Typ hinzufügen',
'Add New Camp': 'Neues Camp hinzufügen',
'Add New Cluster Subsector': 'Neuen Cluster Unterbereich hinzufügen',
'Add New Cluster': 'Neuen Cluster hinzufügen',
'Add New Commitment Item': 'Zugesagten Artikel hinzufügen',
'Add New Document': 'Neues Dokument hinzufügen',
'Add New Donor': 'Neuen Spender hinzufügen',
'Add New Entry': 'Neuen Eintrag hinzufügen',
'Add New Event': 'Neues Ereignis hinzufügen',
'Add New Flood Report': 'Neuen Flutbericht hinzufügen',
'Add New Human Resource': 'Neue Human Resource hinzufügen',
'Add New Image': 'Neue Grafik hinzufügen',
'Add New Impact Type': 'Neuen Auswirkungstyp hinzufügen',
'Add New Impact': 'Neue Auswirkung hinzufügen',
'Add New Item to Kit': 'Neuen Artikel zur Ausstattung (Kit) hinzufügen',
'Add New Key': 'Neuen Schlüssel hinzufügen',
'Add New Level 1 Assessment': 'Stufe 1 Beurteilung hinzufügen',
'Add New Level 2 Assessment': 'Stufe 2 Beurteilung hinzufügen',
'Add New Member': 'Neues Mitglied hinzufügen',
'Add New Membership': 'Neue Mitgliedschaft hinzufügen',
'Add New Need Type': 'Neuen Bedarfstyp hinzufügen',
'Add New Need': 'Neuen Bedarf hinzufügen',
'Add New Population Statistic': 'Neue Bevölkerungsstatistik hinzufügen',
'Add New Problem': 'Neues Problem hinzufügen',
'Add New Rapid Assessment': 'Neue Schnell-Beurteilung hinzufügen',
'Add New Received Item': 'Neuen erhaltenen Artikel hinzufügen',
'Add New Record': 'Neuen Datensatz hinzufügen',
'Add New Request Item': 'Neuen Anfrageartikel hinzufügen',
'Add New Request': 'Neue Anfrage hinzufügen',
'Add New River': 'Neuen Fluss hinzufügen',
'Add New Role to User': 'Benutzer eine neue Rolle zuweisen',
'Add New Scenario': 'Neues Szenario hinzufügen',
'Add New Sent Item': 'Neuen gesendeten Artikel hinzufügen',
'Add New Setting': 'Neue Einstellung hinzufügen',
'Add New Solution': 'Neue Lösung hinzufügen',
'Add New Staff Type': 'Neuen Mitarbeitertyp hinzufügen',
'Add New Subsector': 'Neuen Teilbereich hinzufügen',
'Add New Survey Answer': 'Neue Antwort zur Umfrage hinzufügen',
'Add New Survey Question': 'Neue Frage zur Umfrage hinzufügen',
'Add New Survey Series': 'Neue Umfrageserie hinzufügen',
'Add New Survey Template': 'Neue Umfragevorlage hinzufügen',
'Add New Team': 'Neues Team hinzufügen',
'Add New Ticket': 'Neues Ticket hinzufügen',
'Add New Track': 'Neuen Pfad hinzufügen',
'Add New User to Role': 'Neuen Benutzer der Rolle hinzufügen',
'Add New': 'Neu hinzufügen',
'Add Organization Domain': 'Organisationsdomain hinzufügen',
'Add Peer': 'Peer-Zugriffspunkt hinzufügen',
'Add Person': 'Person hinzufügen',
'Add Photo': 'Foto hinzufügen',
'Add PoI': 'PoI hinzufügen',
'Add Population Statistic': 'Neue Bevölkerungsstatistik hinzufügen',
'Add Position': 'Position hinzufügen',
'Add Problem': 'Problem hinzufügen',
'Add Question': 'Frage hinzufügen',
'Add Rapid Assessment': 'Schnell-Beurteilung hinzufügen',
'Add Record': 'Datensatz hinzufügen',
'Add Reference Document': 'Referenzdokument hinzufügen',
'Add Report': 'Bericht hinzufügen',
'Add Request': 'Anfrage hinzufügen',
'Add Resource': 'Ressource hinzufügen',
'Add Section': 'Abschnitt hinzufügen',
'Add Setting': 'Einstellung hinzufügen',
'Add Skill': 'Fähigkeit hinzufügen',
'Add Skill Equivalence': 'Fähigkeitsäquivalenz hinzufügen',
'Add Skill Provision': 'Fähigkeitsbestimmung hinzufügen',
'Add Skill to Request': 'Fähigkeit zur Anfrage hinzufügen',
'Add Solution': 'Lösung hinzufügen',
'Add Staff Type': 'Mitarbeitertyp hinzufügen',
'Add Subscription': 'Abonnement hinzufügen',
'Add Subsector': 'Teilbereich hinzufügen',
'Add Survey Answer': 'Umfrageantwort hinzufügen',
'Add Survey Question': 'Umfrage Frage hinzufügen',
'Add Survey Series': 'Umfrage Serie hinzufügen',
'Add Survey Template': 'Umfrage Vorlage hinzufügen',
'Add Team Member': 'Teammitglied hinzufügen',
'Add Team': 'Team hinzufügen',
'Add Ticket': 'Ticket hinzufügen',
'Add to Bin': 'Zum Lagerbehälter hinzufügen',
'Add Training': 'Schulung hinzufügen',
'Add Twilio Channel': 'Twilio Kanal hinzufügen',
'Add Twitter Channel': 'Twitter Kanal hinzufügen',
'Add Unit': 'Einheit hinzufügen',
'Add Vehicle': 'Fahrzeug hinzufügen',
'Add Vehicle Type': 'Fahrzeugtyp hinzufügen',
'Add Volunteer Availability': 'Verfügbarkeit von Freiwilligen hinzufügen',
'Add a Reference Document such as a file, URL or contact person to verify this data. If you do not enter a Reference Document, your email will be displayed instead.': 'Fügen Sie ein Referenzdokument z. B. eine Datei, URL oder einen Ansprechpartner zur Überprüfung dieser Daten ein. Wenn Sie kein Referenzdokument angeben, wird stattdessen ihre Mailadresse angezeigt.',
'Add a Volunteer': 'Einen Freiwilligen hinzufügen',
'Add a new certificate to the catalog.': 'Hinzufügen eines neuen Zertifikats zum Katalog',
'Add a new competency rating to the catalog.': 'Neue Kompetenzeinstufung zum Katalog hinzufügen',
'Add a new course to the catalog.': 'Neuen Kurs zum Katalog hinzufügen',
'Add a new job role to the catalog.': 'Neue Tätigkeit zum Katalog hinzufügen',
'Add a new skill provision to the catalog.': 'Neue Bereitstellung einer Fähigkeit zum Katalog hinzufügen',
'Add a new skill to the catalog.': 'Neue Fähigkeit zum Katalog hinzufügen',
'Add a new skill type to the catalog.': 'Neue Fähigkeitsart zum Katalog hinzufügen.',
'Add new Group': 'Neue Gruppe hinzufügen',
'Add new Individual': 'Neues Individuum hinzufügen',
'Add new project.': 'Neues Projekt hinzufügen.',
'Add staff members': 'Mitarbeiter hinzufügen',
'Add strings manually': 'Texte händisch hinzufügen',
'Add to a Team': 'Zu einem Team hinzufügen',
'Add to Bundle': 'Zu Paket hinzufügen',
'Add to budget': 'Zum Budget hinzufügen',
'Add volunteers': 'Freiwillige hinzufügen',
'Add': 'Hinzufügen',
'Add/Edit/Remove Layers': 'Hinzufügen/Bearbeiten/Entfernen von Kartenebenen',
'Added to Group': 'Zur Gruppe hinzugefügt',
'Added to Team': 'Zum Team hinzugefügt',
'Additional Beds / 24hrs': 'Zusätzliche Betten / 24 Std.',
'Address Details': 'Details zur Adresse',
'Address Type': 'Typ der Adresse',
'Address added': 'Adresse hinzugefügt',
'Address deleted': 'Adresse gelöscht',
'Address updated': 'Adresse aktualisiert',
'Address': 'Adresse',
'Addresses': 'Adressen',
'Adequate food and water available': 'Angemessene Nahrung und Wasser verfügbar',
'Adequate': 'Angemessen',
'Adjust Stock Levels': 'Lagerbestand anpassen',
'Adjust Stock': 'Lagerbestand anpassen',
'Admin': 'Administration',
'Admin Email': 'Email Administrator ',
'Admin Name': 'Name Administrator',
'Admin Tel': 'Telefonnummer Administrator',
'Administration': 'Verwaltung',
'Administrative support cost': 'Kosten für administrative Unterstützung',
'Admission from': 'Zugang von',
'Admissions': 'Zugänge',
'Admissions/24hrs': 'Zugänge / 24 Stunden',
'Admitted on': 'Zugang am',
'Adolescent (12-20)': 'Heranwachsende (12-20)',
'Adolescent participating in coping activities': 'Jugendliche, die an Bewältigungsaktivitäten teilnehmen',
'Adopted Child': 'Adoptiertes Kind',
'Adult (21-50)': 'Erwachsene (21-50)',
'Adult ICU': 'Intensivstation für Erwachsene',
'Adult Psychiatric': 'Erwachsenenpsychiatrie',
'Adult female': 'Erwachsener - weiblich',
'Adult male': 'Erwachsener - männlich',
'Adults in prisons': 'Erwachsene in Gefängnissen',
'Advanced': 'Erweitert',
'Advanced Javascript Layers': 'Advanced Javascript Layers',
'Advice': 'Hinweise',
'Advice at Check-in': 'Hinweis bei Check-in',
'Advice at Check-out': 'Hinweis bei Check-out',
'Advice at ID Check': 'Hinweis bei ID Prüfung',
'Advisory': 'Beratend',
'After clicking on the button, a set of paired items will be shown one by one. Please select the one solution from each pair that you prefer over the other.': 'Nach einem Klick auf die Schaltfläche wird ein Satz gepaarter Elemente nacheinander angezeigt. Bitte wählen Sie aus jedem Paar diejenige Lösung, die Sie gegenüber der anderen bevorzugen.',
'Age': 'Alter',
'Age Group': 'Altersgruppe',
'Age group does not match actual age.': 'Altersgruppe passt nicht zum tatsächlichen Alter.',
'Age group': 'Altersgruppe',
'Aggravating factors': 'Erschwerende Faktoren',
'Aggregate': 'Zusammenstellung',
'Agriculture': 'Landwirtschaft',
'Air Transport Service': 'Lufttransportsservice',
'Aircraft Crash': 'Flugzeugabsturz',
'Aircraft Hijacking': 'Flugzeugentführung',
'Aircraft Maximum Size': 'Maximale Größe des Flugzeugs',
'Airports': 'Flughäfen',
'Airport Closure': 'Flughafenschließung',
'Airspace Closure': 'Luftraumsperrung',
'Alcohol': 'Alkohol',
'All Activities': 'Alle Aktivitäten',
'All Cases': 'Alle Fälle',
'All Inbound & Outbound Messages are stored here': 'Alle eingehenden und abgehenden Nachrichten werden hier gespeichert',
'All Resources': 'Alle Ressourcen',
'All Tasks': 'Alle Aufgaben',
'All data provided by the Sahana Software Foundation from this site is licenced under a Creative Commons Attribution licence. However, not all data originates here. Please consult the source field of each entry.': 'Alle von der Sahana Software Foundation bereitgestellten Daten dieser Seite sind unter der Creative Commons Attribution licence lizenziert. Es stammen jedoch nicht alle Daten von hier. Bitte beachten Sie das Quellen-Feld des jeweiligen Eintrags.',
'All': 'Alles',
'All Records': 'Alle Datensätze',
'Allocate Group': 'Gruppe zuweisen',
'Allowance': 'Taschengeld',
'Allowances': 'Taschengelder',
'Allowance Information': 'Informationen zum Taschengeld',
'Allowance Information added': 'Information zum Taschengeld hinzugefügt',
'Allowance Information deleted': 'Information zum Taschengeld gelöscht',
'Allowance Information updated': 'Information zum Taschengeld aktualisiert',
'Allowance Payment': 'Taschengeldauszahlung',
'Allowance Payments': 'Taschengeldauszahlungen',
'Allowance Suspended': 'Taschengeld ausgesetzt',
'Allowed to push': 'Push erlaubt',
'Allows a Budget to be drawn up': 'Ermöglicht ein Budget aufzustellen.',
'Allows authorized users to control which layers are available to the situation map.': 'Erlaubt berechtigten Benutzern zu steuern, welche Kartenebenen auf der Lagekarte verfügbar sind.',
'Alternative Item Details': 'Details zum alternativen Artikel',
'Alternative Item added': 'Alternativer Artikel hinzugefügt.',
'Alternative Item deleted': 'Alternativer Artikel gelöscht',
'Alternative Item updated': 'Alternativer Artikel aktualisiert',
'Alternative Item': 'Alternativer Artikel',
'Alternative Items': 'Alternative Artikel',
'Alternative places for studying': 'Alternative Orte für das Studium',
'Ambulance Service': 'Ambulanter Krankendienst',
'Amount': 'Betrag',
'An Assessment Template can be selected to create a Disaster Assessment. Within a Disaster Assessment, responses can be collected and results can analyzed as tables, charts and maps': 'Es kann eine Beurteilungsvorlage zur Erstellung einer Katastrophenbeurteilung ausgewählt werden. Innerhalb der Katastrophenbeurteilung können Antworten gesammmelt und Ergebnisse in Form von Tabellen, Graphiken und Karten erzeugt werden.',
'An intake system, a warehouse management system, commodity tracking, supply chain management, procurement and other asset and resource management capabilities.': 'Ein Aufnahmesystem, ein Warenhausmanagementsystem, Warenlieferungsverfolgung, Versorgungskettenmanagement, Beschaffung und andere Anlagen-und Verwaltungsfunktionen.',
'An item which can be used in place of another item': 'Ein Artikel, der anstatt eines anderen Artikels verwendet werden kann',
'Analysis of Completed Surveys': 'Analyse von abgeschlossenen Umfragen',
'Animal Die Off': 'Tiersterben',
'Animal Feed': 'Tierfutter',
'Announcements': 'Aktuelle Hinweise',
'Anthropology': 'Anthropologie',
'Antibiotics available': 'Antibiotika verfügbar',
'Antibiotics needed per 24h': 'Menge an Antibiotika die pro 24h benötigt wird',
'Apparent Age': 'Offensichtliches Alter',
'Apparent Gender': 'Offensichtliches Geschlecht',
'Application': 'Anwendung',
'Application Deadline': 'Anwendung Frist',
'Application Permissions': 'Anwendungsberechtigungen',
'Apply changes': 'Änderungen übernehmen',
'Appointments': 'Termine',
'Appointment Type': 'Terminart',
'Appointment Types': 'Terminarten',
'Appointment Type Details': 'Details zur Terminart',
'Appointment Type added': 'Terminart hinzugefügt',
'Appointment Type deleted': 'Terminart gelöscht',
'Appointment Type updated': 'Terminart aktualisiert',
'Approve': 'Bestätigen',
'Approved': 'Bestätigt',
'Approver': 'Bestätigende Stelle',
'Archive': 'Archiv',
'Archived': 'Archiviert',
'Archived Cases': 'Archivierte Fälle',
'Arctic Outflow': 'Arktischer Abfluss',
'Areas inspected': 'Untersuchte Gebiete',
'Are you sure you want to delete this record?': 'Sind Sie sicher dass Sie diesen Datensatz löschen wollen?',
'Assessment Details': 'Details zur Beurteilung',
'Assessment Reported': 'Beurteilung gemeldet',
'Assessment Summaries': 'Zusammenfassungen der Beurteilung',
'Assessment Summary Details': 'Details zur Zusammenfassung der Beurteilung',
'Assessment Summary added': 'Zusammenfassung der Beurteilung hinzugefügt',
'Assessment Summary deleted': 'Zusammenfassung der Beurteilung gelöscht',
'Assessment Summary updated': 'Zusammenfassung der Beurteilung aktualisiert',
'Assessment added': 'Beurteilung hinzugefügt',
'Assessment admin level': 'Admin Ebene zur Beurteilung',
'Assessment deleted': 'Beurteilung gelöscht',
'Assessment timeline': 'Beurteilungszeitachse',
'Assessment updated': 'Beurteilung aktualisiert',
'Assessment': 'Beurteilung',
'Assessment Templates': 'Beurteilungsvorlage',
'Assessments Needs vs. Activities': 'Bedarf für Beurteilungen gegenüber den Aktivitäten',
'Assessments and Activities': 'Beurteilungen und Aktivitäten',
'Assessments': 'Beurteilungen',
'Assessor': 'Beurteilender',
'Asset Details': 'Details zur Anlage',
'Asset Log Details': 'Anlage Protokoll Details',
'Asset Log Empty': 'Anlage Protokoll leer',
'Asset Log Entry Added - Change Label': 'Anlage Protokolleintrag hinzugefügt - Beschriftung ändern',
'Asset Log Entry deleted': 'Anlage Protokolleintrag gelöscht',
'Asset Log Entry updated': 'Anlage Protokolleintrag aktualisiert',
'Asset Management': 'Anlageverwaltung',
'Asset Number': 'Anlagenummer',
'Asset added': 'Anlage hinzugefügt',
'Asset deleted': 'Anlage gelöscht',
'Asset removed': 'Anlage entfernt',
'Asset updated': 'Anlage aktualisiert',
'Asset': 'Anlage',
'Assets are resources which are not consumable but are expected back, so they need tracking.': 'Anlagen sind Ressourcen, die nicht verbrauchbar sind aber zurück erwartet werden, daher müssen sie nachverfolgt werden.',
'Assets': 'Anlagen',
'Assign': 'Zuordnen',
'Assign ': 'Zuordnung ',
'Assign Group': 'Gruppe zuordnen',
'Assign Shelter': 'Unterkunft zuordnen',
'Assign Staff': 'Mitarbeiter zuordnen',
'Assign to Org.': 'Der Org. zuordnen',
'Assign to Organization': 'Der Organisation zuordnen',
'Assign to Person': 'Der Person zuordnen',
'Assign to Site': 'Dem Standort zuordnen',
'Assigned By': 'Zugeordnet von',
'Assigned To': 'Zugeordnet zu',
'Assigned to Organization': 'Zur Organisation zugeordnet',
'Assigned to Person': 'Zur Person zugeordnet',
'Assigned to Site': 'Zum Standort zugeordnet',
'Assigned to': 'Zugeordnet zu',
'Assigned': 'Zugeordnet',
'Assume this event type if no type was specified for an event': 'Diesen Ereignistyp annehmen wenn für ein Ereignis kein Typ angegeben wurde',
'Asylum Application': 'Asylantrag',
'At/Visited Location (not virtual)': 'Am Ort / besuchter Ort (nicht virtuell)',
'Attachments': 'Anhänge',
'Attend to information sources as described in <instruction>': 'Sich um Informationsquellen kümmern wie in <instruction> beschrieben',
'Attribution': 'Eigenschaften',
'Authentication Required': 'Anmeldung erforderlich',
'Author': 'Autor',
'Automatically create this appointment for new cases': 'Termin für neue Fälle automatisch anlegen',
'Availability': 'Verfügbarkeit',
'Availability of bath handicap facilities': 'Verfügbarkeit eines behindertengerechten Bades',
'Available Alternative Inventories': 'Verfügbare alternative Bestände',
'Available Bath': 'Verfügbarkeit von Bädern',
'Available Beds': 'Verfügbare Betten',
'Available Capacity': 'Verfügbare Kapazität',
'Available Inventories': 'Verfügbare Bestände',
'Available Messages': 'Verfügbare Nachrichten',
'Available Records': 'Verfügbare Datensätze',
'Available Shower': 'Dusche vorhanden',
'Available databases and tables': 'Verfügbare Datenbanken und Tabellen',
'Available for Location': 'Verfügbar für Ort',
'Available from': 'Verfügbar von',
'Available in Viewer?': 'Verfügbar in Lagedarstellung?',
'Available of shower handicap facilities': 'Verfügbarkeit einer behindertengerechten Dusche',
'Available until': 'Verfügbar bis',
'Available': 'Verfügbar',
'Avalanche': 'Lawine',
'Average': 'Durchschnitt',
'Avoid the subject event as per the <instruction>': 'Vermeiden Sie das betreffende Ereignis gemäß <instruction>',
'Awards': 'Auszeichnungen',
'Background Color for Text blocks': 'Hintergrundfarbe für Textblöcke',
'Background Color': 'Hintergrundfarbe',
'Back to Check-in/Check-out': 'Zurück zu Check-in/Check-out',
'Back to %(appname)s': 'Zurück zu %(appname)s',
'Baldness': 'Kahlköpfigkeit',
'BAMF Registration': 'BAMF Registrierung',
'Banana': 'Banane',
'Bank/micro finance': 'Bank/Mikro Finanzierung',
'Barge Capacity': 'Frachtschiffkapazitäten',
'Barricades are needed': 'Barrikaden sind erforderlich',
'Base Layer?': 'Basis Kartenebene?',
'Base Location': 'Basis Standort/Region',
'Base Site Set': 'Basisstandort definieren',
'Baseline Data': 'Referenzdaten',
'Baseline Number of Beds': 'Referenzdatum Anzahl von Betten',
'Baseline Type Details': 'Referenzdatumstyp Details',
'Baseline Type added': 'Referenzdatumstyp hinzugefügt',
'Baseline Type deleted': 'Referenzdatumstyp gelöscht',
'Baseline Type updated': 'Referenzdatumstyp aktualisiert',
'Baseline Type': 'Referenzdatumstyp',
'Baseline Types': 'Referenzdatumstypen',
'Baseline added': 'Referenzdatum hinzugefügt',
'Baseline deleted': 'Referenzdatum gelöscht',
'Baseline number of beds of that type in this unit.': 'Referenzdatum Anzahl von Betten dieses Typs in dieser Einheit.',
'Baseline updated': 'Referenzdatum aktualisiert',
'Baselines Details': 'Referenzdaten Details',
'Baselines': 'Referenzdaten',
'Basic Assessment Reported': 'Grundlegende Beurteilung berichtet',
'Basic Assessment': 'Grundlegende Beurteilung',
'Basic Details': 'Grundlegende Details',
'Basic reports on the Shelter and drill-down by region': 'Grundlegende Berichte über Unterkunft und Drill-down nach Region',
'Bath Availability': 'Bad vorhanden',
'Bath Handicap Facilities': 'Behindertengerechtes Bad',
'Bath with handicap facilities': 'Bad mit behindertengerechter Einrichtung',
'Baud rate to use for your modem - The default is safe for most cases': 'Baudrate für das Modem - der Standardwert ist in den meisten Fällen ausreichend',
'BEA Registration': 'BEA Registrierung',
'Beam': 'Träger',
'Bed Capacity per Unit': 'Bettenkapazität pro Einheit',
'Bed Capacity': 'Bettenkapazität',
'Bed Type': 'Bett-Typ',
'Bed type already registered': 'Bett-Typ bereits registriert',
'Below ground level': 'Unter dem Erdgeschoss',
'Beneficiaries': 'Begünstigte',
'Beneficiary': 'Begünstigter',
'Beneficiary Type': 'Typ des Begünstigten',
'BFV Arrival': 'BFV Ankunft',
'Biological Hazard': 'Biologische Gefahr',
'Bin': 'Lagerbehälter',
'Biscuits': 'Kekse',
'Blizzard': 'Schneesturm',
'Blood Type (AB0)': 'Blutgruppe (ABO)',
'Blowing Snow': 'Schneewehen',
'Boat': 'Boot',
'Bodies found': 'Leichen gefunden',
'Bodies recovered': 'Leichen geborgen',
'Body Recovery Request': 'Leichenbergungsanforderung',
'Body Recovery Requests': 'Leichenbergungsanforderungen',
'Body': 'Körper',
'Bomb Explosion': 'Bombenexplosion',
'Bomb Threat': 'Bombendrohung',
'Bomb': 'Bombe',
'Border Color for Text blocks': 'Rahmenfarbe für Textblöcke',
'Both': 'Beides',
'Brand Details': 'Details zur Marke',
'Brand added': 'Marke hinzugefügt',
'Brand deleted': 'Marke gelöscht',
'Brand updated': 'Marke aktualisiert',
'Brand': 'Marke',
'Brands': 'Marken',
'Bricks': 'Ziegelsteine',
'Bridge Closed': 'Brücke ist geschlossen',
'Bucket': 'Eimer',
'Budget Details': 'Details zum Budget',
'Budget Updated': 'Budget aktualisiert',
'Budget added': 'Budget hinzugefügt',
'Budget deleted': 'Budget gelöscht',
'Budget updated': 'Budget aktualisiert',
'Budget': 'Budget',
'Budgeting Module': 'Budget Modul',
'Buffer': 'Puffer',
'Bug': 'Programmfehler',
'Building': 'Gebäude',
'Building Assessments': 'Gebäudebeurteilungen',
'Building Collapsed': 'Gebäude zusammengebrochen',
'Building Name': 'Name des Gebäudes',
'Building Safety Assessments': 'Bewertung Gebäudesicherheit',
'Building Short Name/Business Name': 'Gebäude Kurzname / Firmenname',
'Building or storey leaning': 'Gebäude- oder Stockwerkneigung',
'Built using the Template agreed by a group of NGOs working together as the': 'Erstellt unter Verwendung einer abgestimmten Vorlage einer Gruppe von NGOs unter dem Namen',
'Bulk Status Update': 'Massen-Statusaktualisierung',
'Bulk Uploader': 'Upload von Massendaten',
'Bundle Contents': 'Produktpaket Inhalt',
'Bundle Details': 'Produktpaket Details',
'Bundle Updated': 'Produktpaket aktualisiert',
'Bundle added': 'Produktpaket hinzugefügt',
'Bundle deleted': 'Produktpaket gelöscht',
'Bundle updated': 'Produktpaket aktualisiert',
'Bundle': 'Produktpaket',
'Bundles': 'Produktpakete',
'Burn ICU': 'Verbrennungseinheit',
'Burn': 'Brennen',
'Burned/charred': 'Verbrannt / verkohlt',
'BÜMA valid until': 'BÜMA gültig bis',
'By': 'Nach',
'By Einrichtung': 'Nach Einrichtung',
'By Facility': 'Nach Einrichtung',
'By Inventory': 'Nach Bestand',
'CBA Women': 'Frauen CBA',
'CSS file %s not writable - unable to apply theme!': 'CSS Datei %s nicht beschreibbar - Motiv kann nicht angewendet werden!',
'Calculate': 'Starte Berechnung',
'Camp Coordination/Management': 'Camp Koordinierung / Management',
'Camp Service Details': 'Details zu Camp Leistung',
'Camp Service added': 'Camp Leistung hinzugefügt',
'Camp Service deleted': 'Camp Leistung gelöscht',
'Camp Service updated': 'Leistung des Camps aktualisiert',
'Camp Services': 'Leistungen des Camps',
'Camp Type Details': 'Details zum Camp Typ',
'Camp Type added': 'Camp Typ hinzugefügt',
'Camp Type deleted': 'Camp Typ gelöscht',
'Camp Type updated': 'Camp Typ aktualisiert',
'Camp Type': 'Camp Typ',
'Camp Types and Services': 'Camp Typen und Leistungen',
'Camp Types': 'Camp Typen',
'Camp added': 'Camp hinzugefügt',
'Camp deleted': 'Camp gelöscht',
'Camp updated': 'Camp aktualisiert',
'Camp': 'Camp',
'Campaign ID': 'Kampagnen ID',
'Camps': 'Camps',
'Can only disable 1 record at a time!': 'Ein Datensatz kann nur einzeln deaktiviert werden!',
'Can read PoIs either from an OpenStreetMap file (.osm) or mirror.': 'Kann PoIs entweder aus einer OpenStreetMap-Datei (.osm) oder einem Mirror lesen.',
'Cancel': 'Abbrechen',
'Cancel Log Entry': 'Protokolleintrag abbrechen',
'Cancel Shipment': 'Lieferung stornieren',
'Canceled': 'Abgebrochen',
'Cancelled': 'Abgesagt',
'Candidate Matches for Body %s': 'Übereinstimmung des Kandidaten mit Körper %s',
'Canned Fish': 'Fischkonserven',
'Cannot be empty': 'Darf nicht leer sein',
'Cannot disable your own account!': 'Eigenes Konto kann nicht deaktiviert werden.',
'Capacity': 'Maximale Kapazität',
'Capacity evaluated adding all defined housing unit capacities': 'Die Kapazität der Unterkunft wurde ermittelt aus der Summe der Kapazität der einzelnen Unterkunftseinheiten',
'Capacity of the housing unit for people who need to stay both day and night': 'Kapazität der Unterkunftseinheit für Personen, die tags und nachts dort untergebracht sind',
'Capacity of the shelter as a number of people': 'Kapazität der Unterkunft als Anzahl von Personen',
'Capacity (Max Persons)': 'Kapazität (Maximale Zahl von Personen)',
'Capture Information on Disaster Victim groups (Tourists, Passengers, Families, etc.)': 'Erfassung von Informationen über Opfergruppen einer Katastrophe (Touristen, Fahrgäste, Familien, etc.)',
'Capture Information on each disaster victim': 'Erfassung von Informationen über jedes Opfer einer Katastrophe.',
'Capturing the projects each organization is providing and where': 'Erfassen der Projekte, die von jeder Organisation bereitgestellt werden und wo',
'Cardiology': 'Kardiologie',
'Cargo Pier Depth': 'Wassertiefe Frachtpier',
'Case added': 'Fall angelegt',
'Case Archived': 'Fall Archiviert',
'Case Closed': 'Fall Abgeschlossen',
'Case closed on': 'Fall abgeschlossen am',
'Case Details': 'Details zum Fall',
'Case details updated': 'Fall aktualisiert',
'Case Flags': 'Fall Flaggen',
'Case Flag added': 'Fall Flagge hinzugefügt',
'Case Flag Details': 'Details zur Fall Flagge',
'Case Flag updated': 'Fall Flagge aktualisiert',
'Case Number': 'Fallnummer',
'Case Status': 'Fallstatus',
'Case Statuses': 'Fallstatus',
'Case Status upon Completion': 'Fallstatus nach Durchführung',
'Cases': 'Fälle',
'Cases with this flag are not transferable': 'Fälle mit dieser Flagge sind nicht transferierbar',
'Cases with this status are closed': 'Fälle mit diesem Status sind abgeschlossen',
'Cases with this status are not transferable': 'Fälle mit diesem Status sind nicht transferierbar',
'Cash': 'Bargeld',
'Cassava': 'Maniok',
'Casual Labor': 'Gelegenheitsarbeit',
'Casualties': 'Todesopfer',
'Catalog Details': 'Details zum Katalog',
'Catalog Item added': 'Katalog Eintrag hinzugefügt',
'Catalog Item deleted': 'Katalog Eintrag gelöscht',
'Catalog Item updated': 'Katalog Eintrag aktualisiert',
'Catalog Items': 'Katalog Einträge',
'Catalog added': 'Katalog hinzugefügt',
'Catalog deleted': 'Katalog gelöscht',
'Catalog updated': 'Katalog aktualisiert',
'Catalog': 'Katalog',
'Catalogs': 'Kataloge',
'Categories': 'Kategorien',
'Category': 'Kategorie',
'Ceilings, light fixtures': 'Decken, Beleuchtungskörper',
'Central point to record details on People': 'Zentrale Personenregistrierungsstelle',
'Certificate Catalog': 'Zertifikatskatalog',
'Certificate Details': 'Details zum Zertifikat',
'Certificate Status': 'Status des Zertifikats',
'Certificate added': 'Zertifikat hinzugefügt',
'Certificate deleted': 'Zertifikat gelöscht',
'Certificate updated': 'Zertifikat aktualisiert',
'Certificate': 'Zertifikat',
'Certificates': 'Zertifikate',
'Certification Details': 'Zertifizierungsdetails',
'Certification added': 'Zertifizierung hinzugefügt',
'Certification deleted': 'Zertifizierung gelöscht',
'Certification updated': 'Zertifizierung aktualisiert',
'Certification': 'Zertifizierung',
'Certifications': 'Zertifizierungen',
'Certifying Organization': 'Zertifizierende Organisation',
'Change Password': 'Passwort ändern',
'Channel': 'Kanal',
'Check-in date': 'Check-In Datum',
'Check-in denied': 'Check-in verweigert',
'Check-in overdue': 'Check-in überfällig',
'Check-out date': 'Check-Out Datum',
'Check-out denied': 'Check-out verweigert',
'Check ID': 'ID prüfen',
'Check Request': 'Anfrage prüfen',
'Check for errors in the URL, maybe the address was mistyped.': 'Prüfen Sie auf Fehler in der URL, vielleicht wurde die Adresse falsch eingegeben.',
'Check if the URL is pointing to a directory instead of a webpage.': 'Prüfen Sie, ob die URL auf ein Verzeichnis anstelle einer Webseite verweist.',
'Check outbox for the message status': 'Überprüfen Sie den Status der Nachricht im Nachrichtenausgang',
'Check to delete': 'Anwahl zum Löschen',
'Check Transferability': 'Transferierbarkeit prüfen',
'Check transferability for all current cases': 'Transferierbarkeit für alle aktuellen Fälle prüfen',
'Check': 'Prüfen',
'Checked': 'Geprüft',
'Checked-in successfully!': 'Check-in erfolgreich!',
'Checked-out successfully!': 'Check-out erfolgreich!',
'Checklist created': 'Prüfliste erstellt',
'Checklist deleted': 'Prüfliste gelöscht',
'Checklist of Operations': 'Checkliste für Operationen',
'Checklist updated': 'Checkliste aktualisiert',
'Checklist': 'Prüfliste',
'Checkpoint Advice': 'Checkpoint Hinweise',
'Chemical Hazard': 'Chemische Gefahr',
'Chemical, Biological, Radiological, Nuclear or High-Yield Explosive threat or attack': 'Chemische, biologische, radiologische, nukleare oder hochexplosive Bedrohung oder Angriff',
'Chicken': 'Huhn',
'Child (2-11)': 'Kind (2-11)',
'Child (< 18 yrs)': 'Kind (< 18 Jahre)',
'Child Abduction Emergency': 'Kindesentführungs-Notfall',
'Child headed households (<18 yrs)': 'Kindgeführte Haushalte (<18 Jahre)',
'Child': 'Kind',
'Children (2-5 years)': 'Kinder (2-5 Jahre)',
'Children (5-15 years)': 'Kinder (5-15 Jahre)',
'Children (< 2 years)': 'Kinder (< 2 Jahre)',
'Children in adult prisons': 'Kinder in Gefängnissen für Erwachsene',
'Children in boarding schools': 'Kinder in Internaten',
'Children in homes for disabled children': 'Kinder in Unterkünften für behinderte Kinder',
'Children in juvenile detention': 'Kinder in Jugendstrafheimen',
'Children in orphanages': 'Kinder in Waisenhäusern',
'Children living on their own (without adults)': 'Alleinlebende Kinder (ohne Erwachsene)',
'Children not enrolled in new school': 'Kinder, die nicht in der neuen Schule registriert sind',
'Children orphaned by the disaster': 'Durch die Katastrophe verwaiste Kinder',
'Children separated from their parents/caregivers': 'Von ihren Eltern/Betreuern getrennte Kinder',
'Children that have been sent to safe places': 'Kinder, die an sichere Orte gebracht wurden',
'Children who have disappeared since the disaster': 'Kinder, die seit der Katastrophe verschwunden sind',
'Chinese (Taiwan)': 'Chinesisch (Taiwan)',
'Cholera Treatment Capability': 'Cholera-Behandlungskapazität',
'Cholera Treatment Center': 'Cholera-Behandlungszentrum',
'Cholera Treatment': 'Cholera-Behandlung',
'Cholera-Treatment-Center': 'Cholera-Behandlungszentrum',
'Choose a new posting based on the new evaluation and team judgement. Severe conditions affecting the whole building are grounds for an UNSAFE posting. Localised Severe and overall Moderate conditions may require a RESTRICTED USE. Place INSPECTED placard at main entrance. Post all other placards at every significant entrance.': 'Wählen Sie eine neue Einstufung basierend auf der neuen Bewertung und dem Teamurteil. Schwerwiegende Bedingungen, die das gesamte Gebäude betreffen, sind Grund für eine Einstufung als UNSICHER. Lokal schwerwiegende und insgesamt moderate Bedingungen können eine EINGESCHRÄNKTE NUTZUNG erfordern. Platzieren Sie das Plakat GEPRÜFT am Haupteingang. Bringen Sie alle anderen Plakate an jedem wichtigen Eingang an.',
'Church': 'Kirche',
'City': 'Ort/Stadt',
'City / Town / Village': 'Stadt / Ort / Dorf',
'Civil Emergency': 'Ziviler Notfall',
'Cladding, glazing': 'Verkleidung, Verglasung',
'Clear': 'Löschen',
'Clear filter': 'Filter zurücksetzen',
'Click on the link %(url)s to reset your password': 'Klicken Sie auf den Link %(url)s, um Ihr Kennwort zurückzusetzen',
'Click on the link %(url)s to verify your email': 'Klicken Sie auf den Link %(url)s, um Ihre E-Mail-Adresse zu bestätigen',
'Click where you want to open Streetview': 'Klicken Sie auf die Stelle, an der Streetview geöffnet werden soll',
'Client Registration': 'Personenregistrierung',
'Client Reservation': 'Personenreservierung',
'Client was already checked-in': 'Person war bereits eingecheckt',
'Client was already checked-out': 'Person war bereits ausgecheckt',
'Clinical Laboratory': 'Klinisches Labor',
'Clinical Operations': 'Klinikbetrieb',
'Clinical Status': 'Klinischer Status',
'Closed': 'Geschlossen',
'Closed at': 'Geschlossen am',
'Closed Cases': 'Abgeschlossene Fälle',
'Clothing': 'Kleidung',
'Cluster Details': 'Details zum Cluster',
'Cluster Distance': 'Cluster Abstand',
'Cluster Subsector Details': 'Cluster Teilbereich Details',
'Cluster Subsector added': 'Cluster Teilbereich hinzugefügt',
'Cluster Subsector deleted': 'Cluster Teilbereich gelöscht',
'Cluster Subsector updated': 'Cluster Teilbereich aktualisiert',
'Cluster Subsector': 'Cluster Teilbereich',
'Cluster Subsectors': 'Cluster Teilbereiche',
'Cluster Threshold': 'Cluster Schwellwert',
'Cluster added': 'Cluster hinzugefügt',
'Cluster deleted': 'Cluster gelöscht',
'Cluster updated': 'Cluster aktualisiert',
'Cluster': 'Cluster',
'Cluster(s)': 'Cluster',
'Clusters': 'Cluster',
'Cold Wave': 'Kältewelle',
'Collapse, partial collapse, off foundation': 'Eingestürzt, teilweise eingestürzt, vom Fundament verschoben',
'Collective center': 'Sammelunterkunft',
'Color for Underline of Subheadings': 'Farbe der Unterstreichungslinie von untergeordneten Überschriften',
'Color of Buttons when hovering': 'Farbe von Schaltflächen beim Überfahren mit der Maus',
'Color of bottom of Buttons when not pressed': 'Farbe der Unterseite von Schaltflächen im nicht gedrückten Zustand',
'Color of bottom of Buttons when pressed': 'Farbe der Unterseite von Schaltflächen im gedrückten Zustand',
'Color of dropdown menus': 'Farbe des Dropdown-Menüs',
'Color of selected Input fields': 'Farbe der ausgewählten Eingabefelder',
'Color of selected menu items': 'Farbe ausgewählter Menüpunkte',
'Columns, pilasters, corbels': 'Säulen, Pfeiler, Konsolen',
'Combined Method': 'Kombinierte Methode',
'Come back later. Everyone visiting this site is probably experiencing the same problem as you.': 'Kommen Sie später noch einmal wieder. Jeder, der diese Seite besucht, hat derzeit wahrscheinlich das gleiche Problem wie Sie.',
'Come back later.': 'Kommen Sie später noch einmal wieder.',
'Comments': 'Kommentare',
'Comments permitted?': 'Kommentare zugelassen?',
'Commercial/Offices': 'Kommerziell / Büros',
'Commit Date': 'Datum der Zusage',
'Commit from %s': 'Zusage von %s',
'Commit': 'Zusage',
'Commit Status': 'Status der Zusage',
'Commiting a changed spreadsheet to the database': 'Übernehmen einer geänderten Tabelle in die Datenbank.',
'Commitment Added': 'Zusage hinzugefügt',
'Commitment Canceled': 'Zusage abgebrochen',
'Commitment Details': 'Details zur Zusage',
'Commitment Item Details': 'Details zum zugesagten Artikel',
'Commitment Item added': 'Zugesagten Artikel hinzugefügt',
'Commitment Item deleted': 'Zugesagten Artikel gelöscht',
'Commitment Item updated': 'Zugesagten Artikel aktualisiert',
'Commitment Items': 'Zugesagte Artikel',
'Commitment Status': 'Status der Zusage',
'Commitment Updated': 'Zusage aktualisiert',
'Commitment': 'Zusage',
'Commitments': 'Zusagen',
'Committed By': 'Zugesagt durch',
'Committed': 'Zugesagt',
'Committed Items': 'Zugesagte Artikel',
'Committed Skills': 'Zugesagte Fähigkeiten',
'Committing Inventory': 'Zusagender Bestand',
'Communication problems': 'Kommunikationsprobleme',
'Community Health Center': 'Gesundheitszentrum der Gemeinschaft',
'Community Member': 'Mitglied der Gemeinschaft',
'Competencies': 'Kompetenzen',
'Competency Details': 'Details zu den Kompetenzen',
'Competency Rating Catalog': 'Kompetenzbewertungskatalog',
'Competency Rating Details': 'Details zur Kompetenzbewertung',
'Competency Rating added': 'Kompetenzbewertung hinzugefügt',
'Competency Rating deleted': 'Kompetenzbewertung gelöscht',
'Competency Rating updated': 'Kompetenzbewertung aktualisiert',
'Competency Ratings': 'Kompetenzbewertungen',
'Competency added': 'Kompetenz hinzugefügt',
'Competency deleted': 'Kompetenz gelöscht',
'Competency updated': 'Kompetenz aktualisiert',
'Competency': 'Kompetenz',
'Complete': 'Vollständig',
'Completed': 'Beendet',
'Complete Stock Adjustment': 'Bestandsanpassung abschließen',
'Completion Question': 'Abschlussfrage',
'Complexion': 'Gesichtsfarbe',
'Compose': 'Erstellen',
'Compromised': 'Gefährdet',
'Concrete frame': 'Betonrahmen',
'Concrete shear wall': 'Betonscherwand',
'Condition': 'Bedingung',
'Conduct a Disaster Assessment': 'Durchführung einer Katastrophenbeurteilung',
'Configuration': 'Konfiguration',
'Configurations': 'Konfigurationen',
'Configure Run-time Settings': 'Laufzeiteinstellungen konfigurieren',
'Confirm Shipment Received': 'Bestätigen der erhaltenen Lieferung',
'Confirmed': 'Bestätigt',
'Confirming Organization': 'Organisation bestätigen',
'Conflict Details': 'Details zum Konflikt',
'Conflict Resolution': 'Konfliktlösung',
'Connection': 'Verbindung',
'Connect Parser': 'Parser verbinden',
'Consignment Note': 'Warenbegleitschein',
'Constraints Only': 'Nur Bedingungen',
'Consumable': 'Verbrauchsartikel',
'Contact Data': 'Kontaktdaten',
'Contact Details': 'Details zum Kontakt',
'Contact Info': 'Kontaktinformationen',
'Contact Information Added': 'Kontaktinformationen hinzugefügt',
'Contact Information Deleted': 'Kontaktinformationen gelöscht',
'Contact Information Updated': 'Kontaktinformationen aktualisiert',
'Contact Information': 'Kontaktinformationen',
'Contact Method': 'Kontaktmethode',
'Contact Name': 'Name des Ansprechpartners',
'Contact Person': 'Kontaktperson',
'Contact Person / Camp Owner': 'Kontaktperson / Camp-Betreiber',
'Contact Phone': 'Telefonnummer des Kontaktes',
'Contact details': 'Details zum Kontakt',
'Contact information added': 'Kontaktinformationen hinzugefügt',
'Contact information deleted': 'Kontaktinformationen gelöscht',
'Contact information updated': 'Kontaktinformationen aktualisiert',
'Contact Us': 'Kontaktieren Sie uns',
'Contact us': 'Kontaktieren Sie uns',
'Contact': 'Kontakt',
'Contacts': 'Kontakte',
'Contact Description': 'Kontaktbeschreibung',
'Content': 'Inhalt',
'Contents': 'Inhalte',
'Content Management': 'Content Management',
'Content Management System': 'Content Management System',
'Contract End Date': 'Enddatum des Vertrags',
'Contributor': 'Mitwirkender',
'Conversion Tool': 'Umrechnungstool',
'Cooking NFIs': 'NFIs zum Kochen',
'Cooking Oil': 'Speiseöl',
'Coordinate Conversion': 'Koordinatentransformation',
'Coping Activities': 'Bewältigungsaktivitäten',
'Copy': 'Kopieren',
'Cost Type': 'Kostentyp',
'Cost per Megabyte': 'Kosten pro Megabyte',
'Cost per Minute': 'Kosten pro Minute',
'Count': 'Anzahl',
'Country of Residence': 'Land des Wohnsitzes',
'Country': 'Land',
'County': 'Bezirk',
'County / District': 'Kreis / Bezirk',
'Course Catalog': 'Katalog der Kurse',
'Course Certificate Details': 'Details zum Kurszertifikat',
'Course Certificate added': 'Kurszertifikat hinzugefügt',
'Course Certificate deleted': 'Kurszertifikat gelöscht',
'Course Certificate updated': 'Kurszertifikat aktualisiert',
'Course Certificates': 'Kurszertifikate',
'Course Details': 'Details zum Kurs',
'Course added': 'Kurs hinzugefügt',
'Course deleted': 'Kurs gelöscht',
'Course updated': 'Kurs aktualisiert',
'Course': 'Kurs',
'Create': 'Anlegen',
'Create & manage Distribution groups to receive Alerts': 'Erstellen und Verwalten von Verteilergruppen um Warnhinweise zu empfangen',
'Create Activity Report': 'Aktivitätsreport erstellen',
'Create Activity Type': 'Aktivitätstyp erstellen',
'Create Activity': 'Aktivität erstellen',
'Create Airport': 'Flughafen erstellen',
'Create Allowance Information': 'Information zum Taschengeld erstellen',
'Create Appointment': 'Termin erstellen',
'Create Appointment Type': 'Terminart erstellen',
'Create Assessment': 'Beurteilung erstellen',
'Create Asset': 'Anlage erstellen',
'Create Bed Type': 'Bettentyp erstellen',
'Create Brand': 'Marke erstellen',
'Create Budget': 'Budget erstellen',
'Create Bundle': 'Produktpaket erstellen',
'Create Case': 'Fall erstellen',
'Create Case Flag': 'Fall Flagge erstellen',
'Create Case Status': 'Fallstatus erstellen',
'Create Catalog Item': 'Katalogeintrag erstellen',
'Create Catalog': 'Katalog erstellen',
'Create Certificate': 'Zertifikat erstellen',
'Create Checklist': 'Prüfliste erstellen',
'Create Cholera Treatment Capability Information': 'Informationen zur Cholera-Behandlungskapazität erstellen',
'Create Cluster Subsector': 'Cluster Teilbereich erstellen',
'Create Cluster': 'Cluster erstellen',
'Create Competency Rating': 'Kompetenzbewertung erstellen',
'Create Contact': 'Kontaktperson erstellen',
'Create Course': 'Kurs erstellen',
'Create Dead Body Report': 'Leichenbericht erstellen',
'Create Department': 'Abteilung erstellen',
'Create Event': 'Neues Ereignis erstellen',
'Create Event Type': 'Ereignistyp erstellen',
'Create Facility': 'Einrichtung erstellen',
'Create Facility Type': 'Einrichtungstyp erstellen',
'Create Feature Layer': 'Kartenebene für Objektart erstellen',
'Create Group Entry': 'Gruppeneintrag erstellen',
'Create Group': 'Gruppe erstellen',
'Create Heliport': 'Hubschrauberlandeplatz erstellen',
'Create Hospital': 'Krankenhaus erstellen',
'Create Identification Report': 'Identifizierungsbericht erstellen',
'Create Impact Assessment': 'Folgenabschätzung erstellen',
'Create Incident Report': 'Vorfallbericht erstellen',
'Create Incident Type': 'Vorfalltyp erstellen',
'Create Incident': 'Vorfall erstellen',
'Create Item Category': 'Element Kategorie erstellen',
'Create Item Pack': 'Artikelgruppe erstellen',
'Create Item': 'Neuen Artikel anlegen',
'Create Job Title': 'Berufsbezeichnung erstellen',
'Create Kit': 'Ausstattung (Kit) anlegen',
'Create Kitting': 'Kit zusammenstellen',
'Create Layer': 'Kartenebene anlegen',
'Create Location': 'Standort anlegen',
'Create Location Hierarchy': 'Standorthierarchie anlegen',
'Create Map Profile': 'Kartenkonfiguration anlegen',
'Create Map Style': 'Kartensymbolisierung erstellen',
'Create Marker': 'Marker/Symbol anlegen',
'Create Member': 'Mitglied erstellen',
'Create Membership Type': 'Mitgliedstyp erstellen',
'Create Mobile Impact Assessment': 'Mobile Folgenabschätzung erstellen',
'Create Note': 'Notiz erstellen',
'Create Office': 'Büro anlegen',
'Create Office Type': 'Bürotyp anlegen',
'Create Organization': 'Organisation anlegen',
'Create Organization Type': 'Organisationstyp anlegen',
'Create Personal Effects': 'Persönliche Habe anlegen',
'Create PoI Type': 'PoI-Typ erstellen',
'Create Point of Interest': 'PoI erstellen',
'Create Post': 'Beitrag erstellen',
'Create Program': 'Programm erstellen',
'Create Project': 'Projekt anlegen',
'Create Projection': 'Kartenprojektion anlegen',
'Create Rapid Assessment': 'Schnell-Beurteilung anlegen',
'Create Report': 'Bericht anlegen',
'Create Repository': 'Repository anlegen',
'Create Request': 'Anfrage anlegen',
'Create Request Template': 'Anfragevorlage anlegen',
'Create Residents Report': 'Bewohnerliste anlegen',
'Create Resource': 'Ressource anlegen',
'Create River': 'Neuen Fluss anlegen',
'Create Role': 'Neue Rolle anlegen',
'Create Room': 'Neues Zimmer anlegen',
'Create Seaport': 'Seehafen erstellen',
'Create Scenario': 'Neues Szenario anlegen',
'Create Sector': 'Neuen Bereich anlegen',
'Create Series': 'Serie erstellen',
'Create Service Profile': 'Neues Leistungsprofil anlegen',
'Create Shelter Service': 'Unterkunft Leistung anlegen',
'Create Shelter Status': 'Unterkunftsstatus erstellen',
'Create Shelter Type': 'Neue Art der Unterkunft anlegen',
'Create Shelter': 'Neue Unterkunft anlegen',
'Create Skill Type': 'Art der Qualifikation / Fähigkeit anlegen',
'Create Skill': 'Fähigkeiten / Qualifikationen anlegen',
'Create Staff Member': 'Neuen Mitarbeiter anlegen',
'Create Staff Type': 'Mitarbeitertyp erstellen',
'Create Status': 'Neuen Status anlegen',
'Create Supplier': 'Neuen Lieferanten anlegen',
'Create Task': 'Neue Aufgabe anlegen',
'Create Theme': 'Neues Thema anlegen',
'Create User': 'Neuen Benutzer anlegen',
'Create Training Event': 'Neuen Schulungskurs anlegen',
'Create Vehicle': 'Fahrzeug erstellen',
'Create Vehicle Type': 'Fahrzeugtyp erstellen',
'Create Volunteer': 'Neuen Freiwilligen anlegen',
'Create Volunteer Role': 'Freiwilligenrolle erstellen',
'Create Warehouse': 'Neues Warenlager anlegen',
'Create Warehouse Type': 'Warenlagertyp erstellen',
'Create a Person': 'Neue Person anlegen',
'Create a group entry in the registry.': 'Erstellen Sie eine neue Gruppe in der Registry.',
'Create, enter, and manage surveys.': 'Erstellen, Eingeben und Verwalten von Umfragen.',
'Created By': 'Erstellt von',
'Created On': 'Erstellt am',
'Creation of Surveys': 'Erstellung von Umfragen',
'Credential Details': 'Details zur Qualifikation',
'Credential added': 'Qualifikation hinzugefügt',
'Credential deleted': 'Qualifikation gelöscht',
'Credential updated': 'Qualifikation aktualisiert',
'Credentialling Organization': 'Bescheinigende Organisation',
'Credentials': 'Qualifikationen',
'Credit Card': 'Kreditkarte',
'Crime': 'Kriminalität',
'Criteria': 'Kriterien',
'CTN': 'CTN',
'Currency': 'Währung',
'Current': 'Aktuell',
'Current Address': 'Aktuelle Adresse',
'Current Appointments': 'Aktuelle Termine',
'Current Cases': 'Aktuelle Fälle',
'Current Entries': 'Aktuelle Einträge',
'Current Group Members': 'Aktuelle Gruppenmitglieder',
'Current Home Address': 'Aktuelle Heimatadresse',
'Current Identities': 'Aktuelle Identitäten',
'Current Location': 'Aktueller Standort',
'Current Log Entries': 'Aktuelle Protokolleinträge',
'Current Memberships': 'Aktuelle Mitgliedschaften',
'Current Needs': 'Aktuelle Bedarfsmeldungen',
'Current Population': 'Aktuelle Belegungszahl',
'Current Population Availability (Day and Night)': 'Aktuelle maximale Belegungszahl (Tag und Nacht)',
'Current Records': 'Aktuelle Datensätze',
'Current Registrations': 'Aktuelle Registrierungen',
'Current Status': 'Aktueller Status',
'Current Team Members': 'Aktuelle Team Mitglieder',
'Current Total': 'Aktuelle Summe',
'Current Twitter account': 'Aktueller Benutzeraccount bei Twitter',
'Current community priorities': 'Aktuelle Priorisierung in der Community',
'Current general needs': 'Aktueller allgemeiner Bedarf',
'Current greatest needs of vulnerable groups': 'Wichtigste Bedürfnisse der gefährdeten Gruppen',
'Current health problems': 'Derzeitige Gesundheitsprobleme',
'Current number of patients': 'Aktuelle Anzahl von Patienten',
'Current problems, categories': 'Aktuelle Probleme, Kategorien',
'Current problems, details': 'Aktuelle Probleme, Details',
'Current request': 'Aktuelle Anfrage',
'Current response': 'Aktuelle Antwort',
'Current session': 'Aktuelle Sitzung',
'Currently no Certifications registered': 'Derzeit sind keine Zertifizierungen registriert',
'Currently no Competencies registered': 'Derzeit sind keine Kompetenzen registriert',
'Currently no Course Certificates registered': 'Derzeit sind keine Kurszertifikate registriert',
'Currently no Credentials registered': 'Derzeit sind keine Qualifikationen registriert',
'Currently no Missions registered': 'Derzeit sind keine Aufträge registriert',
'Currently no Skill Equivalences registered': 'Derzeit sind keine Fähigkeits-Vergleichbarkeiten registriert',
'Currently no Trainings registered': 'Derzeit keine Schulungen registriert',
'Currently no entries in the catalog': 'Derzeit keine Einträge im Katalog',
'Customs Capacity': 'Zollkapazität',
'Customs Warehousing Storage Capacity': 'Zollwarenlager Kapazität',
'DNA Profile': 'DNA-Profil',
'DNA Profiling': 'DNA-Profiling',
'Dam Overflow': 'Dammüberlauf',
'Damage': 'Beschädigung',
'Dangerous Person': 'Gefährliche Person',
'Dashboard': 'Dashboard',
'Data uploaded': 'Daten hochgeladen',
'Data': 'Daten',
'Database': 'Datenbank',
'Date & Time': 'Datum und Zeit',
'Date Available': 'Verfügbar ab',
'Date Created': 'Erstellt am',
'Date Due': 'Fällig am',
'Date for Follow-up': 'Wiedervorlage am',
'Date is required when marking the appointment as completed': 'Datumsangabe erforderlich wenn der Termin als beendet markiert werden soll',
'Date Joined': 'Eintrittsdatum',
'Date Modified': 'Geändert am',
'Date Published': 'Veröffentlicht am',
'Date Question': 'Datumsfrage',
'Date Received': 'Erhalten am',
'Date Released': 'Datum der Veröffentlichung',
'Date Requested': 'Angefordert am',
'Date Required': 'Benötigt am',
'Date Required Until': 'Benötigt bis',
'Date Needed By': 'Benötigt bis',
'Date Sent': 'Gesendet am',
'Date Taken': 'Aufgenommen am',
'Date unknown': 'Datum unbekannt',
'Date Until': 'Datum bis',
'Date and Time': 'Datum und Zeit',
'Date and time this report relates to.': 'Datum und Uhrzeit auf die sich dieser Bericht bezieht.',
'Date of Birth': 'Geburtsdatum',
'Date of Latest Information on Beneficiaries Reached': 'Datum der letzten Informationen über erreichte Begünstigte',
'Date of Report': 'Datum des Berichts',
'Date Resigned': 'Datum der Kündigung',
'Date': 'Datum',
'Date/Time of Find': 'Datum/Zeit des Fundes',
'Date/Time when found': 'Datum / Uhrzeit, wann festgestellt',
'Date/Time when last seen': 'Datum / Uhrzeit, wann zuletzt gesehen',
'Date/Time': 'Datum/Zeit',
'Day': 'Tag',
'Days': 'Tage',
'De-duplicate': 'Deduplizieren',
'De-duplicator': 'Duplikate entfernen',
'Dead Body Details': 'Details zur Leiche ',
'Dead Body Reports': 'Leichenbericht',
'Dead Body': 'Leiche',
'Dead body report added': 'Leichenbericht hinzugefügt',
'Dead body report deleted': 'Leichenbericht gelöscht',
'Dead body report updated': 'Leichenbericht aktualisiert',
'Deaths in the past 24h': 'Tote der letzten 24h',
'Deaths/24hrs': 'Todesfälle/24 Std.',
'Decimal Degrees': 'Dezimalgrade',
'Decision': 'Entscheidung',
'Decomposed': 'Verwest',
'Default Base layer?': 'Standard Hintergrundkartenebene?',
'Default Event Type': 'Standard Ereignistyp',
'Default Location': 'Standard Gebiet/Standort',
'Default Height of the map window.': 'Standardhöhe des Kartenfensters.',
'Default Map': 'Standard-Kartenfenster',
'Default Marker': 'Standardsymbol',
'Default Width of the map window.': 'Standardbreite des Kartenfensters.',
'Default Status': 'Standard Status',
'Default map question': 'Standard Kartenfrage',
'Default?': 'Standard?',
'Default synchronization policy': 'Standard-Synchronisationsverfahren',
'Defecation area for animals': 'Kotbereich für Tiere',
'Define Scenarios for allocation of appropriate Resources (Human, Assets & Facilities).': 'Definieren Sie Szenarien für die Zuordnung der entsprechenden Ressourcen (Menschen, Anlagen und Einrichtungen).',
'Defines the icon used for display of features on handheld GPS.': 'Definiert das Symbol, welches für die Anzeige der Objekte auf mobilen GPS-Geräten verwendet wird.',
'Defines the icon used for display of features on interactive map & KML exports.': 'Definiert das Symbol, welches für die Anzeige der Objekte auf der interaktiven Karte sowie für die KML Exporte verwendet wird.',
'Defines the marker used for display & the attributes visible in the popup.': 'Definiert das Symbol, das für die Anzeige und die Attribute im Popup-Fenster verwendet wird.',
'Degrees must be a number between -180 and 180': 'Grad muss eine Zahl zwischen -180 und 180 sein.',
'Delete Allowance Information': 'Informationen zum Taschengeld löschen',
'Delete Alternative Item': 'Alternativen Artikel löschen',
'Delete Appointment Type': 'Terminart löschen',
'Delete Assessment Summary': 'Zusammenfassung der Beurteilung löschen',
'Delete Assessment': 'Beurteilung löschen',
'Delete Asset Log Entry': 'Löschen des Protokolleintrags der Anlage',
'Delete Asset': 'Anlage löschen',
'Delete Baseline Type': 'Typ des Referenzdatums löschen',
'Delete Baseline': 'Referenzdatum löschen',
'Delete Brand': 'Marke löschen',
'Delete Budget': 'Budget löschen',
'Delete Bundle': 'Produktpaket löschen',
'Delete Case Flag': 'Fall Flagge löschen',
'Delete Case Status': 'Fallstatus löschen',
'Delete Catalog Item': 'Katalogeintrag löschen',
'Delete Catalog': 'Katalog löschen',
'Delete Certificate': 'Zertifikat löschen',
'Delete Certification': 'Zertifizierung löschen',
'Delete Cluster Subsector': 'Cluster Teilbereich löschen',
'Delete Cluster': 'Cluster löschen',
'Delete Commitment Item': 'Zugesagten Artikel löschen',
'Delete Commitment': 'Zusage löschen',
'Delete Competency Rating': 'Kompetenzbewertung löschen',
'Delete Competency': 'Kompetenz löschen',
'Delete Contact Information': 'Kontaktinformation löschen',
'Delete Course Certificate': 'Kurszertifikat löschen',
'Delete Course': 'Kurs löschen',
'Delete Credential': 'Qualifikation löschen',
'Delete Document': 'Dokument löschen',
'Delete Donor': 'Spender löschen',
'Delete Entry': 'Eintrag löschen',
'Delete Event': 'Ereignis löschen',
'Delete Event Type': 'Ereignistyp löschen',
'Delete Facility': 'Einrichtung löschen',
'Delete Facility Type': 'Einrichtungstyp löschen',
'Delete Feature Layer': 'Objekt-Layer löschen',
'Delete Group': 'Gruppe löschen',
'Delete Hospital': 'Krankenhaus löschen',
'Delete Image': 'Grafik löschen',
'Delete Impact Type': 'Auswirkungstyp löschen',
'Delete Impact': 'Auswirkung löschen',
'Delete Incident Report': 'Vorfallbericht löschen',
'Delete Item Category': 'Artikel Kategorie löschen',
'Delete Item Pack': 'Artikelgruppe löschen',
'Delete Item': 'Eintrag löschen',
'Delete Job Role': 'Tätigkeit löschen',
'Delete Key': 'Schlüssel löschen',
'Delete Kit': 'Ausstattung (Kit) löschen',
'Delete Layer': 'Ebene löschen',
'Delete Level 1 Assessment': 'Stufe 1 Beurteilung löschen',
'Delete Level 2 Assessment': 'Stufe 2 Beurteilung löschen',
'Delete Location': 'Standort löschen',
'Delete Map Profile': 'Kartenkonfiguration löschen',
'Delete Marker': 'Marker/Symbol löschen',
'Delete Membership': 'Mitgliedschaft löschen',
'Delete Message': 'Nachricht löschen',
'Delete Mission': 'Auftrag löschen',
'Delete Need Type': 'Anforderungstyp löschen',
'Delete Need': 'Anforderung löschen',
'Delete Office': 'Büro löschen',
'Delete Office Type': 'Bürotyp löschen',
'Delete Organization': 'Organisation löschen',
'Delete Organization Type': 'Organisationstyp löschen',
'Delete Peer': 'Peer löschen',
'Delete Person': 'Person löschen',
'Delete Photo': 'Foto löschen',
'Delete Population Statistic': 'Bevölkerungsstatistik löschen',
'Delete Position': 'Position löschen',
'Delete Project': 'Projekt löschen',
'Delete Projection': 'Koordinatensystemprojektion löschen',
'Delete Rapid Assessment': 'Schnell-Beurteilung löschen',
'Delete Received Item': 'Erhaltenen Artikel löschen',
'Delete Received Shipment': 'Erhaltene Lieferung löschen',
'Delete Record': 'Datensatz löschen',
'Delete Report': 'Bericht löschen',
'Delete Request Item': 'Anfrageelement löschen',
'Delete Request': 'Anfrage löschen',
'Delete Residents Report': 'Bewohnerliste löschen',
'Delete Resource': 'Ressource löschen',
'Delete Room': 'Raum löschen',
'Delete Scenario': 'Szenario löschen',
'Delete Section': 'Abschnitt löschen',
'Delete Sector': 'Bereich löschen',
'Delete Sent Item': 'Gesendeten Artikel löschen',
'Delete Sent Shipment': 'Gesendete Lieferung löschen',
'Delete Service Profile': 'Service-Profil löschen',
'Delete Setting': 'Einstellung löschen',
'Delete Skill Equivalence': 'Fähigkeits-Vergleichbarkeit löschen',
'Delete Skill Provision': 'Fähigkeits-Bereitstellung löschen',
'Delete Skill Type': 'Fähigkeitstyp löschen',
'Delete Skill': 'Fähigkeit löschen',
'Delete Staff Type': 'Mitarbeitertyp löschen',
'Delete Status': 'Status löschen',
'Delete Subscription': 'Abonnement löschen',
'Delete Subsector': 'Teilbereich löschen',
'Delete Survey Answer': 'Umfrage - Antwort Löschen',
'Delete Survey Question': 'Umfrage - Frage löschen',
'Delete Survey Series': 'Umfrage Serie löschen',
'Delete Survey Template': 'Umfrage Vorlage löschen',
'Delete Training': 'Schulung löschen',
'Delete Unit': 'Einheit löschen',
'Delete User': 'Benutzer löschen',
'Delete Volunteer': 'Freiwilligen löschen',
'Delete Warehouse': 'Warenlager löschen',
'Delete from Server?': 'Vom Server löschen?',
'Delete': 'Löschen',
'Deliver To': 'Liefern an',
'Delphi Decision Maker': 'Delphi Entscheidungsträger',
'Demographic': 'Demografisch',
'Demonstrations': 'Vorführungen',
'Dental Examination': 'Zahnärztliche Untersuchung',
'Dental Profile': 'Zahnärztliches Profil',
'Deny Check-in': 'Check-in verweigern',
'Deny Check-out': 'Check-out verweigern',
'Deny the person to check-in when this flag is set': 'Check-in der Person verweigern wenn diese Flagge gesetzt ist',
'Deny the person to check-out when this flag is set': 'Check-out der Person verweigern wenn diese Flagge gesetzt ist',
'Department / Unit': 'Abteilung / Einheit',
'Department Catalog': 'Abteilungskatalog',
'Departures': 'Abgänge',
'Dependent Person': 'Abhängige Person',
'Describe the condition of the roads to your hospital.': 'Beschreiben Sie den Zustand der Strassen zu Ihrem Krankenhaus.',
"Describe the procedure which this record relates to (e.g. 'medical examination')": 'Beschreiben Sie den Arbeitsablauf der sich auf diesen Eintrag bezieht (z. B. \\ " ärztliche Untersuchung")',
'Describe the meaning, reasons and potential consequences of this status': 'Beschreiben Sie die Bedeutung, Gründe und möglichen Konsequenzen dieses Status',
'Description of Contacts': 'Beschreibung der Kontakte',
'Description of defecation area': 'Beschreibung des Sanitärbereichs',
'Description of drinking water source': 'Beschreibung der Herkunft des Trinkwassers',
'Description of sanitary water source': 'Beschreibung der Herkunft des Sanitärwassers',
'Description of water source before the disaster': 'Beschreibung der Herkunft des Wassers vor der Katastrophe',
'Description': 'Beschreibung',
'Desire to remain with family': 'Wunsch bei der Familie zu bleiben',
'Destination': 'Ziel',
'Destroyed': 'Zerstört',
'Detailed Description/URL': 'Genaue Beschreibung/URL',
'Details field is required!': 'Detailfeld ist erforderlich!',
'Dialysis': 'Dialyse',
'Diaphragms, horizontal bracing': 'Deckenscheiben, horizontale Aussteifung',
'Diarrhea': 'Durchfall',
'Dignitary Visit': 'Besuch eines Würdenträgers',
'Direction': 'Richtung',
'Disable': 'Deaktivieren',
'Disabled participating in coping activities': 'Behinderte beteiligen sich an Bewältigungsaktivitäten',
'Disabled': 'Deaktiviert',
'Disabled?': 'Behindert?',
'Disappeared': 'Verschwunden',
'Disaster Assessments': 'Katastrophenbeurteilungen',
'Disaster Victim Identification': 'Katastrophen Opferidentifikation',
'Disaster Victim Registry': 'Katastrophen Opferverzeichnis',
'Disaster clean-up/repairs': 'Aufräumarbeiten/Reparaturen nach der Katastrophe',
'Discharge (cusecs)': 'Abfluss (Kubikfuß pro Sekunde)',
'Discharges/24hrs': 'Entlassungen/24 Std.',
'Discussion Forum on item': 'Diskussionsforum über Eintrag',
'Discussion Forum': 'Diskussionsforum',
'Disease vectors': 'Krankheitsvektoren',
'Dispensary': 'Ambulatorium',
'Displaced Populations': 'Vertriebene Bevölkerung',
'Displaced': 'Vertriebene',
'Display Polygons?': 'Anzeige Polygone?',
'Display Routes?': 'Anzeige Routen?',
'Display Tracks?': 'Anzeige Wege?',
'Display Waypoints?': 'Anzeige Wegpunkte?',
'Distance between defecation area and water source': 'Distanz zwischen Sanitärbereich und Wasserquelle',
'Distance from %s:': 'Abstand von %s:',
'Distance(Kms)': 'Distanz (km)',
'Distribution groups': 'Verteilergruppen',
'Distribution': 'Verteilung',
'District': 'Bezirk',
'Rural District / District': 'Landkreis / Kreis',
'Do you really want to delete these records?': 'Sollen diese Datensätze wirklich gelöscht werden?',
'Do you want to cancel this received shipment? The items will be removed from the Inventory. This action CANNOT be undone!': 'Möchten Sie diese erhaltene Lieferung stornieren? Die Artikel werden aus dem Bestand entfernt werden. Diese Aktion kann NICHT rückgängig gemacht werden!',
'Do you want to cancel this sent shipment? The items will be returned to the Inventory. This action CANNOT be undone!': 'Möchten Sie diese abgeschickte Sendung wirklich stornieren? Die Artikel werden an die Bestandserfassung zurückgegeben werden. Diese Aktion kann NICHT rückgängig gemacht werden!',
'Do you want to receive this shipment?': 'Wollen Sie die Lieferung empfangen?',
'Do you want to send these Committed items?': 'Wollen Sie die zugesagten Artikel schicken?',
'Do you want to send this shipment?': 'Wollen Sie diese Lieferung abschicken?',
'Document Details': 'Details zum Dokument',
'Document Scan': 'Dokumentenscan',
'Document added': 'Dokument hinzugefügt',
'Document deleted': 'Dokument gelöscht',
'Document updated': 'Dokument aktualisiert',
'Documents and Photos': 'Dokumente und Fotos',
'Documents': 'Dokumente',
'Does this facility provide a cholera treatment center?': 'Verfügt diese Einrichtung über ein Behandlungscenter für Cholera?',
'Doing nothing (no structured activity)': 'Untätig (keine strukturierte Aktivität)',
'Dollars': 'Dollar',
'Domain': 'Domäne',
'Domestic chores': 'Hausarbeit',
'Donated': 'Gespendet',
'Donating Organization': 'Spendende Organisation',
'Donation': 'Spende',
'Donations': 'Spenden',
'Donation Certificate': 'Spendenzertifikat',
'Donations Needed': 'Spenden benötigt',
'Donation Phone #': 'Spenden-Telefonnummer',
'Donor Details': 'Details zum Spender',
'Donor added': 'Spender hinzugefügt',
'Donor deleted': 'Spender gelöscht',
'Donor updated': 'Spender aktualisiert',
'Donor': 'Spender',
'Donors Report': 'Bericht zu Spendern',
'Donors': 'Spender',
'Door frame': 'Türrahmen',
'Download PDF': 'PDF herunterladen',
'Download Template': 'Vorlage herunterladen',
'Draft': 'Entwurf',
'Drainage': 'Abfluss',
'Draw on Map': 'Auf Karte anzeigen',
'Drawing up a Budget for Staff & Equipment across various Locations.': 'Aufstellung eines Budgets für Mitarbeiter und Ausrüstung über mehrere Standorte',
'Drill Down by Group': 'Recherche nach Gruppe',
'Drill Down by Incident': 'Recherche nach Vorfall',
'Drill Down by Shelter': 'Recherche nach Unterkunft',
'Drivers': 'Fahrer',
'Driver Phone Number': 'Telefonnummer des Fahrers',
'Driving License': 'Führerschein',
'Drought': 'Dürre',
'Drop-off Location for Goods?': 'Sammelstelle für Sachspenden?',
'Drugs': 'Drogen',
'Dry Dock': 'Trockendock',
'Due Follow-ups': 'Fällige Wiedervorlagen',
'Dug Well': 'Schachtbrunnen',
'Duplicate?': 'Duplikat?',
'Dust Storm': 'Staubsturm',
'Dwelling': 'Wohnstätte',
'EMS Reason': 'EMS-Grund',
'ER Status Reason': 'Grund für Status der Notaufnahme',
'ER Status': 'Status der Notaufnahme',
'Early Recovery': 'Früher Wiederaufbau',
'Earthquake': 'Erdbeben',
'EasyOpt No.': 'EasyOpt Nr.',
'EasyOpt Number': 'EasyOpt Nummer',
'Edit Activity': 'Aktivität bearbeiten',
'Edit Address': 'Adresse bearbeiten',
'Edit Allowance Information': 'Informationen zum Taschengeld bearbeiten',
'Edit Alternative Item': 'Alternativen Artikel bearbeiten',
'Edit Application': 'Anwendung bearbeiten',
'Edit Appointment': 'Termin bearbeiten',
'Edit Appointment Type': 'Terminart bearbeiten',
'Edit Assessment Summary': 'Zusammenfassung fuer die Beurteilung bearbeiten',
'Edit Assessment': 'Beurteilung bearbeiten',
'Edit Asset Log Entry': 'Protokolleintrag der Anlage bearbeiten',
'Edit Asset': 'Anlage bearbeiten',
'Edit Baseline Type': 'Typ des Referenzdatums bearbeiten',
'Edit Baseline': 'Referenzdatum bearbeiten',
'Edit Brand': 'Marke bearbeiten',
'Edit Budget': 'Budget bearbeiten',
'Edit Bundle': 'Produktpaket bearbeiten',
'Edit Camp Service': 'Camp Leistung bearbeiten',
'Edit Camp Type': 'Camptyp bearbeiten',
'Edit Camp': 'Camp bearbeiten',
'Edit Case Details': 'Details zum Fall bearbeiten',
'Edit Case Flag': 'Fall Flagge bearbeiten',
'Edit Case Status': 'Fallstatus bearbeiten',
'Edit Catalog Item': 'Katalogeintrag bearbeiten',
'Edit Catalog': 'Katalog bearbeiten',
'Edit Certificate': 'Zertifikat bearbeiten',
'Edit Certification': 'Zertifizierung bearbeiten',
'Edit Cluster Subsector': 'Cluster Teilbereich bearbeiten',
'Edit Cluster': 'Cluster bearbeiten',
'Edit Commitment Item': 'Zugesagten Artikel bearbeiten',
'Edit Commitment': 'Zusage bearbeiten',
'Edit Competency Rating': 'Kompetenzbewertung bearbeiten',
'Edit Competency': 'Kompetenz bearbeiten',
'Edit Contact Information': 'Kontaktinformation bearbeiten',
'Edit Contact': 'Kontakt bearbeiten',
'Edit Contents': 'Inhalt bearbeiten',
'Edit Course Certificate': 'Kurszertifikat bearbeiten',
'Edit Course': 'Kurs bearbeiten',
'Edit Credential': 'Qualifikation bearbeiten',
'Edit Dead Body Details': 'Leichendetails bearbeiten',
'Edit Description': 'Beschreibung bearbeiten',
'Edit Details': 'Details bearbeiten',
'Edit Disaster Victims': 'Katastrophenopfer bearbeiten',
'Edit Document': 'Dokument bearbeiten',
'Edit Donor': 'Spender bearbeiten',
'Edit Email Settings': 'Email Einstellungen bearbeiten',
'Edit Entry': 'Eintrag bearbeiten',
'Edit Event': 'Ereignis bearbeiten',
'Edit Event Type': 'Ereignistyp bearbeiten',
'Edit Facility': 'Einrichtung bearbeiten',
'Edit Facility Type': 'Einrichtungstyp bearbeiten',
'Edit Family Member': 'Familienmitglied bearbeiten',
'Edit Feature Layer': 'Objekt-Layer bearbeiten',
'Edit Flood Report': 'Flut Bericht Bearbeiten',
'Edit Gateway Settings': 'Gateway-Einstellungen bearbeiten',
'Edit Group': 'Gruppe bearbeiten',
'Edit Hospital': 'Krankenhaus bearbeiten',
'Edit Human Resource': 'Personelle Ressource bearbeiten',
'Edit Identification Report': 'Identifizierungsbericht bearbeiten',
'Edit Identity': 'Identität bearbeiten',
'Edit Image Details': 'Bild Details bearbeiten',
'Edit Impact Type': 'Typ der Auswirkung bearbeiten',
'Edit Impact': 'Auswirkungen bearbeiten',
'Edit Incident Report': 'Vorfallsbericht bearbeiten',
'Edit Inventory Item': 'Artikel des Bestands bearbeiten',
'Edit Item Category': 'Kategorie des Artikel bearbeiten',
'Edit Item Pack': 'Artikelgruppe bearbeiten',
'Edit Item': 'Artikel bearbeiten',
'Edit Job Role': 'Tätigkeit bearbeiten',
'Edit Key': 'Schlüssel bearbeiten',
'Edit Kit': 'Ausstattung (Kit) bearbeiten',
'Edit Layer': 'Kartenebene bearbeiten',
'Edit Level %d Locations?': 'Standorte der Ebene %d bearbeiten?',
'Edit Level 1 Assessment': 'Stufe 1 Beurteilung bearbeiten',
'Edit Level 2 Assessment': 'Stufe 2 Beurteilung bearbeiten',
'Edit Location': 'Standort (Position) bearbeiten',
'Edit Log Entry': 'Protokolleintrag bearbeiten',
'Edit Map Profile': 'Kartenkonfiguration bearbeiten',
'Edit Map Services': 'Kartendienste bearbeiten',
'Edit Marker': 'Marker/Symbol bearbeiten',
'Edit Membership': 'Mitgliedschaft bearbeiten',
'Edit Message': 'Nachricht bearbeiten',
'Edit Messaging Settings': 'Messaging-Einstellungen bearbeiten',
'Edit Mission': 'Auftrag bearbeiten',
'Edit Modem Settings': 'Modem-Einstellungen bearbeiten',
'Edit Need Type': 'Bedarfstyp bearbeiten',
'Edit Need': 'Bedarf bearbeiten',
'Edit Office': 'Büro bearbeiten',
'Edit Options': 'Optionen bearbeiten',
'Edit Organization': 'Organisation bearbeiten',
'Edit Parameters': 'Parameter bearbeiten',
'Edit Peer Details': 'Details zu Peer bearbeiten',
'Edit Person Details': 'Details zur Person bearbeiten',
'Edit Personal Effects Details': 'Details zur persönlichen Habe bearbeiten',
'Edit Photo': 'Foto bearbeiten',
'Edit Population Statistic': 'Bevölkerungsstatistik bearbeiten',
'Edit Position': 'Position bearbeiten',
'Edit Problem': 'Problem bearbeiten',
'Edit Project': 'Projekt bearbeiten',
'Edit Projection': 'Kartenprojektion bearbeiten',
'Edit Rapid Assessment': 'Schnell-Beurteilung bearbeiten',
'Edit Received Item': 'Erhaltenen Artikel bearbeiten',
'Edit Received Shipment': 'Erhaltene Lieferung bearbeiten',
'Edit Record': 'Datensatz bearbeiten',
'Edit Registration Details': 'Details zur Registrierung bearbeiten',
'Edit Registration': 'Registrierung bearbeiten',
'Edit Request Item': 'Anfrage zu Artikel bearbeiten',
'Edit Request': 'Anfrage bearbeiten',
'Edit Residents Report': 'Bewohnerliste bearbeiten',
'Edit Resource': 'Ressource bearbeiten',
'Edit River': 'Fluss bearbeiten',
'Edit Role': 'Rolle bearbeiten',
'Edit Room': 'Raum bearbeiten',
'Edit Scenario': 'Szenario bearbeiten',
'Edit Sector': 'Bereich bearbeiten',
'Edit Sent Item': 'Gesendeten Artikel bearbeiten',
'Edit Setting': 'Einstellung bearbeiten',
'Edit Settings': 'Einstellungen bearbeiten',
'Edit Shelter Service': 'Unterkunft Leistung bearbeiten',
'Edit Shelter Type': 'Typ der Unterkunft bearbeiten',
'Edit Shelter': 'Unterkunft bearbeiten',
'Edit Skill Equivalence': 'Fähigkeits-Vergleichbarkeit bearbeiten',
'Edit Skill Provision': 'Fähigkeits-Bereitstellung bearbeiten',
'Edit Skill Type': 'Typ der Fähigkeit bearbeiten',
'Edit Skill': 'Fähigkeit bearbeiten',
'Edit Solution': 'Lösung bearbeiten',
'Edit Staff Type': 'Mitarbeitertyp bearbeiten',
'Edit Subscription': 'Abonnement bearbeiten',
'Edit Subsector': 'Teilbereich bearbeiten',
'Edit Survey Answer': 'Umfrage - Antwort bearbeiten',
'Edit Survey Question': 'Umfrage - Frage bearbeiten',
'Edit Survey Series': 'Umfrage - Serie bearbeiten',
'Edit Survey Template': 'Umfrage Vorlage bearbeiten',
'Edit Task': 'Aufgabe bearbeiten',
'Edit Team': 'Team bearbeiten',
'Edit Theme': 'Thema bearbeiten',
'Edit Themes': 'Themen bearbeiten',
'Edit Ticket': 'Ticket bearbeiten',
'Edit Track': 'Route bearbeiten',
'Edit Training': 'Schulung bearbeiten',
'Edit Tropo Settings': 'Tropo Einstellungen bearbeiten',
'Edit User': 'Benutzer bearbeiten',
'Edit Volunteer Availability': 'Verfügbarkeit von Freiwilligem bearbeiten',
'Edit Volunteer Details': 'Details zu Freiwilligem bearbeiten',
'Edit Warehouse': 'Warenlager bearbeiten',
'Edit Weather Widget': 'Wetter-Widget bearbeiten',
'Edit current record': 'Aktuellen Datensatz bearbeiten',
'Edit message': 'Nachricht bearbeiten',
'Edit': 'Bearbeiten',
'Editable?': 'Bearbeitbar?',
'Education materials received': 'Ausbildungsmaterialien erhalten',
'Education materials, source': 'Herkunft der Ausbildungsmaterialien',
'Education': 'Ausbildung/Schulung',
'Effects Inventory': 'Inventar der persönlichen Habe',
'Eggs': 'Eier',
'Either a shelter or a location must be specified': 'Es muss entweder eine Unterkunft oder ein Standort angegeben werden',
'Either file upload or document URL required.': 'Es ist entweder ein Dateiupload oder ein URL erforderlich',
'Either file upload or image URL required.': 'Es ist entweder ein Dateiupload oder eine Bild-URL erforderlich',
'Elderly person headed households (>60 yrs)': 'Von älteren Menschen (>60 Jahren) geführte Haushalte',
'Electrical': 'Elektrisch',
'Electrical, gas, sewerage, water, hazmats': 'Elektrik, Gas, Abwasser, Wasser, Gefahrgut',
'Elevated': 'Erhöht',
'Elevation': 'Höhe',
'Elevators': 'Aufzüge',
'Eligible for Allowance': 'Berechtigt für Taschengeld',
'Email Address': 'E-Mail-Adresse',
'Email Channels (Inbound)': 'E-Mail Kanäle (eingehend)',
'Email InBox': 'E-Mail Eingang',
'Email Settings': 'E-Mail-Einstellungen',
'Email settings updated': 'E-Mail-Einstellungen aktualisiert',
'Email': 'E-Mail',
'Embalming': 'Einbalsamierung',
'Embassy': 'Botschaft',
'Emergencies': 'Notfälle',
'Emergency': 'Notfall',
'Emergency Capacity Building project': 'Notfall-Kompetenzbildungsprojekt',
'Emergency Contacts': 'Notfallkontakte',
'Emergency Department': 'Notfall-Abteilung',
'Emergency Shelter': 'Notunterkunft',
'Emergency Support Facility': 'Notfall-Unterstützungseinrichtung',
'Emergency Support Service': 'Notfall-Unterstützungsdienst',
'Emergency Telecommunications': 'Notfall-Telekommunikation',
'Enable/Disable Layers': 'Layer aktivieren/deaktivieren',
'Enabled': 'Aktiviert',
'Enabled?': 'Aktiviert?',
'End Date': 'Enddatum',
'End date should be after start date': 'Enddatum muss nach dem Startdatum liegen',
'End date': 'Enddatum',
'End of Period': 'Ende des Zeitraums',
'Enter a GPS Coord': 'Geben Sie eine GPS Koordinate ein',
'Enter a name for the spreadsheet you are uploading (mandatory).': 'Geben Sie einen Namen für die Tabelle an, die Sie hochladen (obligatorisch).',
'Enter a new support request.': 'Geben Sie eine neue Unterstützungsanfrage ein.',
'Enter a unique label!': 'Geben Sie eine eindeutige Bezeichnung ein!',
'Enter a valid date before': 'Geben Sie ein gültiges Datum vor dem angegebenen Datum ein',
'Enter a valid email': 'Geben Sie eine gültige E-Mail-Adresse ein',
'Enter a valid future date': 'Geben Sie ein gültiges, zukünftiges Datum ein',
'Enter some characters to bring up a list of possible matches': 'Geben Sie einige Zeichen ein um eine Liste möglicher Übereinstimmungen anzuzeigen',
'Enter some characters to bring up a list of possible matches.': 'Geben Sie einige Zeichen ein um eine Liste von möglichen Übereinstimmungen anzuzeigen.',
'Enter tags separated by commas.': 'Geben Sie die Tags mit Komma getrennt ein.',
'Enter the same password as above': 'Wiederholen Sie das Kennwort von oben',
'Entered': 'Eingegeben',
'Entering a phone number is optional, but doing so allows you to subscribe to receive SMS messages.': 'Die Eingabe einer Telefonnummer ist freiwillig, sie erlaubt Ihnen aber SMS-Nachrichten zu abonnieren und zu empfangen.',
'Entitlement Period': 'Anspruchszeitraum',
'Entry deleted': 'Eintrag gelöscht',
'Environment': 'Umgebung/Umwelt',
'Equipment': 'Ausrüstung',
'Error Tickets': 'Fehlertickets',
'Error encountered while applying the theme.': 'Bei der Anwendung des Themas ist ein Fehler aufgetreten.',
'Error in message': 'Fehler in der Nachricht',
"Error logs for '%(app)s'": 'Fehlerprotokolle für "%(app)s"',
'Errors': 'Fehler',
'ESRI Shapefile': 'ESRI Shapefile',
'Essential Staff': 'Unverzichtbarer Mitarbeiter',
'Essential Staff?': 'Unverzichtbarer Mitarbeiter?',
'Est. Delivery Date': 'Geschätztes Lieferdatum',
'Estimated # of households who are affected by the emergency': 'Geschätzte Anzahl von Haushalten, die vom Notfall betroffen sind',
'Estimated # of people who are affected by the emergency': 'Geschätzte Anzahl von Menschen, die vom Notfall betroffen sind',
'Estimated Overall Building Damage': 'Geschätzter allgemeiner Gebäudeschaden',
'Estimated Population': 'Geschätzte Bevölkerungszahl',
'Estimated total number of people in institutions': 'Geschätzte Gesamtzahl von Menschen in Einrichtungen',
'Estimated Delivery Date': 'Geschätztes Lieferdatum',
'Euros': 'Euro',
'Evacuating': 'Evakuieren',
'Evacuees Capacity (Day and Night)': 'Evakuierungspotential (Tag und Nacht)',
'Evacuees Capacity (Night only)': 'Evakuierungspotential (nur Nacht)',
'Evaluate the information in this message. (This value SHOULD NOT be used in public warning applications.)': 'Bewerten Sie die Informationen in dieser Nachricht. (Dieser Wert sollte NICHT in öffentlichen Warnanwendungen verwendet werden.)',
'Event Details': 'Details zum Ereignis',
'Event Registration': 'Ereignisregistrierung',
'Event Type': 'Ereignistyp',
'Event Type created': 'Ereignistyp angelegt',
'Event Type deleted': 'Ereignistyp gelöscht',
'Event Type Details': 'Details zum Ereignistyp',
'Event Type updated': 'Ereignistyp aktualisiert',
'Event Types': 'Ereignistypen',
'Event added': 'Ereignis hinzugefügt',
'Event deleted': 'Ereignis gelöscht',
'Event registered': 'Ereignis registriert',
'Event updated': 'Ereignis aktualisiert',
'Event': 'Ereignis',
'Events': 'Ereignisse',
'Example': 'Beispiel',
'Exceeded': 'Überschritten',
'Excellent': 'Ausgezeichnet',
'Exclude contents': 'Inhalte ausschließen',
'Excreta disposal': 'Entsorgung von Exkrementen',
'Execute a pre-planned activity identified in <instruction>': 'Ausführen einer vorausgeplanten Aktivität, identifiziert in <instruction>',
'Exercise': 'Übung',
'Exercise?': 'Übung?',
'Exercises mean all screens have a watermark & all notifications have a prefix.': 'Übungen bedeuten, dass alle Anzeigen ein Wasserzeichen und alle Benachrichtigungen ein Präfix haben.',
'Existing Placard Type': 'Vorhandener Plakattyp',
'Existing food stocks': 'Vorhandener Lebensmittelvorrat',
'Existing location cannot be converted into a group.': 'Vorhandener Standort kann nicht in eine Gruppe umgewandelt werden.',
'Exits': 'Ausgänge',
'Experience': 'Erfahrung',
'Expiration Date': 'Ablaufdatum',
'Expiration Report': 'Ablaufbericht',
'Expired?': 'Abgelaufen?',
'Expiring Staff Contracts Report': 'Berichte zu ablaufenden Mitarbeiterverträgen',
'Expiry Date': 'Ablaufdatum',
'Expiry (month)': 'Ablauf (Monat)',
'Expiry (months)': 'Ablauf (Monate)',
'Explosive Hazard': 'Explosionsgefahr',
'Export as': 'Exportieren als',
'Export Data': 'Daten exportieren',
'Export Database as CSV': 'Datenbank als CSV exportieren',
'Export in GPX format': 'Als GPX Format exportieren',
'Export in KML format': 'Als KML Format exportieren',
'Export in OSM format': 'Als OSM Format exportieren',
'Export in PDF format': 'In PDF Format exportieren',
'Export in RSS format': 'In RSS Format exportieren',
'Export in XLS format': 'In XLS Format exportieren',
'Exterior Only': 'Nur außen',
'Exterior and Interior': 'Außen und innen',
'External': 'Extern',
'Eye Color': 'Augenfarbe',
'Facebook Channels': 'Facebook Kanäle',
'Facial hair, color': 'Gesichtsbehaarung, Farbe',
'Facial hair, type': 'Gesichtsbehaarung, Art',
'Facial hear, length': 'Gesichtsbehaarung, Länge',
'Facility': 'Einrichtung',
'Facilities': 'Einrichtungen',
'Facility Contact': 'Kontakt für Einrichtung',
'Facility Details': 'Details zur Einrichtung',
'Facility Operations': 'Einrichtungsmanagement',
'Facility Status': 'Status der Einrichtung',
'Facility Type': 'Einrichtungstyp',
'Facility Types': 'Einrichtungstypen',
'Facility added': 'Einrichtung hinzugefügt',
'Facility or Location': 'Einrichtung oder Standort',
'Facility removed': 'Einrichtung entfernt',
'Facility updated': 'Einrichtung aktualisiert',
'Fail': 'Fehlgeschlagen',
'Failed!': 'Fehlgeschlagen!',
'Fair': 'Mäßig',
'Falling Object Hazard': 'Gefahr durch herabstürzende Objekte',
'Families/HH': 'Familien/HH',
'Family tarpaulins received': 'Familienplanen erhalten',
'Family tarpaulins, source': 'Herkunft der Planen für Familie',
'Family': 'Familie',
'Family Members': 'Familienmitglieder',
'Family Member Details': 'Details zum Familienmitglied',
'Family Member added': 'Familienmitglied hinzugefügt',
'Family Member updated': 'Familienmitglied aktualisiert',
'Family Member removed': 'Familienmitglied entfernt',
'Family Reunification': 'Familienzusammenführung',
'Family Role': 'Familienrolle',
'Family Transferable': 'Familie Transferierbar',
'Family/friends': 'Familie/Freunde',
'Farmland/fishing material assistance, Rank': 'Ackerland/Materialhilfe für Fischerei, Rang',
'Fatalities': 'Verstorbene',
'Father': 'Vater',
'Feature Layer added': 'Objekt-Layer hinzugefügt',
'Feature Layer deleted': 'Objekt-Layer gelöscht',
'Feature Layer updated': 'Objekt-Layer aktualisiert',
'Feature Layers': 'Objekt-Ebenen',
'Feature Namespace': 'Namespace des Objekts',
'Feature Request': 'Objekt-Anfrage',
'Feature Type': 'Objektart',
'Features Include': 'Beinhaltete Objekte',
'Federal State': 'Bundesland',
'Feeds': 'Newsfeeds',
'Female headed households': 'Weiblich geführte Haushalte',
'Female': 'Weiblich',
'Few': 'Wenige',
'Field Hospital': 'Feldlazarett',
'Field': 'Feld',
'File': 'Datei',
'Fill in Latitude': 'Geben Sie den Breitengrad ein',
'Fill in Longitude': 'Geben Sie den Längengrad ein',
'Filter Options': 'Filteroptionen',
'Filter by Tag': 'Nach Tag filtern',
'Filter by Location': 'Nach Standort filtern',
'Filter by Organization': 'Nach Organisation filtern',
'Filter by Date': 'Nach Datum filtern',
'Filter Field': 'Filter Feld',
'Filter Tweets by the date they were tweeted on': 'Filtere Tweets nach dem Datum der Sendung',
'Filter Tweets by who tweeted them': 'Filtere Tweets nach sendender Person',
'Filter Value': 'Filter Wert',
'Find Dead Body Report': 'Suche Leichenbericht',
'Find Hospital': 'Krankenhaus finden',
'Find Person Record': 'Personendatensatz finden',
'Find Volunteers': 'Freiwillige finden',
'Find a Person Record': 'Suche einen Personendatensatz',
'Find': 'Suchen',
'Fingerprint': 'Fingerabdruck',
'Fingerprinting': 'Fingerabdrücke abnehmen',
'Fingerprints': 'Fingerabdrücke',
'Finished Jobs': 'Erledigte Jobs',
'Fire suppression and rescue': 'Brandbekämpfung und Rettung',
'Fire': 'Feuer',
'First': 'Erste',
'First Name': 'Vorname',
'First name': 'Vorname',
'Fishing': 'Fischerei',
'flag': 'Flagge',
'flags': 'Flaggen',
'Flags': 'Flaggen',
'Flash Flood': 'Sturzflut',
'Flash Freeze': 'Schockfrost',
'Flexible Impact Assessments': 'Flexible Folgenabschätzungen',
'Flood Alerts show water levels in various parts of the country': 'Flut Alarme zeigen Wasserstände in verschiedenen Teilen des Landes.',
'Flood Alerts': 'Flut Alarme',
'Flood Depth': 'Fluthöhe',
'Flood Report Details': 'Details zum Flutbericht',
'Flood Report added': 'Flutbericht hinzugefügt',
'Flood Report deleted': 'Flutbericht gelöscht',
'Flood Report updated': 'Flutbericht aktualisiert',
'Flood Report': 'Flutbericht',
'Flood Reports': 'Flutberichte',
'Flood': 'Flut',
'Flow Status': 'Status des Ablaufs',
'fluent': 'fliessend',
'Fog': 'Nebel',
'Folder': 'Ordner',
'Follow up': 'Wiedervorlage',
'Follow-up required': 'Wiedervorlage erforderlich',
'Food Supply': 'Lebensmittelversorgung',
'Food assistance': 'Lebensmittel Hilfe',
'Food': 'Lebensmittel',
'Food Distribution': 'Essensausgabe',
'Footer file %s missing!': 'Fußzeilendatei %s fehlt!',
'Footer': 'Fußzeile',
'For a country this would be the ISO2 code, for a Town, it would be the Airport Locode.': 'Für ein Land wäre dies der ISO2-Code, für eine Stadt wäre es der Flughafen-Code.',
'For each sync partner, there is a default sync job that runs after a specified interval of time. You can also set up more sync jobs which could be customized on your needs. Click the link on the right to get started.': 'Für jeden Sync-Partner gibt es einen Standard-Sync-Job, der nach einem festgelegten Zeitintervall ausgeführt wird. Sie können auch weitere Sync-Jobs einrichten und an Ihre Anforderungen anpassen. Klicken Sie auf den Link rechts, um zu beginnen.',
'For enhanced security, you are recommended to enter a username and password, and notify administrators of other machines in your organization to add this username and password against your UUID in Synchronization -> Sync Partners': 'Für erweiterte Sicherheit empfiehlt sich die Eingabe eines Benutzernamens und Passworts. Bitte benachrichtigen Sie die Administratoren der anderen Geräte in Ihrer Organisation, damit diese die Zugangsdaten unter Synchronization -> Sync-Partner zu Ihrer UUID hinterlegen.',
'For live help from the Sahana community on using this application, go to': 'Für direkte Hilfe von der Sahana Community zur Anwendung dieses Programmes, gehen Sie zu',
'For messages that support alert network internal functions': 'Für Nachrichten, die interne Funktionen des Warnnetzwerks unterstützen',
'For more details on the Sahana Eden system, see the': 'Weitere Informationen zum Sahana Eden System finden Sie unter',
'For more information, see': 'Weitere Informationen finden Sie unter',
'For': 'Für',
'Forest Fire': 'Waldbrand',
'Formal camp': 'Offizielles Camp',
'Forms': 'Formulare',
'Found': 'Gefunden',
'Foundations': 'Stiftungen',
'Free for domestic animals': 'Haustiere zugelassen',
'Freezing Drizzle': 'Gefrierender Nieselregen',
'Freezing Rain': 'Gefrierender Regen',
'Freezing Spray': 'Gefrierende Gischt',
'French': 'Französisch',
'Friday': 'Freitag',
'From Adress': 'Herkunftsadresse',
'From Address': 'Herkunftsadresse',
'From Facility': 'Von Einrichtung',
'From Inventory': 'Aus dem Bestand',
'From Location': 'Vom Standort',
'From Organization': 'Von der Organisation',
'From': 'Von',
'From ': 'Von ',
'Fulfil. Status': 'Status der Bedarfsdeckung',
'Fulfill Status': 'Status der Bedarfsdeckung',
'Fulfillment Status': 'Auftragserfüllungsstatus',
'Full beard': 'Vollbart',
'Full': 'Voll',
'Fullscreen Map': 'Vollbildkarte',
'Functions available': 'Verfügbare Funktionen',
'Funding': 'Finanzierung',
'Funding Organization': 'Finanzierende Organisation',
'Funeral': 'Beerdigung',
'Further Action Recommended': 'Weitere Maßnahmen empfohlen',
'Appointments with future dates can not be marked as completed': 'Termine mit Datum in der Zukunft können nicht als beendet markiert werden',
'GIS Reports of Shelter': 'GIS-Berichte der Unterkünfte',
'GIS integration to view location details of the Shelter': 'GIS-Integration um Details zum Standort der Unterkunft zu erhalten',
"Google Earth's Keyhole Markup Language": "Google Earth's Keyhole Markup Language",
'GPS Marker': 'GPS Markierung/Symbol',
'GPS Track File': 'GPS Track Datei',
'GPS Track': 'GPS Track',
'GPX Track': 'GPX Track',
'GPS eXchange format': 'GPS-Austauschformat',
'Gap Analysis Map': 'Karte zur Lückenanalyse',
'Gap Analysis Report': 'Bericht zur Lückenanalyse',
'Gap Analysis': 'Lückenanalyse',
'Gap Map': 'Lückenkarte',
'Gap Report': 'Bericht über Lücken',
'Gateway Settings': 'Gateway-Einstellungen',
'Gateway settings updated': 'Gateway-Einstellungen aktualisiert',
'Gateway': 'Gateway',
'Gender': 'Geschlecht',
'General Comment': 'Allgemeine Bemerkung',
'General Medical/Surgical': 'Allgemein - Medizinisch/Chirurgisch',
'General emergency and public safety': 'Allgemein - Notfall und öffentliche Sicherheit',
'General information on demographics': 'Allgemein - Informationen zur Demographie',
'General': 'Allgemein',
'Geocode': 'Geocodierung',
'Geocoder Selection': 'Geocoder Auswahl',
'Geometry Name': 'Name der Geometrie',
'Geophysical (inc. landslide)': 'Geophysikalisch (inkl. Erdrutsch)',
'Geotechnical Hazards': 'Geotechnische Gefahren',
'Geotechnical': 'Geotechnisch',
'Geraldo module not available within the running Python - this needs installing for PDF output!': 'Das Modul Geraldo steht in der aktiven Python-Umgebung nicht zur Verfügung - für die PDF-Ausgabe muss es nachinstalliert werden!',
'German': 'Deutsch',
'Get incoming recovery requests as RSS feed': 'Empfangen von eingehenden Bergungsanforderungen als RSS-Feed',
'Give a brief description of the image, e.g. what can be seen where on the picture (optional).': 'Kurze Beschreibung des Bildes, z. B. was wo auf dem Bild zu sehen ist (optional).',
'Give information about where and when you have seen them': 'Geben Sie an, wo und wann Sie sie gesehen haben',
'Global Messaging Settings': 'Globale Nachrichteneinstellungen',
'Go to Request': 'Zur Anfrage',
'Go': 'Los',
'Goatee': 'Spitzbart',
'Good Condition': 'Guter Zustand',
'Good': 'Gut',
'Goods Received Note': 'Warenempfangsbestätigung',
'Government UID': 'Regierungs-UID',
'Government building': 'Regierungsgebäude',
'Government District': 'Regierungsbezirk',
'Government': 'Regierung',
'Grade': 'Klasse',
'Greek': 'Griechisch',
'Green': 'Grün',
'GRN': 'GRN',
'GRN Number': 'GRN Nummer',
'Ground movement, fissures': 'Untergrundbewegung, Risse',
'Ground movement, settlement, slips': 'Untergrundbewegung, Bodensenkung, Abrutsche',
'Group Description': 'Gruppenbeschreibung',
'Group Details': 'Gruppendetails',
'Group Head': 'Gruppenleiter',
'Group Member added': 'Gruppenmitglied hinzugefügt',
'Group Members': 'Gruppenmitglieder',
'Group Memberships': 'Gruppenzugehörigkeiten',
'Group Name': 'Gruppenname',
'Group Size': 'Gruppengröße',
'Group Size Day': 'Gruppengröße Tag',
'Group Size Night': 'Gruppengröße Nacht',
'Group Title': 'Gruppentitel',
'Group Type': 'Gruppentyp',
'Group added': 'Gruppe hinzugefügt',
'Group deleted': 'Gruppe gelöscht',
'Group description': 'Gruppenbeschreibung',
'Group updated': 'Gruppe aktualisiert',
'Group': 'Gruppe',
'Grouped by': 'Gruppiert nach',
'Groups removed': 'Gruppen entfernt',
'Groups': 'Gruppen',
'GU Done': 'GU erledigt',
'Guest': 'Gast',
'HR Manager': 'Personalmanager',
'Hail': 'Hagel',
'Hair Color': 'Haarfarbe',
'Hair Length': 'Haarlänge',
'Hair Style': 'Haarschnitt',
'Has data from this Reference Document been entered into Sahana?': 'Wurden Daten von diesem Referenzdokument in Sahana eingetragen?',
'Has the Certificate for receipt of the shipment been given to the sender?': 'Wurde das Zertifikat für den Empfang der Lieferung an den Absender übergeben?',
'Has the GRN (Goods Received Note) been completed?': 'Wurde die Warenempfangsmeldung (GRN) ausgefüllt?',
'Hazard Pay': 'Gefahrenzulage',
'Hazardous Material': 'Gefahrgut',
'Hazardous Road Conditions': 'Gefährliche Straßenverhältnisse',
'Header Background': 'Hintergrund der Kopfzeile',
'Header background file %s missing!': 'Datei %s für den Hintergrund der Kopfzeile fehlt!',
'Headquarters': 'Hauptquartiere',
'Head of Family': 'Familienoberhaupt',
'Health care assistance, Rank': 'Unterstützung Gesundheitspflege, Rang',
'Health center with beds': 'Gesundheitszentrum mit Betten',
'Health center without beds': 'Gesundheitszentrum ohne Betten',
'Health center': 'Gesundheitszentrum',
'Health services status': 'Status des Gesundheitswesens',
'Health': 'Gesundheit',
'Healthcare Worker': 'Arbeiter im Gesundheitswesen',
'Heat Wave': 'Hitzewelle',
'Heat and Humidity': 'Wärme und Feuchtigkeit',
'Height': 'Höhe',
'Height (cm)': 'Höhe (cm)',
'Height (m)': 'Höhe (m)',
'Heliports': 'Hubschrauberlandeplätze',
'HELP': 'HILFE',
'Help': 'Hilfe',
'Help Wanted': 'Hilfe benötigt',
'Helps to monitor status of hospitals': 'Hilfe um den Status von Krankenhäusern zu überwachen',
'Helps to report and search for missing persons': 'Hilfe beim Melden von und bei der Suche nach vermissten Personen',
'Here are the solution items related to the problem.': 'Hier sind die mit diesem Problem verbundenen Lösungselemente.',
'Heritage Listed': 'Denkmalgeschützt',
'Hide': 'Verstecken',
'Hierarchy': 'Hierarchie',
'Hierarchy Level 0 Name (i.e. Country)': 'Hierarchiestufe 0 Name (d.h. Land)',
'Hierarchy Level 1 Name (e.g. State or Province)': 'Hierarchiestufe 1 Name (z. B. Bundesland oder Provinz)',
'Hierarchy Level 2 Name (e.g. District or County)': 'Hierarchiestufe 2 Name (z. B. Bezirk oder Landkreis)',
'Hierarchy Level 3 Name (e.g. City / Town / Village)': 'Hierarchiestufe 3 Name (z. B. Ort / Stadt / Dorf)',
'Hierarchy Level 4 Name (e.g. Neighbourhood)': 'Hierarchiestufe 4 Name (z. B. Nachbarschaft)',
'Hierarchy Level 5 Name': 'Hierarchiestufe 5 Name',
'High Tide Depth': 'Tiefe bei maximaler Tide',
'High Water': 'Hochwasser',
'High': 'Hoch',
'Highest Priority Open Requests': 'Offene Anfragen höchster Priorität',
'History': 'Geschichte',
'Hit the back button on your browser to try again.': 'Verwenden Sie die Zurück-Schaltfläche Ihres Browsers, um es erneut zu versuchen.',
'Holiday Address': 'Urlaubsadresse',
'Home Address': 'Heimatadresse',
'Home Country': 'Land des Wohnsitzes',
'Home Crime': 'Häusliche Kriminalität',
'Home': 'Startseite',
'Hospital Details': 'Details zum Krankenhaus',
'Hospital Status Report': 'Statusbericht zum Krankenhaus',
'Hospital information added': 'Krankenhausinformationen hinzugefügt',
'Hospital information deleted': 'Krankenhausinformationen gelöscht',
'Hospital information updated': 'Krankenhausinformationen aktualisiert',
'Hospital status assessment.': 'Beurteilung des Zustands des Krankenhauses.',
'Hospital': 'Krankenhaus',
'Hospitals': 'Krankenhäuser',
'Hour': 'Stunde',
'Hours': 'Stunden',
'Hours by': 'Stunden nach',
'Hours by Program Import': 'Stunden nach Programm (Import)',
'Hours by Program Report': 'Stunden nach Programm (Bericht)',
'Hours by Role Import': 'Stunden nach Rolle (Import)',
'Hours by Role Report': 'Stunden nach Rolle (Bericht)',
'Household kits received': 'Haushaltsbausätze (-kits) erhalten',
'Household kits, source': 'Herkunft der Haushaltsbausätze (-kits)',
'Housing Unit': 'Unterkunftseinheit',
'Housing Unit Capacity': 'Maximale Belegungszahl der Unterkunftseinheit',
'Housing Unit Day and Night Capacity': 'Maximale Tag- und Nacht-Belegungszahl der Unterkunftseinheit',
'Housing Unit Name': 'Name der Unterkunftseinheit',
'Housing Units': 'Unterkunftseinheiten',
'How does it work?': 'Wie funktioniert das?',
'How is this person affected by the disaster? (Select all that apply)': 'Wie ist diese Person von der Katastrophe betroffen? (Wählen Sie alles Zutreffende aus)',
'How long will the food last?': 'Wie lange werden die Lebensmittel reichen?',
'How many Boys (0-17 yrs) are Dead due to the crisis': 'Wie viele Jungen (0-17 Jahre) sind durch die Krise umgekommen',
'How many Boys (0-17 yrs) are Injured due to the crisis': 'Wie viele Jungen (0-17 Jahre) sind durch die Krise verletzt worden',
'How many Boys (0-17 yrs) are Missing due to the crisis': 'Wie viele Jungen (0-17 Jahre) sind aufgrund der Krise verschollen',
'How many Girls (0-17 yrs) are Dead due to the crisis': 'Wie viele Mädchen (0-17 Jahre) sind durch die Krise umgekommen',
'How many Girls (0-17 yrs) are Injured due to the crisis': 'Wie viele Mädchen (0-17 Jahre) sind durch die Krise verletzt worden',
'How many Girls (0-17 yrs) are Missing due to the crisis': 'Wie viele Mädchen (0-17 Jahre) sind aufgrund der Krise verschollen',
'How many Men (18 yrs+) are Dead due to the crisis': 'Wie viele Männer (18 Jahre+) sind durch die Krise umgekommen',
'How many Men (18 yrs+) are Injured due to the crisis': 'Wie viele Männer (18 + Jahre) wurden wegen der Krise verletzt',
'How many Men (18 yrs+) are Missing due to the crisis': 'Wie viele Männer (18 + Jahre) sind aufgrund der Krise verschollen',
'How many Women (18 yrs+) are Dead due to the crisis': 'Wie viele Frauen (18+ Jahre) sind durch die Krise umgekommen',
'How many Women (18 yrs+) are Injured due to the crisis': 'Wie viele Frauen (18+ Jahre) wurden wegen der Krise verletzt',
'How many Women (18 yrs+) are Missing due to the crisis': 'Wie viele Frauen (18 Jahre und älter) sind aufgrund der Krise verschollen',
'How many days will the supplies last?': 'Wie viele Tage werden die Waren reichen?',
'How many external (Hospital / Police)': 'Wie viele außerhalb (Krankenhaus/Polizei)',
'How many free places': 'Wie viele freie Plätze',
'How many in BEA (except in PX)': 'Wie viele in BEA (ohne die in PX)',
'How many in BEA (total)': 'Wie viele in BEA (gesamt)',
'How many in PX': 'Wie viele in PX',
'How many new cases have been admitted to this facility in the past 24h?': 'Wie viele neue Fälle wurden während der letzten 24 Stunden dieser Einrichtung zugewiesen?',
'How many of the patients with the disease died in the past 24h at this facility?': 'Wie viele der Patienten mit dieser Krankheit sind in den letzten 24 Stunden in dieser Einrichtung gestorben?',
'How many patients with the disease are currently hospitalized at this facility?': 'Wie viele Patienten mit dieser Krankheit sind momentan in dieser Einrichtung in Behandlung?',
'How much detail is seen. A high Zoom level means lot of detail, but not a wide area. A low Zoom level means seeing a wide area, but not a high level of detail.': 'Wie viele Details sind sichtbar. Eine hohe Zoom-Stufe bedeutet viele Details, aber keine gute Übersicht. Eine niedrige Zoom-Stufe führt zu einer guten Übersicht, es fehlen aber die Details.',
'Hub': 'Zentrum',
'Human Resource Details': 'Details zur Personalressource',
'Human Resource Management': 'Management der Personalressourcen',
'Human Resource added': 'Personalressource hinzugefügt',
'Human Resource removed': 'Personalressource entfernt',
'Human Resource updated': 'Personalressource aktualisiert',
'Human Resource': 'Personalressource',
'Human Resources': 'Personalressourcen',
'Humanitarian NGO': 'Humanitäre NGO',
'Humanitarian Use': 'Humanitäre Zwecke',
'Hurricane Force Wind': 'Wind in Hurrikanstärke',
'Hurricane': 'Wirbelsturm',
'Hygiene kits received': 'Hygienekits empfangen',
'Hygiene kits, source': 'Herkunft der Hygienekits',
'Hygiene practice': 'Hygienepraxis',
'Hygiene problems': 'Hygieneprobleme',
'I am available in the following area(s)': 'Ich stehe in folgenden Bereichen zur Verfügung',
'IATA': 'IATA',
'ICAO': 'ICAO',
'ID Tag Number': 'Identifikations-Etikett-Nummer',
'ID Tag': 'Identifikationsetikett',
'ID Type': 'ID-Typ',
'Ice Pressure': 'Eisdruck',
'Iceberg': 'Eisberg',
'Identification Report': 'Identifizierungsbericht',
'Identification Reports': 'Identifizierungsberichte',
'Identification Status': 'Status der Identifizierung',
'Identification': 'Identifizierung',
'Identified as': 'Identifiziert als',
'Identified by': 'Identifiziert durch',
'Identity Details': 'Details zur Identität',
'Identity added': 'Identität hinzugefügt',
'Identity deleted': 'Identität gelöscht',
'Identity updated': 'Identität aktualisiert',
'Identity': 'Identität',
'If a ticket was issued then please provide the Ticket ID.': 'Wenn ein Ticket ausgestellt wurde, bitte die Ticket-ID angeben.',
'If a user verifies that they own an Email Address with this domain, the Approver field is used to determine whether & by whom further approval is required.': 'Wenn ein Benutzer sicherstellt, dass er oder sie eine Email-Adresse in dieser Domäne besitzt, wird das Approver Feld dazu verwendet, um zu bestimmen ob und von wem weitere Genehmigungen erforderlich sind.',
'If it is a URL leading to HTML, then this will downloaded.': 'Handelt es sich um eine URL zu einer HTML Seite, dann wird diese heruntergeladen.',
'If neither are defined, then the Default Marker is used.': 'Wenn nichts davon definiert wurde, wird der Standard Marker (Symbol) verwendet.',
'If no marker defined then the system default marker is used': 'Wenn keine Markierung (Symbolisierung) definiert ist dann wird die im System festgelegte Standardmarkierung verwendet',
'If no, specify why': 'Wenn nein, geben Sie bitte einen Grund dafür an',
'If none are selected, then all are searched.': 'Wird keine ausgewählt, werden alle durchsucht.',
'If the location is a geographic area, then state at what level here.': 'Wenn der Ort ein geographisches Gebiet ist, geben Sie bitte hier die entsprechende Stufe an.',
'If the request type is "Other", please enter request details here.': 'Wenn der Anfragetyp "Andere" ist, geben Sie bitte hier weitere Details zur Anfrage ein.',
'If this field is populated then a user with the Domain specified will automatically be assigned as a Staff of this Organization': 'Wenn dieses Feld ausgefüllt ist, dann wird ein Benutzer mit der gleichen Domainadresse automatisch als Mitarbeiter dieser Organisation zugeordnet.',
'If this is set to True then mails will be deleted from the server after downloading.': "Wenn dies auf 'Wahr' gesetzt ist, dann werden die Mails nach dem Herunterladen vom Server gelöscht.",
'If this record should be restricted then select which role is required to access the record here.': 'Wenn der Zugriff auf diesen Datensatz beschränkt werden soll, wählen Sie hier die Rolle aus, die für den Zugriff erforderlich ist.',
'If this record should be restricted then select which role(s) are permitted to access the record here.': 'Wenn dieser Eintrag beschränkt werden soll, dann wählen Sie hier aus, welche Rolle(n) für den Zugriff auf den Eintrag berechtigt sind.',
'If yes, specify what and by whom': 'Wenn ja, geben Sie an, was und von wem',
'If yes, which and how': 'Wenn ja, welche und wie',
'If you do not enter a Reference Document, your email will be displayed to allow this data to be verified.': 'Wenn Sie kein Referenzdokument angeben, wird stattdessen ihre Mailadresse angezeigt damit die Daten verifiziert werden können.',
'If you know what the Geonames ID of this location is then you can enter it here.': 'Wenn Sie die Geonames-ID dieses Standortes kennen, können Sie diese hier eingeben.',
'If you know what the OSM ID of this location is then you can enter it here.': 'Wenn Sie die OSM-ID dieses Standortes kennen, können Sie diese hier eingeben.',
'If you need to add a new document then you can click here to attach one.': 'Wenn Sie ein neues Dokument hinzufügen wollen, können Sie hier klicken, um eines anzufügen.',
'If you want several values, then separate with': 'Wenn Sie mehrere Werte möchten, dann trennen Sie diese mit',
'If you would like to help, then please': 'Wenn Sie helfen möchten, dann bitte',
'Ignore Errors?': 'Fehler ignorieren?',
'Illegal Immigrant': 'Illegaler Einwanderer',
'Illiterate': 'Analphabet',
'illiterate': 'Analphabet',
'Image Details': 'Details zum Bild',
'Image Tags': 'Tags für Bild',
'Image Type': 'Typ des Bilds',
'Image Upload': 'Bild hochladen',
'Image added': 'Bild hinzugefügt',
'Image deleted': 'Bild gelöscht',
'Image updated': 'Bild aktualisiert',
'Image': 'Bild',
'Imagery': 'Bilddaten',
'Images': 'Bilder',
'Impact Assessments': 'Folgenabschätzungen',
'Impact Details': 'Details zur Folge/Auswirkung',
'Impact Type Details': 'Details zum Typ der Auswirkung',
'Impact Type added': 'Typ der Auswirkung hinzugefügt',
'Impact Type deleted': 'Typ der Auswirkung gelöscht',
'Impact Type updated': 'Typ der Auswirkung aktualisiert',
'Impact Type': 'Auswirkungsart',
'Impact Types': 'Auswirkungsarten',
'Impact added': 'Auswirkung hinzugefügt',
'Impact deleted': 'Auswirkung gelöscht',
'Impact updated': 'Auswirkung aktualisiert',
'Impacts': 'Auswirkungen',
'Import & Export Data': 'Import & Export von Daten',
'Import Catalog Items': 'Importiere Katalogartikel',
'Import Data': 'Import von Daten',
'Import Event Types': 'Importiere Ereignistypen',
'Import File': 'Datei importieren',
'Import Heliports': 'Hubschrauberlandeplätze importieren',
'Import Incident Types': 'Ereignistypen importieren',
'Import Locations': 'Gebiete/Standorte importieren',
'Import Projects': 'Projekte importieren',
'Import Staff': 'Mitarbeiter importieren',
'Import Suppliers': 'Lieferanten importieren',
'Import Training Participants': 'Kursteilnehmer importieren',
'Import Users': 'Import von Benutzern',
'Import Volunteers': 'Freiwillige importieren',
'Import Warehouse Stock': 'Warenlagerbestand importieren',
'Import Warehouses': 'Warenlager importieren',
'Import and Export': 'Import und Export',
'Import from CSV': 'Import einer CSV-Datei',
'Import from OpenStreetMap': 'Import aus OpenStreetMap',
'Import from Ushahidi Instance': 'Import aus Ushahidi Instanz',
'Import Hours': 'Import Stundenliste',
'Import if Master': 'Import wenn Master',
'Import multiple tables as CSV': 'Mehrere Tabellen als CSV importieren',
'Import Participant List': 'Import Teilnehmerliste',
'Import Updates': 'Aktualisierungen importieren',
'Import Template Layout': 'Import Vorlagenlayout',
'Import Templates': 'Import Vorlagen',
'Import': 'Import',
'Important': 'Wichtig',
'Importantly where there are no aid services being provided': 'Insbesondere dort, wo keine Hilfeleistungen angeboten werden',
'Importing data from spreadsheets': 'Importieren von Daten aus Tabellendokumenten',
'Improper decontamination': 'Unsachgemäße Dekontamination',
'Improper handling of dead bodies': 'Unsachgemäße Behandlung von Leichen',
'Inactive': 'Inaktiv',
'Inactive/Disappeared': 'Inaktiv/Untergetaucht',
'In Catalogs': 'In Katalogen',
'In Inventories': 'In den Beständen',
'In Process': 'In Bearbeitung',
'In Progress': 'In Bearbeitung',
'In Window layout the map maximises to fill the window, so no need to set a large value here.': 'Beim Fenster-Layout wird die Karte maximiert, um das Fenster auszufüllen; daher ist es nicht notwendig, hier einen großen Wert festzulegen.',
'Inbound Mail Settings': 'Eingehende Mail-Einstellungen',
'InBox': 'Eingang',
'Incident Categories': 'Kategorien für Vorfälle',
'Incident Report Details': 'Details zum Vorfall-Bericht',
'Incident Report added': 'Vorfall-Bericht hinzugefügt',
'Incident Report deleted': 'Vorfall-Bericht gelöscht',
'Incident Report updated': 'Vorfall-Bericht aktualisiert',
'Incident Report': 'Vorfall-Bericht',
'Incident Reporting System': 'Vorfall-Berichtsystem',
'Incident Reporting': 'Vorfall-Berichtswesen',
'Incident Reports': 'Vorfall-Berichte',
'Incident': 'Vorfall',
'Incidents': 'Vorfälle',
'Incident Type': 'Vorfallstyp',
'Incident Types': 'Typen von Vorfällen',
'Incident Timeline': 'Zeitlicher Verlauf des Vorfalls',
'Incoming Shipment canceled': 'Eingehende Sendung abgebrochen',
'Incoming Shipment updated': 'Eingehende Sendung aktualisiert',
'Incoming': 'Eingehend',
'Incomplete': 'Unvollständig',
'Individuals': 'Einzelpersonen',
'Indirect support cost HQ': 'Indirekte Unterstützungskosten Hauptquartier',
'Industrial Crime': 'Industrielle Kriminalität',
'Industrial': 'Industriell',
'Industry Fire': 'Industriefeuer',
'Infant (0-1)': 'Säugling (0-1)',
'Infectious Disease (Hazardous Material)': 'Ansteckende Krankheit (gefährliches Material)',
'Infectious Disease': 'Ansteckende Krankheit',
'Infectious Diseases': 'Infektionskrankheiten',
'Infestation': 'Befall',
'Informal Leader': 'Informeller Leiter',
'Informal camp': 'Informelles Camp',
'Information gaps': 'Informationslücken',
'Infusion catheters available': 'Infusionskatheter verfügbar',
'Infusion catheters need per 24h': 'Benötigte Infusionskatheter pro 24h',
'Infusion catheters needed per 24h': 'Benötigte Infusionskatheter pro 24h',
'Infusions available': 'Infusionen verfügbar',
'Infusions needed per 24h': 'Benötigte Infusionen pro 24h',
'Initials': 'Namenskürzel',
'Inspected': 'Geprüft',
'Inspection Date': 'Prüfdatum',
'Inspection date and time': 'Datum und Uhrzeit der Überprüfung',
'Inspection time': 'Zeit der Überprüfung',
'Inspector ID': 'Prüfer-ID',
'Instant Porridge': 'Hafer-Fertigbrei',
'Institution': 'Institution',
'Instructions': 'Anweisungen',
'Instructions for handling of the case': 'Anweisungen zur Handhabung des Falls',
'Instructor': 'Ausbilder',
'Insufficient vars: Need module, resource, jresource, instance': 'Unzureichende vars: Benötige module, resource, jresource, instance',
'Insufficient': 'Nicht ausreichend',
'Insufficient Privileges': 'Fehlende Berechtigung',
'Intake Items': 'Annahme Güter',
'Integrated bath within housing unit': 'Bad in der Unterkunftseinheit vorhanden',
'Integrated shower within housing unit': 'Dusche in der Unterkunftseinheit vorhanden',
'Intergovernmental Organization': 'Zwischenstaatliche Organisation',
'Interior walls, partitions': 'Innenwände, Trennwände',
'Internal Resources': 'Interne Ressourcen',
'Internal Resource': 'Interne Ressource',
'Internal Shipment': 'Interne Lieferung',
'Internal State': 'Interner Zustand',
'International NGO': 'Internationale NGO',
'International Organization': 'Internationale Organisation',
'interpreter required': 'Dolmetscher erforderlich',
'Interview taking place at': 'Ort des Interviews',
'inv Home Page': 'inv Homepage',
'Invalid Case': 'Ungültiger Fall',
'Invalid Cases': 'Ungültige Fälle',
'Invalid Query': 'Ungültige Abfrage',
'Invalid request!': 'Ungültige Anfrage!',
'Invalid ticket': 'Ungültiges Ticket',
'Invalid': 'Ungültig',
'Inventories': 'Bestände',
'Inventory': 'Bestand',
'Inventory Item Details': 'Details zu einzelnem Bestandsartikel',
'Inventory Item updated': 'Bestandsartikel aktualisiert',
'Inventory Item': 'Bestandsartikel',
'Inventory Items include both consumable supplies & those which will get turned into Assets at their destination.': 'Bestandsartikel umfassen sowohl Verbrauchsmaterialien als auch solche, die am Bestimmungsort in Anlagen umgewandelt werden.',
'Inventory Items': 'Bestandsartikel',
'Inventory Management': 'Lagerbestandsverwaltung',
'Inventory of Effects': 'Verzeichnis der persönlichen Habe',
'Is editing level L%d locations allowed?': 'Ist die Bearbeitung von Level L%d Standorten zulässig?',
'Is it safe to collect water?': 'Ist es sicher, Wasser zu sammeln?',
'Is this a strict hierarchy?': 'Ist dies eine strenge Hierarchie?',
'Issuing Authority': 'Ausstellende Behörde',
'It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': 'Es erfasst nicht nur die Orte, an denen sie aktiv sind, sondern auch Informationen über den Umfang der Projekte, die sie im jeweiligen Gebiet durchführen.',
'Item Added to Shipment': 'Artikel der Lieferung hinzugefügt',
'Item Catalog Details': 'Details zum Artikelkatalog',
'Item Categories': 'Artikelkategorien',
'Item Category Details': 'Details zur Artikelkategorie',
'Item Category added': 'Artikelkategorie hinzugefügt',
'Item Category deleted': 'Artikelkategorie gelöscht',
'Item Category updated': 'Artikelkategorie aktualisiert',
'Item Category': 'Artikelkategorie',
'Item Details': 'Details zum Artikel',
'Item Pack Details': 'Details zum Artikelpaket',
'Item Pack added': 'Artikelpaket hinzugefügt',
'Item Pack deleted': 'Artikelpaket gelöscht',
'Item Pack updated': 'Artikelpaket aktualisiert',
'Item Packs': 'Artikelpakete',
'Item Tracking Status': 'Artikel Verfolgungsstatus',
'Item/Description': 'Artikel/Beschreibung',
'Items/Description': 'Artikel/Beschreibung',
'Item added to Inventory': 'Artikel zum Bestand hinzugefügt',
'Item added to shipment': 'Artikel der Lieferung hinzugefügt',
'Item added': 'Artikel hinzugefügt',
'Item already in Bundle!': 'Artikel bereits in Produktpaket!',
'Item already in Kit!': 'Artikel bereits in Ausstattung (Kit)!',
'Item already in budget!': 'Artikel bereits im Budget!',
'Item deleted': 'Artikel gelöscht',
'Item removed from Inventory': 'Artikel aus dem Bestand entfernt',
'Item updated': 'Artikel aktualisiert',
'Item': 'Artikel',
'Items in Category are Vehicles': 'Artikel in dieser Kategorie sind Fahrzeuge',
'Items in Category can be Assets': 'Artikel in der Kategorie können als Anlagen verwendet werden',
'Items': 'Artikel',
'Japanese': 'Japanisch',
'Jerry can': 'Kanister',
'Jew': 'Jude',
'Jewish': 'Jüdisch',
'Job Role Catalog': 'Katalog für Tätigkeiten',
'Job Role Details': 'Details zur Tätigkeit',
'Job Role added': 'Tätigkeit hinzugefügt',
'Job Role deleted': 'Tätigkeit entfernt',
'Job Role updated': 'Tätigkeit aktualisiert',
'Job Role': 'Tätigkeit',
'Job Roles': 'Tätigkeiten',
'Job Title': 'Berufsbezeichnung',
'Job Title Catalog': 'Katalog der Berufsbezeichnungen',
'Journal Entry Details': 'Details zum Journaleintrag',
'Journal entry added': 'Journaleintrag hinzugefügt',
'Journal entry deleted': 'Journaleintrag gelöscht',
'Journal entry updated': 'Journaleintrag aktualisiert',
'Key Details': 'Details zum Schlüssel',
'Key added': 'Schlüssel hinzugefügt',
'Key deleted': 'Schlüssel gelöscht',
'Key updated': 'Schlüssel aktualisiert',
'Key': 'Schlüssel',
'Keys': 'Schlüssel',
'Kit Contents': 'Inhalt der Ausstattung (Kit)',
'Kit Details': 'Details zur Ausstattung (Kit)',
'Kit Updated': 'Ausstattung (Kit) aktualisiert',
'Kit added': 'Ausstattung (Kit) hinzugefügt',
'Kit deleted': 'Ausstattung (Kit) gelöscht',
'Kit updated': 'Ausstattung (Kit) aktualisiert',
'Kits': 'Ausstattungen (Kits)',
'Kit': 'Ausstattung (Kit)',
'Kit?': 'Ausstattung (Kit)?',
'Kitting': 'Ausstattung zusammenstellen',
'Kittings': 'Ausstattungszusammenstellungen',
'Known Identities': 'Bekannte Identitäten',
'Known incidents of violence against women/girls': 'Bekannte Fälle von Gewalt gegen Frauen/Mädchen',
'Known incidents of violence since disaster': 'Bekannte Fälle von Gewalt seit der Katastrophe',
'LICENSE': 'LIZENZ',
'Lack of material': 'Mangel an Material',
'Lack of school uniform': 'Fehlende Schuluniformen',
'Lack of supplies at school': 'Fehlende Vorräte an der Schule',
'Lack of transport to school': 'Fehlende Transportmöglichkeiten zur Schule',
'Lactating women': 'Stillende Frauen',
'Lahar': 'Mure',
'Landslide': 'Erdrutsch',
'Language': 'Sprache',
'Language / Communication Mode': 'Sprache / Verständigungsmodus',
'Last Downloaded': 'Zuletzt heruntergeladen',
'Last Name': 'Nachname',
'Last Pull': 'Letzter Pull',
'Last Push': 'Letzter Push',
'Last known location': 'Letzte bekannte Position',
'Last seen on': 'Zuletzt gesehen am',
'Last synchronization time': 'Zeitpunkt der letzten Synchronisierung',
'Last updated by': 'Letzte Aktualisierung durch',
'Last updated on': 'Letzte Aktualisierung am',
'Last updated': 'Letzte Aktualisierung',
'Last': 'Letzte',
'Last Check-in': 'Letzter Check-in',
'Last Check-out': 'Letzter Check-out',
'Latest Information': 'Aktuelle Informationen',
'Latitude & Longitude': 'Breitengrad und Längengrad',
'Latitude is North-South (Up-Down).': 'Breitengrad ist Nord-Süd (Oben-Unten).',
'Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere.': 'Der Breitengrad ist Null am Äquator, positiv auf der nördlichen und negativ auf der südlichen Erdhalbkugel.',
'Latitude of Map Center': 'Breitengrad der Kartenmitte',
'Latitude of far northern end of the region of interest.': 'Nördlichster Breitengrad der betroffenen Region.',
'Latitude of far southern end of the region of interest.': 'Südlichster Breitengrad der betroffenen Region.',
'Latitude should be between': 'Breite muss zwischen',
'Latitude': 'Breitengrad',
'Latrines': 'Latrinen',
'Law enforcement, military, homeland and local/private security': 'Strafverfolgung, Militär, Heimatschutz und lokale/private Sicherheitsdienste',
'Layer Poperties': 'Eigenschaften der Kartenebene',
'Layer added': 'Layer hinzugefügt',
'Layer deleted': 'Layer gelöscht',
'Layer updated': 'Layer aktualisiert',
'Layer': 'Kartenebene',
'Layers updated': 'Kartenebenen aktualisiert',
'Layers': 'Kartenebenen',
'Leader': 'Leiter',
'Lead Implementer': 'Hauptimplementierer',
'Left Voluntarily': 'Freiwillig ausgereist',
'Legally Departed': 'Legal abgereist',
'Legend Format': 'Format der Legende',
'Legend': 'Legende',
'Length (m)': 'Länge (m)',
'Less Options': 'Weniger Optionen',
'Level of Award': 'Stufe der Auszeichnung',
'Level 1 Assessment Details': 'Stufe 1 Beurteilung - Details',
'Level 1 Assessment added': 'Stufe 1 Beurteilung hinzugefügt',
'Level 1 Assessment deleted': 'Stufe 1 Beurteilung entfernt',
'Level 1 Assessment updated': 'Stufe 1 Beurteilung aktualisiert',
'Level 1 Assessments': 'Stufe 1 Beurteilungen',
'Level 1': 'Stufe 1',
'Level 2 Assessment Details': 'Stufe 2 Beurteilung - Details',
'Level 2 Assessment added': 'Stufe 2 Beurteilung hinzugefügt',
'Level 2 Assessment deleted': 'Stufe 2 Beurteilung entfernt',
'Level 2 Assessment updated': 'Stufe 2 Beurteilung aktualisiert',
'Level 2 Assessments': 'Stufe 2 Beurteilungen',
'Level 2 or detailed engineering evaluation recommended': 'Stufe 2 oder detaillierte technische Evaluierung empfohlen',
'Level 2': 'Stufe 2',
'Level 3': 'Stufe 3',
'Level': 'Stufe',
'Library support not available for OpenID': 'OpenID wird von Bibliothek nicht unterstützt',
'License Plate': 'Nummernschild',
'LineString': 'LineString',
'Link to this result': 'Link zu diesem Ergebnis',
'List / Add Baseline Types': 'Arten von Referenzdaten auflisten / hinzufügen',
'List / Add Impact Types': 'Arten von Auswirkungen auflisten / hinzufügen',
'List / Add Services': 'Leistungen auflisten / hinzufügen',
'List / Add Types': 'Typen auflisten / hinzufügen',
'List Activities': 'Aktivitäten auflisten',
'List All Assets': 'Alle Anlagen auflisten',
'List All Catalog Items': 'Auflisten aller Artikel aus dem Katalog',
'List All Commitments': 'Auflisten aller Zusagen',
'List All Entries': 'Alle Einträgen auflisten',
'List All Item Categories': 'Auflisten aller Artikelkategorien',
'List All Memberships': 'Alle Mitgliedschaften auflisten',
'List All Organization Approvers & Whitelists': 'Zeige alle Organisationsbestätiger & Whitelists',
'List All Received Shipments': 'Auflisten aller empfangenen Lieferungen',
'List All Records': 'Auflisten aller Datensätze',
'List All Requested Items': 'Auflisten aller angefragten Artikel',
'List All Requests': 'Auflisten aller Anfragen',
'List All Roles': 'Zeige alle Rollen',
'List All Sent Shipments': 'Liste aller gesendeten Lieferungen',
'List All Users': 'Zeige alle Nutzer',
'List All Vehicles': 'Liste aller Fahrzeuge',
'List All': 'Alle auflisten',
'List Alternative Items': 'Liste alternativer Artikel',
'List Appointment Types': 'Liste Terminarten',
'List Assessment Summaries': 'Zusammenfassungen der Beurteilungen auflisten',
'List Assessments': 'Beurteilungen auflisten',
'List Assets': 'Anlagen auflisten',
'List Availability': 'Liste Verfügbarkeit',
'List Baseline Types': 'Liste der Typen von Referenzdaten',
'List Baselines': 'Liste der Referenzdaten',
'List Brands': 'Marken auflisten',
'List Budgets': 'Budgets auflisten',
'List Bundles': 'Produktpakete auflisten',
'List Camp Services': 'Liste der Leistungen im Camp',
'List Camp Types': 'Liste Typen von Camps',
'List Camps': 'Liste Camps',
'List Case Flags': 'Fall Flaggen auflisten',
'List Catalog Items': 'Katalogelemente auflisten',
'List Catalogs': 'Liste Kataloge',
'List Certificates': 'Liste Zertifikate',
'List Certifications': 'Liste Zertifizierungen',
'List Checklists': 'Checklisten auflisten',
'List Cluster Subsectors': 'Cluster-Teilbereiche auflisten',
'List Clusters': 'Cluster auflisten',
'List Commitment Items': 'Liste zugesagter Artikel',
'List Commitments': 'Liste Zusagen',
'List Competencies': 'Liste Kompetenzen',
'List Competency Ratings': 'Liste Kompetenzrating',
'List Conflicts': 'Liste Konflikte',
'List Contact Information': 'Liste Kontaktinformationen',
'List Contacts': 'Liste Kontakte',
'List Course Certificates': 'Liste Kurszertifikate',
'List Courses': 'Liste Kurse',
'List Credentials': 'Liste von Qualifikationen',
'List Current': 'Aktuelle Liste',
'List Documents': 'Liste Dokumente',
'List Donors': 'Liste Spender',
'List Events': 'Liste Ereignisse',
'List Event Types': 'Liste der Ereignistypen',
'List Facilities': 'Liste Einrichtungen',
'List Family Members': 'Liste Familienmitglieder',
'List Feature Layers': 'Liste Objekt-Layer',
'List Flood Reports': 'Liste Flutberichte',
'List Groups': 'Liste Gruppen',
'List Groups/View Members': 'Liste Gruppen/Anzeige der Mitglieder',
'List Hospitals': 'Liste Krankenhäuser',
'List Human Resources': 'Liste der personellen Ressourcen',
'List Identities': 'Identitäten auflisten',
'List Images': 'Bilder auflisten',
'List Impact Assessments': 'Folgenabschätzung auflisten',
'List Impact Types': 'Auswirkungsarten auflisten',
'List Impacts': 'Auswirkungen auflisten',
'List Incident Reports': 'Vorfallberichte auflisten',
'List Item Categories': 'Liste Artikelkategorien',
'List Item Packs': 'Liste der Artikelpakete',
'List Items in Inventory': 'Liste der Artikel im Bestand',
'List Items': 'Liste der Artikel',
'List Job Roles': 'Liste der Tätigkeiten',
'List Keys': 'Schlüssel auflisten',
'List Kits': 'Liste Ausstattungen (Kits)',
'List Layers': 'Liste Layer',
'List Level 1 Assessments': 'Liste Stufe 1 Beurteilungen',
'List Level 1 assessments': 'Liste Stufe 1 Beurteilungen',
'List Level 2 Assessments': 'Liste Stufe 2 Beurteilungen',
'List Level 2 assessments': 'Liste Stufe 2 Beurteilungen',
'List Locations': 'Standorte auflisten',
'List Log Entries': 'Protokolleinträge auflisten',
'List Map Profiles': 'Liste der Kartenkonfigurationen',
'List Markers': 'Marker/Symbole auflisten',
'List Members': 'Mitglieder auflisten',
'List Memberships': 'Mitgliedschaften auflisten',
'List Messages': 'Nachrichten auflisten',
'List Missing Persons': 'Vermisste Personen auflisten',
'List Missions': 'Liste Aufträge',
'List Need Types': 'Bedarftypen auflisten',
'List Needs': 'Bedarf auflisten',
'List Offices': 'Liste der Büros',
'List Organizations': 'Liste der Organisationen',
'List Peers': 'Liste der Peers',
'List Personal Effects': 'Liste der persönlichen Habe',
'List Persons': 'Liste der Personen',
'List Photos': 'Liste der Bilder',
'List Population Statistics': 'Liste Bevölkerungsstatistiken',
'List Positions': 'Liste der Positionen',
'List Problems': 'Liste der Probleme',
'List Projections': 'Liste der Kartenprojektionen',
'List Projects': 'Liste Projekte',
'List Rapid Assessments': 'Liste Schnell-Beurteilungen',
'List Recurring Requests': 'Liste wiederkehrender Anfragen',
'List Received Items': 'Liste empfangene Artikel',
'List Received Shipments': 'Liste empfangene Lieferungen',
'List Records': 'Liste Datensätze',
'List Registrations': 'Liste Registrierungen',
'List Reports': 'Liste Berichte',
'List Request Items': 'Angefragte Artikel auflisten',
'List Requests': 'Anfragen auflisten',
'List Residents Reports': 'Übersicht Bewohnerlisten',
'List Resources': 'Ressourcen auflisten',
'List Rivers': 'Flüsse auflisten',
'List Roles': 'Rollen auflisten',
'List Rooms': 'Liste Räume',
'List Scenarios': 'Liste Szenarien',
'List Sections': 'Abschnitte auflisten',
'List Sectors': 'Bereiche auflisten',
'List Sent Items': 'Gesendete Artikel auflisten',
'List Sent Shipments': 'Liste verschickte Lieferungen',
'List Service Profiles': 'Leistungsprofile auflisten',
'List Settings': 'Einstellungen auflisten',
'List Shelter Services': 'Leistungen der Unterkunft auflisten',
'List Shelter Types': 'Typen der Unterkunft auflisten',
'List Shelters': 'Unterkünfte auflisten',
'List Site Needs': 'Alle Bedarfe',
'List Skill Equivalences': 'Liste Fähigkeits-Vergleichbarkeiten',
'List Skill Provisions': 'Fähigkeits-Bereitstellungen auflisten',
'List Skill Types': 'Liste der Typen von Fähigkeiten',
'List Skills': 'Liste Fähigkeiten',
'List Solutions': 'Liste Lösungen',
'List Staff Types': 'Mitarbeitertypen auflisten',
'List Status': 'Status auflisten',
'List Subscriptions': 'Abonnements anzeigen',
'List Subsectors': 'Teilbereiche auflisten',
'List Support Requests': 'Liste der Anfragen nach Unterstützung',
'List Survey Answers': 'Liste Umfrage-Antworten',
'List Survey Questions': 'Liste Umfrage-Fragen',
'List Survey Series': 'Liste Umfrage-Serien',
'List Survey Templates': 'Liste Umfrage-Vorlagen',
'List Tasks': 'Aufgaben auflisten',
'List Teams': 'Teams auflisten',
'List Themes': 'Themen auflisten',
'List Tickets': 'Tickets auflisten',
'List Tracks': 'Tracks auflisten',
'List Trainings': 'Schulungen/Ausbildung auflisten',
'List Units': 'Einheiten auflisten',
'List Users': 'Liste Benutzer',
'List Warehouses': 'Liste Warenlager',
'List all': 'Alle auflisten',
'List available Scenarios': 'Liste verfügbarer Szenarien',
'List of Items': 'Liste der Artikel',
'List of Missing Persons': 'Liste der vermissten Personen',
'List of Peers': 'Liste der Peers',
'List of Reports': 'Liste der Berichte',
'List of Requests': 'Liste der Anfragen',
'List of Spreadsheets uploaded': 'Liste der hochgeladenen Tabellen',
'List of Spreadsheets': 'Liste der Tabellen',
'List of Volunteers for this skill set': 'Liste der Freiwilligen für dieses Fachgebiet',
'List of Volunteers': 'Liste der Freiwilligen',
'List of addresses': 'Liste der Adressen',
'List unidentified': 'Nicht identifizierte Objekte auflisten',
'List': 'Liste',
'List/Add': 'Auflisten/Hinzufügen',
'Lists "who is doing what & where". Allows relief agencies to coordinate their activities': 'Liste "Wer macht was & wo". Ermöglicht Hilfsorganizationen, ihre Aktivitäten zu koordinieren',
'Literacy': 'Schriftkundigkeit',
'literate': 'schriftkundig',
'Live Help': 'Aktuelle Hilfe',
'Livelihood': 'Lebensgrundlage',
'Load Cleaned Data into Database': 'Bereinigte Daten in die Datenbank laden',
'Load Raw File into Grid': 'Unformatierte Datei ins Grid laden',
'Loading': 'Wird geladen',
'Loading Equipment': 'Be-/Entladeausstattung',
'Local Name': 'Lokaler Name',
'Local Names': 'Lokale Namen',
'Location 1': 'Standort 1',
'Location 2': 'Standort 2',
'Location Detail': 'Details zum Gebiet/Standort',
'Location Details': 'Standortdetails',
'Location Hierarchies': 'Standort-Hierarchien',
'Location Hierarchy Level 0 Name': 'Standort-Hierarchie Level 0 Name',
'Location Hierarchy Level 1 Name': 'Standort-Hierarchie Level 1 Name',
'Location Hierarchy Level 2 Name': 'Standort-Hierarchie Level 2 Name',
'Location Hierarchy Level 3 Name': 'Standort-Hierarchie Level 3 Name',
'Location Hierarchy Level 4 Name': 'Standort-Hierarchie Level 4 Name',
'Location Hierarchy Level 5 Name': 'Standort-Hierarchie Level 5 Name',
'Location added': 'Standort hinzugefügt',
'Location deleted': 'Standort gelöscht',
'Location group cannot be a parent.': 'Eine Standortgruppe kann kein übergeordnetes Element sein.',
'Location group cannot have a parent.': 'Eine Standortgruppe kann kein übergeordnetes Element haben.',
'Location groups can be used in the Regions menu.': 'Standortgruppen können im Menü "Regionen" verwendet werden.',
'Location groups may be used to filter what is shown on the map and in search results to only entities covered by locations in the group.': 'Standortgruppen können genutzt werden, um die Anzeige auf der Karte und in Suchergebnissen auf Objekte einzuschränken, die von Standorten der Gruppe abgedeckt werden.',
'Location updated': 'Standort aktualisiert',
'Location': 'Standort',
'Locations of this level need to have a parent of level': 'Standorte dieser Ebene müssen ein übergeordnetes Element der folgenden Ebene haben',
'Locations': 'Standorte',
'Lockdown': 'Sperrung',
'Log Entry Details': 'Details zum Protokolleintrag',
'Log entry added': 'Protokolleintrag hinzugefügt',
'Log entry deleted': 'Protokolleintrag gelöscht',
'Log entry updated': 'Protokolleintrag aktualisiert',
'Log': 'Protokoll',
'Logged By': 'Protokolliert durch',
'Logged in': 'Eingeloggt',
'Logged out': 'Ausgeloggt',
'Login': 'Anmeldung',
'Logistics Management System': 'Logistik Managementsystem',
'Logistics': 'Logistik',
'Logo file %s missing!': 'Logo-Datei %s fehlt!',
'Logout': 'Abmelden',
'Long Name': 'Langschriftlicher Name',
'Long Text': 'Langer Text',
'Longitude is West - East (sideways).': 'Die geographische Länge ist West - Ost (seitlich).',
'Longitude is West-East (sideways).': 'Die geographische Länge ist West-Ost (seitlich).',
'Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': 'Die geographische Länge ist Null am Nullmeridian (Greenwich Mean Time) und positiv in Richtung Osten, über Europa und Asien. In Richtung Westen, über den Atlantik und Amerika, ist sie negativ.',
'Longitude is zero on the prime meridian (through Greenwich, United Kingdom) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': 'Die geographische Länge ist Null am Nullmeridian (durch Greenwich, Vereinigtes Königreich) und positiv in Richtung Osten, über Europa und Asien. In Richtung Westen, über den Atlantik und Amerika, ist sie negativ.',
'Longitude of Map Center': 'Geographische Länge des Kartenmittelpunktes',
'Longitude of far eastern end of the region of interest.': 'Geographische Länge des östlichen Endes des Interessensgebietes.',
'Longitude of far western end of the region of interest.': 'Geographische Länge des westlichen Endes des Interessensgebietes.',
'Longitude should be between': 'Die geographische Länge soll in folgendem Bereich liegen',
'Longitude': 'Geographische Länge',
'Looting': 'Plünderung',
'Lost Password': 'Kennwort vergessen',
'Lost': 'Verloren',
'Low': 'Niedrig',
'Low Tide Depth': 'Tiefe bei minimaler Tide',
'Magnetic Storm': 'Magnetischer Sturm',
'Mail': 'Post',
'Main Facility': 'Haupteinrichtung',
'Major Damage': 'Großer Schaden',
'Major expenses': 'Hauptausgaben',
'Major outward damage': 'Großer äußerer Schaden',
'Major': 'Maßgeblich',
'Make Commitment': 'Eine Zusage machen',
'Make New Commitment': 'Neue Zusage machen',
'Make Request': 'Anfrage erstellen',
'Make Supplies Request': 'Artikelanfrage stellen',
'Make preparations per the <instruction>': 'Vorbereitungen gemäß <instruction> treffen',
'Male': 'Männlich',
'Manage Appointments': 'Terminverwaltung',
'Manage Layers in Catalog': 'Kartenebenen im Katalog verwalten',
'Manage Relief Item Catalogue': 'Katalog der Hilfsgüter verwalten',
'Manage Users & Roles': 'Benutzer- und Rollenverwaltung',
'Manage Warehouses/Sites': 'Warenlager/Orte verwalten',
'Manage Your Facilities': 'Eigene Einrichtungen verwalten',
'Manage requests for supplies, assets, staff or other resources. Matches against Inventories where supplies are requested.': 'Verwaltung der Anfragen nach Vorräten, Anlagen, Mitarbeitern oder anderen Ressourcen. Vergleich mit den Beständen, wo Vorräte angefordert werden',
'Manage requests of hospitals for assistance.': 'Verwaltung der Anfragen von Krankenhäusern nach Unterstützung.',
'Manage volunteers by capturing their skills, availability and allocation': 'Verwaltung der freiwilligen Helfer anhand ihrer Fähigkeiten, Verfügbarkeit und Zuordnung',
'Managing Office': 'Verwaltungsbüro',
'Mandatory Appointment': 'Obligatorischer Termin',
'Mandatory for Children': 'Obligatorisch für Kinder',
'Mandatory for Adolescents': 'Obligatorisch für Jugendliche',
'Mandatory for Adults': 'Obligatorisch für Erwachsene',
'Mandatory. In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).': 'Zwingend erforderlich. Beim GeoServer ist dies der Name des Layers. In den WFS getCapabilities ist es der Teil des FeatureType-Namens nach dem Doppelpunkt (:).',
'Mandatory. The URL to access the service.': 'Zwingend erforderlich. Die URL für den Zugriff auf den Dienst.',
'Manual Synchronization': 'Manuelle Synchronisation',
'Manual': 'Anleitung',
'Many': 'Viele',
'Map Center Latitude': 'Geographische Breite des Kartenmittelpunktes',
'Map Center Longitude': 'Geographische Länge des Kartenmittelpunktes',
'Map Profile Details': 'Details zur Kartenkonfiguration ',
'Map Profile added': 'Kartenkonfiguration hinzugefügt',
'Map Profile deleted': 'Kartenkonfiguration gelöscht',
'Map Profile removed': 'Kartenkonfiguration entfernt',
'Map Profile updated': 'Kartenkonfiguration aktualisiert',
'Map Profile': 'Kartenkonfiguration',
'Map Profiles': 'Kartenkonfigurationen',
'Map Height': 'Höhe des Kartenfensters',
'Map Service Catalog': 'Karten Service-Katalog',
'Map Settings': 'Karteneinstellungen',
'Map Styles': 'Kartensymbolisierungen',
'Map Viewing Client': 'Kartenviewer',
'Map Width': 'Breite des Kartenfensters',
'Map Zoom': 'Kartenvergrößerung',
'Map of Hospitals': 'Karte der Krankenhäuser',
'Map of Offices': 'Karte der Büros',
'Map of Requests': 'Karte der Anfragen',
'Map of Vehicles': 'Karte der Fahrzeuge',
'Map': 'Karte',
'Marine Security': 'Maritime Sicherheit',
'Marital Status': 'Familienstand',
'Marker Details': 'Details zum Marker/Symbol',
'Marker added': 'Marker/Symbol hinzugefügt',
'Mark as duplicate': 'Als Duplikat markieren',
'Marker deleted': 'Marker/Symbol gelöscht',
'Marker updated': 'Marker/Symbol hinzugefügt',
'Marker': 'Marker/Symbol',
'Markers': 'Marker/Symbole',
'Master Message Log to process incoming reports & requests': 'Haupt-Nachrichtenprotokoll um eingehende Berichte und Anfragen zu bearbeiten',
'Master Message Log': 'Haupt-Nachrichtenprotokoll',
'Match Percentage': 'Grad der Übereinstimmung',
'Match Requests': 'Passende Anfrage',
'Match percentage indicates the % match between these two records': 'Der Grad der Übereinstimmung gibt die prozentuale Übereinstimmung zwischen zwei Datensätzen an',
'Match?': 'Übereinstimmung?',
'Matching Catalog Items': 'Übereinstimmende Katalogelemente',
'Matching Items': 'Übereinstimmende Artikel',
'Matching Records': 'Übereinstimmende Datensätze',
'Maximum Extent': 'Maximale Ausdehnung',
'Maximum Location Latitude': 'Maximale Geographische Breite des Gebietes',
'Maximum Location Longitude': 'Maximale Geographische Länge des Gebietes',
'Max Height': 'Max Höhe',
'Medical': 'Medizin',
'Medical and public health': 'Medizinische Betreuung und öffentliches Gesundheitswesen',
'Medium': 'Mittel',
'Megabytes per Month': 'Megabytes pro Monat',
'Member removed from Group': 'Mitglied aus Gruppe entfernt',
'Members': 'Mitglieder',
'Membership Details': 'Details zur Mitgliedschaft',
'Membership Fee': 'Mitgliedsbeitrag',
'Membership Paid': 'Kostenpflichtige Mitgliedschaft',
'Membership Types': 'Mitgliedschaftstypen',
'Membership updated': 'Mitgliedschaft aktualisiert',
'Membership': 'Mitgliedschaft',
'Memberships': 'Mitgliedschaften',
'Message Details': 'Details zur Nachricht',
'Message Log': 'Nachrichtenprotokoll',
'Message Variable': 'Nachrichtenvariable',
'Message added': 'Nachricht hinzugefügt',
'Message deleted': 'Nachricht gelöscht',
'Message updated': 'Nachricht aktualisiert',
'Message variable': 'Nachrichtenvariable',
'Message': 'Nachricht',
'Messages': 'Nachrichten',
'Messaging settings updated': 'Einstellungen zur Nachrichtenübertragung aktualisiert',
'Messaging': 'Nachrichtenübertragung',
'Measure Length: Click the points along the path & end with a double-click': 'Längenmessung: Punkte entlang eines Verlaufs anklicken und mit Doppelklick abschließen',
'Meteorite': 'Meteorit',
'Meteorological (inc. flood)': 'Meteorologisch (auch Flut)',
'Method used': 'Verwendete Methode',
'Middle Name': 'Zweiter Vorname',
'Migrants or ethnic minorities': 'Migranten oder ethnische Minderheiten',
'Military': 'Militär',
'Military Grid Reference System PDFs': 'Military Grid Reference System PDFs',
'Minimum Location Latitude': 'Minimale Geographische Breite des Gebietes',
'Minimum Location Longitude': 'Minimale Geographische Länge des Gebietes',
'Minimum shift time is 6 hours': 'Minimum Dienstzeit ist sechs Stunden.',
'Minor Damage': 'Kleinere Schäden',
'Minor/None': 'Gering / Keine',
'Minorities participating in coping activities': 'Minderheiten beteiligen sich an Bewältigungsaktivitäten / Krisenbewältigungsaktivitäten',
'Minutes must be a number between 0 and 60': 'Minuten muss eine Zahl zwischen 0 und 60 sein',
'Minutes per Month': 'Minuten pro Monat',
'Minutes should be a number greater than 0 and less than 60': 'Minuten muss eine Zahl größer als 0 und kleiner als 60 sein',
'Miscellaneous': 'Verschiedenes',
'Missed': 'Verpasst',
'missed': 'verpasst',
'Missing Person Details': 'Nähere Angaben zur vermissten Person',
'Missing Person Registry': 'Register der vermissten Personen',
'Missing Person': 'Vermisste Person',
'Missing Persons Registry': 'Register der vermissten Personen',
'Missing Persons Report': 'Bericht über vermisste Personen',
'Missing Persons': 'Vermisste Personen',
'Missing Report': 'Bericht über Vermisste',
'Missing Senior Citizen': 'Vermisster älterer Bürger',
'Missing Vulnerable Person': 'Vermisste gefährdete Person',
'Missing': 'Fehlend',
'Mission Record': 'Auftragsbericht',
'Mission added': 'Auftrag hinzugefügt',
'Mission deleted': 'Auftrag gelöscht',
'Mission updated': 'Auftrag aktualisiert',
'Missions': 'Aufträge',
'Mobile Basic Assessment': 'Mobile Grundlegende Beurteilung',
'Mobile Commons Channels': 'Mobile Commons Kanäle',
'Mobile Phone': 'Mobiltelefon',
'Mobile': 'Handy',
'Mode': 'Modus',
'Model/Type': 'Modell/Typ',
'Modem Settings': 'Modemeinstellungen',
'Modem settings updated': 'Modemeinstellungen aktualisiert',
'Moderate': 'Moderat',
'Modified by': 'Geändert von',
'Modify Information on groups and individuals': 'Anpassen der Information über Gruppen und Einzelpersonen',
'Modifying data in spreadsheet before importing it to the database': 'Anpassen von Daten in der Tabelle vor dem Import in die Datenbank',
'Module provides access to information on current Flood Levels.': 'Modul bietet Zugriff auf Information zum aktuellen Stand der Flut',
'Module': 'Modul',
'Monday': 'Montag',
'Monetization Report': 'Monetarisierungsbericht',
'Monitoring Frequency': 'Monitoring Frequenz',
'Monthly Cost': 'Monatliche Kosten',
'Monthly Salary': 'Monatliches Gehalt',
'Month': 'Monat',
'Monthly': 'Monatlich',
'Months': 'Monate',
'More': 'Mehr',
'More Options': 'Mehr Optionen',
'Morgue Status': 'Status der Leichenhalle',
'Morgue Units Available': 'Leichenhallenplätze verfügbar',
'Mosque': 'Moschee',
'Mother': 'Mutter',
'Motorcycle': 'Motorrad',
'Moustache': 'Schnurrbart',
'MultiPolygon': 'MultiPolygon',
'Multiple Matches': 'Mehrere Übereinstimmungen',
'Multiple': 'Mehrere',
'Muslim': 'Moslem',
'Must a location have a parent location?': 'Muss ein Standort einen übergeordneten Standort haben?',
'My Current function': 'Meine aktuelle Funktion',
'My Tasks': 'Meine Aufgaben',
'My Open Tasks': 'Meine unerledigten Aufgaben',
'N/A': 'Nicht zutreffend',
'NO': 'NEIN',
'NZSEE Level 1': 'NZSEE Stufe 1',
'NZSEE Level 2': 'NZSEE Stufe 2',
'Name and/or ID': 'Name und/oder ID',
'Name of Award': 'Name der Auszeichnung',
'Name of Driver': 'Name des Fahrers',
'Name of Institute': 'Name der Institution',
'Name of the file (& optional sub-path) located in static which should be used for the background of the header.': 'Name der Datei (& optionales Unterverzeichnis) die sich in static befindet und die für den Hintergrund des Headers benutzt werden soll.',
'Name of the file (& optional sub-path) located in static which should be used for the top-left image.': 'Name der Datei (& optionales Unterverzeichnis) die sich in static befindet und für das obere linke Bild verwendet werden soll.',
'Name of the file (& optional sub-path) located in views which should be used for footer.': 'Name der Datei (& optionales Unterverzeichnis) die sich in views befindet und für die Fußzeile verwendet werden soll.',
'Name of the person in local language and script (optional).': 'Name der Person in lokaler Sprache und Schreibweise (optional).',
'Name': 'Name',
'Name, Org and/or ID': 'Name, Org und/oder ID',
'Names can be added in multiple languages': 'Namen können in mehreren Sprachen hinzugefügt werden',
'National ID Card': 'Nationaler Identitätsnachweis',
'National NGO': 'Nationale NGO',
'Nationality of the person.': 'Nationalität der Person.',
'Nationality': 'Nationalität',
'native': 'Muttersprache',
'Nautical Accident': 'See-Unfall',
'Nautical Hijacking': 'See-Entführung',
'Need Details': 'Details zum Bedarf',
'Need Type Details': 'Details zum Bedarfstyp',
'Need Type added': 'Bedarfstyp hinzugefügt',
'Need Type deleted': 'Bedarfstyp gelöscht',
'Need Type updated': 'Bedarfstyp aktualisiert',
'Need Type': 'Bedarfstyp',
'Need Types': 'Bedarfstypen',
'Need added': 'Bedarf hinzugefügt',
'Need deleted': 'Bedarf gelöscht',
'Need to be logged-in to be able to submit assessments': 'Sie müssen eingeloggt sein um Beurteilungen zu veröffentlichen',
'Need to configure Twitter Authentication': 'Die Twitter Authentifizierungsdaten müssen konfiguriert sein',
'Need to specify a Budget!': 'Sie müssen ein Budget angegeben!',
'Need to specify a Kit!': 'Müssen Sie eine Ausstattung (Kit) angeben!',
'Need to specify a Resource!': 'Sie müssen eine Ressource angeben.',
'Need to specify a bundle!': 'Sie müssen ein Produktpaket angeben!',
'Need to specify a group!': 'Sie müssen einen Gruppe angeben!',
'Need to specify a location to search for.': 'Sie müssen ein Gebiet/Position für die Suche angeben.',
'Need to specify a role!': 'Sie müssen eine Rolle definieren!',
'Need to specify a table!': 'Sie müssen einen Tabellennamen angeben!',
'Need to specify a user!': 'Ein Benutzer muss angegeben werden!',
'Need updated': 'Bedarf aktualisiert',
'Needs Details': 'Details zum Bedarf',
'Needs Maintenance': 'Braucht Wartung',
'Needs to reduce vulnerability to violence': 'Handlungsbedarf um die Anfälligkeit für Gewalt zu verringern',
'Need': 'Bedarf',
'Needs': 'Bedarf',
'Neighborhood': 'Nachbarschaft',
'Neighbouring building hazard': 'Risiko durch benachbarte Gebäude',
'Neonatal ICU': 'Neugeborenen ICU',
'Neonatology': 'Neonatologie',
'Network': 'Netzwerk',
'Neurology': 'Neurologie',
'New Assessment reported from': 'Neue Beurteilung erstellt durch',
'New Certificate': 'Neues Zertifikat',
'New Checklist': 'Neue Prüfliste',
'New Entry': 'Neuer Eintrag',
'New Event': 'Neues Ereignis',
'New Item Category': 'Neue Kategorie für Artikel',
'New Job Role': 'Neue Tätigkeit',
'New Location Group': 'Neue Standortgruppe',
'New Location': 'Neuer Standort/Gebiet',
'New Peer': 'Neuer Peer',
'New Record': 'Neuer Datensatz',
'New Request': 'Neue Anfrage',
'New Role': 'Neue Rolle',
'New Scenario': 'Neues Szenario',
'New Skill': 'Neue Fähigkeit',
'New Solution Choice': 'Neue Lösungswahl',
'New Staff Member': 'Neuer Mitarbeiter',
'New Status': 'Neuer Status',
'New Stock Count': 'Neue Anzahl des Lagerbestands',
'New Support Request': 'Neue Unterstützungsanfrage',
'New Synchronization Peer': 'Neuer Synchronisations-Peer',
'New Team': 'Neues Team',
'New Training Course': 'Neuer Schulungskurs',
'New Volunteer': 'Neuer Freiwilliger',
'New cases in the past 24h': 'Neue Fälle in den letzten 24h',
'New': 'Neu',
'Next': 'Nächste',
'No': 'Nein',
'No Activities Found': 'Keine Aktivitäten gefunden',
'No Alternative Items currently registered': 'Zurzeit sind keine alternativen Artikel registriert',
'No Assessment Summaries currently registered': 'Zurzeit sind keine Beurteilungszusammenfassungen registriert',
'No Assessments currently registered': 'Zurzeit sind keine Beurteilungen registriert',
'No Assets currently registered in this event': 'Zurzeit sind keine Anlagen zu diesem Ereignis registriert',
'No Assets currently registered in this scenario': 'Zurzeit sind keine Anlagen zu diesem Szenario registriert',
'No Assets currently registered': 'Zurzeit sind keine Anlagen registriert',
'No Baseline Types currently registered': 'Zurzeit sind keine Referenzdatumstypen registriert',
'No Baselines currently registered': 'Zurzeit sind keine Referenzdaten registriert',
'No Brands currently registered': 'Zurzeit sind keine Marken registriert',
'No Budgets currently registered': 'Zurzeit sind keine Budgets registriert',
'No Bundles currently registered': 'Zurzeit sind keine Produktpakete registriert',
'No Camp Services currently registered': 'Zurzeit sind keine Camp-Leistungen registriert',
'No Camp Types currently registered': 'Zurzeit sind keine Typen von Camps registriert',
'No Camps currently registered': 'Zurzeit sind keine Camps registriert',
'No Catalog Items currently registered': 'Zurzeit sind keine Katalogeinträge registriert',
'No Catalogs currently registered': 'Zurzeit sind keine Kataloge registriert',
'No Checklist available': 'Zurzeit sind keine Checklisten verfügbar',
'No Cluster Subsectors currently registered': 'Zurzeit sind keine Cluster Teilbereiche registriert',
'No Clusters currently registered': 'Zurzeit sind keine Cluster registriert',
'No Commitment Items currently registered': 'Zurzeit sind keine zugesagten Artikel registriert',
'No Commitments': 'Keine Zusagen',
'No Credentials currently set': 'Derzeit keine Berechtigungen hinterlegt',
'No Details currently registered': 'Zurzeit sind keine Details registriert',
'No Documents found': 'Keine Dokumente gefunden',
'No Donors currently registered': 'Zurzeit sind keine Spender registriert',
'No Events currently registered': 'Zurzeit sind keine Ereignisse registriert',
'No Facilities currently registered in this event': 'Für dieses Ereignis ist zurzeit keine Einrichtung registriert',
'No Facilities currently registered in this scenario': 'Für dieses Szenario ist zurzeit keine Einrichtung registriert',
'No Family Members currently registered': 'Zurzeit keine Familienmitglieder registriert',
'No Feature Layers currently defined': 'Zurzeit sind keine Objekt-Layer definiert',
'No Flood Reports currently registered': 'Zurzeit sind keine Flutberichte registriert',
'No Groups currently defined': 'Zurzeit sind keine Gruppen definiert',
'No Groups currently registered': 'Zurzeit sind keine Gruppen registriert',
'No Hospitals currently registered': 'Zurzeit sind keine Krankenhäuser registriert',
'No Human Resources currently registered in this event': 'Für dieses Ereignis sind zurzeit keine personellen Ressourcen registriert',
'No Human Resources currently registered in this scenario': 'Für dieses Szenario sind zurzeit keine personellen Ressourcen registriert',
'No Identification Report Available': 'Kein Identifizierungsbericht verfügbar',
'No Identities currently registered': 'Zurzeit sind keine Identitäten registriert',
'No Image': 'Kein Bild',
'No Images currently registered': 'Zurzeit sind keine Bilder registriert',
'No Impact Types currently registered': 'Zurzeit sind keine Auswirkungsarten registriert',
'No Impacts currently registered': 'Zurzeit sind keine Auswirkungen registriert',
'No Incident Reports currently registered': 'Zurzeit sind keine Vorfallberichte registriert',
'No Incoming Shipments': 'Keine eingehenden Lieferungen',
'No instructions for this flag': 'Keine Anweisungen zu dieser Markierung',
'No Item Categories currently registered': 'Zurzeit sind keine Artikelkategorien registriert',
'No Item Packs currently registered': 'Zurzeit sind keine Artikelpakete registriert',
'No Items currently registered in this Inventory': 'Für diesen Bestand sind zurzeit keine Artikel registriert',
'No Items currently registered': 'Zurzeit sind keine Artikel registriert',
'No Keys currently defined': 'Zurzeit sind keine Schlüssel definiert',
'No Kits currently registered': 'Zurzeit sind keine Ausstattungen (Kits) registriert',
'No Level 1 Assessments currently registered': 'Zurzeit keine Stufe 1 Beurteilungen registriert',
'No Level 2 Assessments currently registered': 'Zurzeit keine Stufe 2 Beurteilungen registriert',
'No Locations currently available': 'Keine Standorte/Gebiete verfügbar',
'No Locations currently registered': 'Zurzeit sind keine Standorte/Gebiete registriert',
'No Map Profiles currently defined': 'Zurzeit sind keine Kartenkonfigurationen definiert',
'No Map Profiles currently registered in this event': 'Für dieses Ereignis sind zurzeit keine Kartenkonfigurationen registriert',
'No Map Profiles currently registered in this scenario': 'Für dieses Szenario sind zurzeit keine Kartenkonfigurationen registriert',
'No Markers currently available': 'Zurzeit sind keine Marker/Symbole verfügbar',
'No Match': 'Keine Übereinstimmung',
'No Matching Catalog Items': 'Keine passenden Katalogelemente',
'No Matching Items': 'Keine passenden Artikel',
'No Matching Records': 'Keine passenden Datensätze',
'No Members currently registered': 'Zurzeit sind keine Mitglieder registriert',
'No Memberships currently defined': 'Zurzeit sind keine Mitgliedschaften definiert',
'No Messages currently in Outbox': 'Zurzeit sind keine Nachrichten im Postausgang',
'No Need Types currently registered': 'Zurzeit sind keine Bedarfstypen registriert',
'No Needs currently registered': 'Zurzeit sind keine Bedarfe registriert',
'No Offices currently registered': 'Zurzeit sind keine Büros registriert',
'No Offices found!': 'Keine Büros gefunden!',
'No Organizations currently registered': 'Zurzeit sind keine Organisationen registriert',
'No options available': 'Keine Optionen verfügbar',
'No payments specified': 'Keine Auszahlungen angegeben',
'No People currently registered in this camp': 'Zurzeit sind in diesem Camp keine Personen registriert',
'No People currently registered in this shelter': 'Zurzeit sind in dieser Unterkunft keine Personen registriert',
'No Persons currently registered': 'Zurzeit sind keine Personen registriert',
'No Persons currently reported missing': 'Zurzeit sind keine Personen vermisst gemeldet',
'No Persons found': 'Keine Personen gefunden',
'No Photos found': 'Keine Fotos gefunden',
'No Picture': 'Kein Bild',
'No Population Statistics currently registered': 'Zurzeit sind keine Bevölkerungsstatistiken registriert',
'No Presence Log Entries currently registered': 'Zurzeit gibt es keine Anwesenheitsprotokolleinträge',
'No Problems currently defined': 'Zurzeit sind keine Probleme definiert',
'No Projections currently defined': 'Zurzeit sind keine Kartenprojektionen definiert',
'No Projects currently registered': 'Zurzeit sind keine Projekte registriert',
'No Rapid Assessments currently registered': 'Zurzeit sind keine Schnell-Beurteilungen registriert',
'No Received Items currently registered': 'Zurzeit sind keine erhaltenen Lieferungen registriert',
'No Received Shipments': 'Keine erhaltenen Lieferungen',
'No Records currently available': 'Zurzeit sind keine Datensätze verfügbar',
'No Request Items currently registered': 'Zurzeit sind keine angefragten Artikel registriert',
'No Requests': 'Keine Anfragen',
'No Residents Reports found': 'Keine Bewohnerlisten gefunden',
'No Rivers currently registered': 'Zurzeit sind keine Flüsse registriert',
'No Roles currently defined': 'Zurzeit sind keine Rollen definiert',
'No Rooms currently registered': 'Zurzeit sind keine Räume registriert',
'No Scenarios currently registered': 'Zurzeit sind keine Szenarios registriert',
'No Sections currently registered': 'Zurzeit sind keine Abschnitte registriert',
'No Sectors currently registered': 'Zurzeit sind keine Bereiche registriert',
'No Sent Items currently registered': 'Zurzeit sind keine gesendeten Artikel registriert',
'No Sent Shipments': 'Keine versandten Lieferungen',
'No Settings currently defined': 'Zurzeit sind keine Einstellungen definiert',
'No Shelter Services currently registered': 'Zurzeit sind keine Unterkunftsleistungen registriert',
'No Shelter Types currently registered': 'Zurzeit sind keine Unterkunfttypen registriert',
'No Shelters currently registered': 'Zurzeit sind keine Unterkünfte registriert',
'No Solutions currently defined': 'Zurzeit sind keine Lösungen definiert',
'No Staff Types currently registered': 'Zurzeit sind keine Mitarbeitertypen registriert',
'No Subscription available': 'Kein Abonnement verfügbar',
'No Subsectors currently registered': 'Zurzeit sind keine Teilbereiche registriert',
'No Support Requests currently registered': 'Zurzeit sind keine Unterstützungsanfragen registriert',
'No Survey Answers currently entered.': 'Zurzeit wurden noch keine Antworten auf Umfragen eingegeben.',
'No Survey Questions currently registered': 'Zurzeit wurden noch keine Umfragen-Fragen registriert',
'No Survey Series currently registered': 'Zurzeit wurden noch keine Umfragenserien registriert',
'No Survey Template currently registered': 'Zurzeit wurde noch keine Umfragen-Vorlage registriert',
'No Tasks with Location Data': 'Keine Aufgaben mit Standortdaten',
'No Teams currently registered': 'Zurzeit wurden noch keine Teams registriert',
'No Themes currently defined': 'Zurzeit sind keine Themen definiert',
'No Tickets currently registered': 'Zurzeit wurden noch keine Tickets registriert',
'No Tracks currently available': 'Zurzeit sind noch keine Tracks verfügbar',
'No transferable cases found': 'Keine transferierbaren Fälle gefunden',
'No Users currently registered': 'Zurzeit wurden noch keine Benutzer registriert',
'No Volunteers currently registered': 'Zurzeit sind noch keine Freiwilligen registriert',
'No Warehouses currently registered': 'Zurzeit sind noch keine Warenlager registriert',
'No access at all': 'Kein Zugriff',
'No access to this record!': 'Kein Zugriff auf diesen Datensatz!',
'No action recommended': 'Keine Aktion empfohlen',
'No conflicts logged': 'Keine Konflikte protokolliert',
'No contact information available': 'Keine Kontaktinformation verfügbar',
'No contacts currently registered': 'Zurzeit sind noch keine Kontakte registriert',
'No data available': 'Keine Daten verfügbar',
'No data in this table - cannot create PDF!': 'Keine Daten in dieser Tabelle - PDF kann nicht erstellt werden!',
'No databases in this application': 'Keine Datenbanken in dieser Anwendung',
'No dead body reports available': 'Keine Leichenberichte verfügbar',
'No entries found': 'Keine Einträge gefunden',
'No entries matching the query': 'Die Abfrage lieferte keine Einträge',
'No entry available': 'Kein Eintrag verfügbar',
'No location known for this person': 'Für diese Person ist kein Gebiet/Standort bekannt',
'No locations found for members of this team': 'Für Mitglieder dieses Teams ist kein Gebiet/Standort bekannt',
'No log entries matching the query': 'Die Abfrage lieferte keine Protokolleinträge',
'No messages in the system': 'Keine Nachrichten im System',
'No peers currently registered': 'Zurzeit sind keine Peers registriert',
'No pending payments': 'Keine anstehenden Auszahlungen',
'No pending registrations found': 'Keine anstehenden Registrierungen gefunden',
'No pending registrations matching the query': 'Die Abfrage lieferte keine anstehenden Registrierungen',
'No person record found for current user.': 'Kein Personendatensatz für den aktuellen Benutzer gefunden.',
'No person found with this ID number': 'Keine Person mit dieser ID-Nummer gefunden',
'No problem group defined yet': 'Noch keine Problem-Gruppe definiert',
'No records found': 'Keine Datensätze gefunden',
'No records matching the query': 'Die Abfrage lieferte keine Datensätze',
'No reports available.': 'Keine Berichte verfügbar.',
'No reports currently available': 'Zurzeit sind keine Berichte verfügbar',
'No requests found': 'Keine Anfragen gefunden',
'No resources currently reported': 'Zurzeit sind keine Ressourcen gemeldet',
'No service profile available': 'Kein Leistungsprofil verfügbar',
'No skills currently set': 'Zurzeit sind keine Fähigkeiten festgelegt',
'No staff or volunteers currently registered': 'Zurzeit sind weder Mitarbeiter noch Freiwillige registriert',
'No status information available': 'Keine Statusinformation verfügbar',
'No synchronization': 'Keine Synchronisation',
'No tasks currently registered': 'Zurzeit sind keine Aufgaben registriert',
'No template found!': 'Keine Vorlage gefunden!',
'No units currently registered': 'Zurzeit sind keine Einheiten registriert',
'No volunteer availability registered': 'Zurzeit ist keine Verfügbarkeit von Freiwilligen registriert',
'Non-structural Hazards': 'Nicht-strukturelle Gefahren',
'None (no such record)': 'Nichts (kein entsprechender Datensatz)',
'None': '-',
'None of the above': 'Keine(r) der oben genannten',
'Noodles': 'Nudeln',
'Normal Address': 'Normale Adresse',
'Normal Job': 'Normaler Beruf',
'Not Applicable': 'Nicht zutreffend',
'Not Authorised!': 'Nicht berechtigt!',
'Not Authorized': 'Nicht berechtigt',
'Not Available': 'Nicht verfügbar/vorhanden',
'Not currently a resident': 'Kein aktueller Bewohner',
'Not Possible': 'Nicht möglich',
'Not Required': 'Nicht erforderlich',
'Not Set': 'Nicht festgelegt',
'Not Transferable': 'Nicht transferierbar',
'Not installed or incorrectly configured.': 'Nicht installiert oder nicht korrekt konfiguriert.',
'Not yet a Member of any Group': 'Bis jetzt kein Mitglied irgendeiner Gruppe',
'Note that this list only shows active volunteers. To see all people registered in the system, search from this screen instead': 'Beachten Sie, dass diese Liste nur aktive Freiwillige zeigt. Um alle im System registrierten Personen zu sehen, suchen Sie stattdessen über diesen Bildschirm',
'Notes': 'Notizen',
'Notice to Airmen': 'Hinweis für Flieger',
'Notify': 'Benachrichtigen',
'Number': 'Anzahl',
'Number of Activities': 'Zahl der Aktivitäten',
'Number of Activities per': 'Zahl der Aktivitäten pro',
'Number of Barges': 'Zahl der Lastschiffe',
'Number of Columns': 'Anzahl der Spalten',
'Number of Patients': 'Anzahl der Patienten',
'Number of People Required': 'Anzahl der benötigten Personen',
'Number of Rows': 'Anzahl der Reihen',
'Number of Tugboats': 'Zahl der Schlepper',
'Number of additional beds of that type expected to become available in this unit within the next 24 hours.': 'Anzahl von zusätzlichen Betten dieses Typs, die voraussichtlich in den nächsten 24 Stunden in dieser Einheit zur Verfügung stehen werden.',
'Number of alternative places for studying': 'Anzahl von alternativen Orten zum Lernen',
'Number of available/vacant beds of that type in this unit at the time of reporting.': 'Anzahl von verfügbaren/freien Betten dieses Typs in dieser Einheit zum Zeitpunkt des Berichtes.',
'Number of deaths during the past 24 hours.': 'Anzahl der Todesfälle in den letzten 24 Stunden.',
'Number of discharged patients during the past 24 hours.': 'Anzahl der entlassenen Patienten in den vergangenen 24 Stunden.',
'Number of doctors': 'Anzahl der Ärzte',
'Number of evacuees registered in the shelter for day and night': 'Zahl der in der Unterkunft für Tag und Nacht registrierten Personen',
'Number of in-patients at the time of reporting.': 'Anzahl der stationären Patienten zum Zeitpunkt der Berichterstellung.',
'Number of newly admitted patients during the past 24 hours.': 'Anzahl der neu aufgenommenen Patienten innerhalb der letzten 24 Stunden.',
'Number of non-medical staff': 'Anzahl des nicht-medizinischen Personals',
'Number of nurses': 'Anzahl der Krankenschwestern',
'Number of private schools': 'Anzahl der privaten Schulen',
'Number of public schools': 'Anzahl der öffentlichen Schulen',
'Number of religious schools': 'Anzahl der religiösen Schulen',
'Number of residential units not habitable': 'Anzahl der nicht bewohnbaren Wohneinheiten',
'Number of residential units': 'Anzahl der Wohneinheiten',
'Number of vacant/available beds in this hospital. Automatically updated from daily reports.': 'Anzahl der freien/verfügbaren Betten in diesem Krankenhaus. Automatisch aktualisiert aus täglichen Berichten.',
'Number of vacant/available units to which victims can be transported immediately.': 'Anzahl der freien/verfügbaren Einheiten zu denen die Opfer sofort transportiert werden können.',
'Number or Label on the identification tag this person is wearing (if any).': 'Nummer oder Beschriftung auf der Identifikationsmarke, die diese Person trägt (falls vorhanden).',
'Number or code used to mark the place of find, e.g. flag code, grid coordinates, site reference number or similar (if available)': 'Nummer oder Code zur Markierung des Fundortes, z. B. Flaggencode, Koordinaten, Standortnummer oder ähnliches (falls verfügbar)',
'Number': 'Nummer',
'Number/Percentage of affected population that is Female & Aged 0-5': 'Anzahl/Prozentsatz der betroffenen weiblichen Bevölkerung im Alter zwischen 0-5 Jahren',
'Number/Percentage of affected population that is Female & Aged 13-17': 'Anzahl/Prozentsatz der betroffenen weiblichen Bevölkerung im Alter zwischen 13-17 Jahren',
'Number/Percentage of affected population that is Female & Aged 18-25': 'Anzahl/Prozentsatz der betroffenen weiblichen Bevölkerung im Alter zwischen 18-25 Jahren',
'Number/Percentage of affected population that is Female & Aged 26-60': 'Anzahl/Prozentsatz der betroffenen weiblichen Bevölkerung im Alter zwischen 26-60 Jahren',
'Number/Percentage of affected population that is Female & Aged 6-12': 'Anzahl/Prozentsatz der betroffenen weiblichen Bevölkerung im Alter zwischen 6-12 Jahren',
'Number/Percentage of affected population that is Female & Aged 61+': 'Anzahl/Prozentsatz der betroffenen weiblichen Bevölkerung über 61',
'Number/Percentage of affected population that is Male & Aged 0-5': 'Anzahl/Prozentsatz der betroffenen männlichen Bevölkerung im Alter zwischen 0-5 Jahren',
'Number/Percentage of affected population that is Male & Aged 13-17': 'Anzahl/Prozentsatz der betroffenen männlichen Bevölkerung im Alter zwischen 13-17 Jahren',
'Number/Percentage of affected population that is Male & Aged 18-25': 'Anzahl/Prozentsatz der betroffenen männlichen Bevölkerung im Alter zwischen 18-25 Jahren',
'Number/Percentage of affected population that is Male & Aged 26-60': 'Anzahl/Prozentsatz der betroffenen männlichen Bevölkerung im Alter zwischen 26-60 Jahren',
'Number/Percentage of affected population that is Male & Aged 6-12': 'Anzahl/Prozentsatz der betroffenen männlichen Bevölkerung im Alter zwischen 6-12 Jahren',
'Number/Percentage of affected population that is Male & Aged 61+': 'Anzahl/Prozentsatz der betroffenen männlichen Bevölkerung über 61',
'Nursery Beds': 'Neugeborenenbetten',
'Nutrition problems': 'Ernährungsprobleme',
'Nutrition': 'Nahrung',
'Opportunities to Volunteer On-Site?': 'Möglichkeiten für Freiwillige vor Ort?',
'OR Reason': 'oder Grund',
'OR Status Reason': 'oder Statusgrund',
'OR Status': 'oder Status',
'Observer': 'Beobachter',
'Obsolete': 'Veraltet',
'Obstetrics/Gynecology': 'Geburtshilfe/Gynäkologie',
'Office Address': 'Büroadresse',
'Office Details': 'Bürodetails',
'Office Phone': 'Telefon im Büro',
'Office Type': 'Bürotyp',
'Office Types': 'Bürotypen',
'Office added': 'Büro hinzugefügt',
'Office deleted': 'Büro gelöscht',
'Office updated': 'Büro aktualisiert',
'Office': 'Büro',
'Offices & Warehouses': 'Büros & Warenlager',
'Offices': 'Büros',
'Offline Sync (from USB/File Backup)': 'Offline-Synchronisation (von USB/Dateisicherung)',
'Offline Sync': 'Offline-Synchronisation',
'Oil Terminal Depth': 'Tiefe des Ölterminals',
'Older people as primary caregivers of children': 'Ältere Menschen als primäre Pfleger von Kindern',
'Older people in care homes': 'Ältere Menschen in Pflegeheimen',
'Older people participating in coping activities': 'Ältere Menschen die sich an Krisenbewältigungsaktivitäten beteiligen',
'Older person (>60 yrs)': 'Ältere Personen (> 60 Jahre)',
'On by default? (only applicable to Overlays)': 'Standardmäßig an? (gilt nur für Overlays)',
'On by default?': 'Standardmäßig an?',
'On Hold': 'Zurückgestellt',
'One Time Cost': 'Einmalige Kosten',
'One time cost': 'Einmalige Kosten',
'One-time costs': 'Einmalige Kosten',
'One-time': 'Einmalig',
'Oops! Something went wrong...': 'Hoppla! Etwas ging schief...',
'Oops! something went wrong on our side.': 'Hoppla! Etwas ging auf unserer Seite schief.',
'Opacity (1 for opaque, 0 for fully-transparent)': 'Opazität (1 für undurchsichtig, 0 für vollständig transparent)',
'Opacity': 'Opazität (Undurchsichtigkeit)',
'Open area': 'Offener Bereich',
'Open recent': 'Kürzlich Bearbeitetes öffnen',
'Open': 'Öffnen',
'Opening Times': 'Öffnungszeiten',
'OpenStreetMap Tiles': 'OpenStreetMap Tiles',
'OpenWeatherMap data': 'OpenWeatherMap Daten',
'Operating Rooms': 'Operationssäle',
'Optional link to an Incident which this Assessment was triggered by.': 'Optionaler Link zu einem Vorfall, der diese Beurteilung auslöste.',
'Optional': 'Optional',
'Optional. If you wish to style the features based on values of an attribute, select the attribute to use here.': 'Optional. Wenn Sie die Darstellung der Objekte auf der Basis von Werten eines Attributs festlegen möchten, wählen Sie das zu verwendende Attribut hier aus.',
'Optional. In GeoServer, this is the Workspace Namespace URI (not the name!). Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).': 'Optional. In GeoServer ist dies die Namespace-URI des Arbeitsbereichs (nicht der Name!). Innerhalb der WFS getCapabilities ist dies der Teil des FeatureType-Namens vor dem Doppelpunkt (:).',
'Optional. The name of an element whose contents should be a URL of an Image file put into Popups.': 'Optional. Der Name eines Elements, dessen Inhalt die URL einer Bilddatei ist, die in Popups angezeigt werden soll.',
'Optional. The name of an element whose contents should be put into Popups.': 'Optional. Der Name eines Elements, dessen Inhalt in Popups angezeigt werden soll.',
'Optional. The name of the schema. In Geoserver this has the form http://host_name/geoserver/wfs/DescribeFeatureType?version=1.1.0&;typename=workspace_name:layer_name.': 'Optional. Name des Schemas. Bei Geoserver wird das Format http://host_name/geoserver/wfs/DescribeFeatureType?version=1.1.0&;typename=workspace_name:layer_name verwendet.',
'Options': 'Optionen',
'Organization Details': 'Details zur Organisation',
'Organization Domains': 'Organisationsdomains',
'Organization Registry': 'Organisationsdatenbank',
'Organization Type': 'Organisationstyp',
'Organization Types': 'Organisationstypen',
'Organization added': 'Organisation hinzugefügt',
'Organization deleted': 'Organisation gelöscht',
'Organization updated': 'Organisation aktualisiert',
'Organization': 'Organisation',
'Organizations': 'Organisationen',
'Organization/Supplier': 'Organisation/Anbieter',
'Organized By': 'Organisiert durch',
'Origin of the separated children': 'Ursprung der getrennten Kinder',
'Origin': 'Ursprung',
'Other Address': 'Andere Adresse',
'Other (describe)': 'Andere (näher beschreiben)',
'Other (specify)': 'Sonstige (näher spezifizieren)',
'Other Evidence': 'Anderer Nachweis',
'Other Faucet/Piped Water': 'Andere Wasserrohre/-hähne',
'Other Isolation': 'Andere Isolierung',
'Other Name': 'Sonstiger Name',
'Other activities of boys 13-17yrs before disaster': 'Andere Aktivitäten von Jungen 13-17 Jahre vor der Katastrophe',
'Other activities of boys 13-17yrs': 'Andere Aktivitäten der Jungen 13-17 Jahre',
'Other activities of boys <12yrs before disaster': 'Andere Aktivitäten von Jungen <12 Jahre vor der Katastrophe',
'Other activities of boys <12yrs': 'Andere Aktivitäten von Jungen <12 Jahren',
'Other activities of girls 13-17yrs before disaster': 'Andere Aktivitäten von Mädchen 13-17 Jahre vor der Katastrophe',
'Other activities of girls 13-17yrs': 'Andere Aktivitäten von Mädchen 13-17 Jahre',
'Other activities of girls<12yrs before disaster': 'Andere Aktivitäten von Mädchen <12 Jahre vor der Katastrophe',
'Other activities of girls<12yrs': 'Andere Aktivitäten von Mädchen <12 Jahre',
'Other alternative infant nutrition in use': 'Andere alternative Säuglingsnahrung, die verwendet wird',
'Other alternative places for study': 'Andere alternative Orte zum Lernen',
'Other assistance needed': 'Andere Unterstützung benötigt',
'Other assistance, Rank': 'Andere Unterstützung, Rang',
'Other current health problems, adults': 'Andere aktuelle gesundheitliche Probleme, Erwachsene',
'Other current health problems, children': 'Andere aktuelle gesundheitliche Probleme, Kinder',
'Other events': 'Sonstige Ereignisse',
'Other factors affecting school attendance': 'Andere Faktoren mit Einfluss auf den Schulbesuch',
'Other major expenses': 'Andere große Ausgaben',
'Other non-food items': 'Andere Non-Food-Artikel',
'Other recommendations': 'Andere Empfehlungen',
'Other residential': 'Andere Wohnnutzung',
'Other school assistance received': 'Andere erhaltene Schulunterstützung',
'Other school assistance, details': 'Andere Schulhilfe, Einzelheiten',
'Other school assistance, source': 'Herkunft anderer Schulhilfen',
'Other settings can only be set by editing a file on the server': 'Andere Einstellungen können nur durch Bearbeiten einer Datei auf dem Server festgelegt werden',
'Other side dishes in stock': 'Andere Beilagen auf Lager',
'Other types of water storage containers': 'Andere Arten von Wassertanks',
'Other ways to obtain food': 'Weitere Möglichkeiten um an Nahrungsmitteln zu gelangen',
'Other': 'Sonstige',
'Outbound Mail settings are configured in models/000_config.py.': 'Abgehende Mail-Einstellungen werden in der Datei models/000_config.py konfiguriert.',
'Outbox': 'Ausgang',
'Outcome': 'Folge',
'Outgoing SMS Handler': 'SMS-Handler für ausgehende Informationen',
'Outgoing SMS handler': 'SMS-Handler für ausgehende Informationen',
'Overall Hazards': 'Gefahren insgesamt',
'Overhead falling hazard': 'Gefahr fallender Objekte',
'Overland Flow Flood': 'Überflutung',
'Overview': 'Übersicht',
'Owned By (Organization/Branch)': 'Im Eigentum von (Organisation/Niederlassung)',
'Owned Records': 'Eigene Datensätze',
'Owned Resources': 'Eigene Ressourcen',
'Ownership': 'Eigentum',
'Owning Organization': 'Eigentümer-Organisation',
'PIN number': 'PIN-Nummer',
'PIN': 'PIN',
'PL Women': 'PL Frauen',
'Pack': 'Packung',
'Packs': 'Packungen',
'Paid': 'Ausgezahlt',
'Paid on': 'Ausgezahlt am',
'Parameters': 'Parameter',
'Parapets, ornamentation': 'Brüstungen, Verzierungen',
'Parent Office': 'Übergeordnetes Büro',
'Parent needs to be of the correct level': 'Übergeordnetes Element muss auf der richtigen Stufe sein',
'Parent needs to be set for locations of level': 'Ein übergeordnetes Element muss für Gebiete/Standorte dieser Stufe existieren',
'Parent needs to be set': 'Ein übergeordnetes Element muss definiert werden',
'Parent': 'Übergeordnetes Element',
'Parents/Caregivers missing children': 'Eltern/Pfleger vermissen Kinder',
'Parser Connections': 'Parser Verbindungen',
'Parsers': 'Parser',
'Partial': 'Partiell',
'Participant': 'Teilnehmer',
'Pashto': 'Paschtu',
'Pass': 'Bestanden',
'Passport': 'Reisepass',
'Password': 'Passwort',
'Path': 'Pfad',
'Pathology': 'Pathologie',
'Patients': 'Patienten',
'Date of payment required': 'Auszahlungsdatum erforderlich',
'Payload Height (m)': 'Ladekapazität Höhe (m)',
'Payload Length (m)': 'Ladekapazität Länge (m)',
'Payload Volume (m3)': 'Ladekapazität Volumen (m3)',
'Payload Weight (kg)': 'Ladekapazität Gewicht (kg)',
'Payload Width (m)': 'Ladekapazität Breite (m)',
'Payment Date': 'Auszahlungsdatum',
'Payment Registration': 'Auszahlungsregistrierung',
'Payment registration not permitted': 'Auszahlungsregistrierung nicht erlaubt',
'Pediatric ICU': 'Kinder-Intensivstation',
'Pediatric Psychiatric': 'Kinderpsychiatrie',
'Pediatrics': 'Kinderheilkunde',
'Peer Details': 'Details zu Peers',
'Peer Registration Details': 'Details zur Peer-Registrierung',
'Peer Registration Request': 'Anfrage zu Peer-Registrierung',
'Peer Registration': 'Peer-Registrierung',
'Peer Type': 'Peer Typ',
'Peer UID': 'Peer UID',
'Peer added': 'Peer hinzugefügt',
'Peer deleted': 'Peer gelöscht',
'Peer not allowed to push': 'Peer ist nicht für das Pushen von Daten zugelassen',
'Peer registration request added': 'Anfrage zu Peer-Registrierung hinzugefügt',
'Peer registration request deleted': 'Anfrage zu Peer-Registrierung gelöscht',
'Peer registration request updated': 'Anfrage zu Peer-Registrierung aktualisiert',
'Peer updated': 'Peer aktualisiert',
'Peer': 'Peer',
'Pending Payments': 'Anstehende Auszahlungen',
'Pending Requests': 'Anstehende Anfragen',
'Pending': 'Anstehend',
'People Needing Food': 'Personen die Nahrungsmittel brauchen',
'People Needing Shelter': 'Personen die Unterkünfte brauchen',
'People Needing Water': 'Personen die Wasser brauchen',
'People Reservation': 'Gruppe reservieren',
'People Registration': 'Person registrieren',
'People Trapped': 'Eingeschlossene Personen',
'People': 'Personen',
'Performance Rating': 'Leistungsbeurteilung',
'Permanent Home Address': 'Dauerhafte Heimatadresse',
'Person 1, Person 2 are the potentially duplicate records': 'Person 1 und Person 2 sind möglicherweise Duplikate',
'Person De-duplicator': 'Dubletten in Personen auflösen',
'Person Details': 'Details zur Person',
'Person Registry': 'Personendatenbank',
'Person added to Group': 'Person zur Gruppe hinzugefügt',
'Person added to Team': 'Person zum Team hinzugefügt',
'Person added': 'Person hinzugefügt',
'Person deleted': 'Person gelöscht',
'Person details updated': 'Details zur Person aktualisiert',
'Person interviewed': 'Person befragt',
'Person not found': 'Person nicht gefunden',
'Person or OU': 'Person oder Organisationseinheit',
'Person shall not receive allowance payments when this flag is set': 'Die Person soll keine Taschengeldzahlungen erhalten, wenn diese Markierung gesetzt ist',
'Person who has actually seen the person/group.': 'Person, die die Person/Gruppe tatsächlich gesehen hat.',
'Person/Group': 'Person/Gruppe',
'Personal Data': 'Persönliche Daten',
'Personal Effects Details': 'Details zur persönlichen Habe',
'Personal Effects': 'Persönliche Habe',
'Personal Map': 'Persönliche Karte',
'Personal Profile': 'Persönliches Profil',
'Personal impact of disaster': 'Persönliche Auswirkung der Katastrophe',
'Persons in institutions': 'Personen in Institutionen',
'Persons with disability (mental)': 'Personen mit Behinderungen (psychischen)',
'Persons with disability (physical)': 'Personen mit Behinderungen (körperlichen)',
'Person': 'Person',
'Persons by Age Group': 'Personen nach Altersgruppen',
'Persons by Gender': 'Personen nach Geschlecht',
'Persons': 'Personen',
'Phone 1': 'Telefon 1',
'Phone 2': 'Telefon 2',
'Phone #': 'Telefon #',
'Phone': 'Telefon',
'Phone/Business': 'Telefon/Geschäftlich',
'Phone/Emergency': 'Telefon/Notfall',
'Phone/Exchange (Switchboard)': 'Telefon/Zentrale (Vermittlung)',
'Photo Details': 'Details zum Foto',
'Photo Taken?': 'Foto gemacht?',
'Photo added': 'Foto hinzugefügt',
'Photo deleted': 'Foto gelöscht',
'Photo updated': 'Foto aktualisiert',
'Photo': 'Foto',
'Photograph': 'Fotografie',
'Photos': 'Fotos',
'Physical Description': 'Physische Beschreibung',
'Physical Safety': 'Physische Sicherheit',
'Picture upload and finger print upload facility': 'Einrichtung um Foto und Fingerabdruck hochzuladen',
'Picture': 'Bild',
'Place of Recovery': 'Ort der Bergung',
'Place on Map': 'Auf Karte platzieren',
'Places for defecation': 'Orte zur Verrichtung der Notdurft',
'Places the children have been sent to': 'Orte an die Kinder geschickt wurden',
'Planned': 'Geplant',
'Planned on': 'Geplant am',
'Planned From': 'Geplant ab',
'Planned Until': 'Geplant bis',
'Planning': 'In Planung',
'Playing': 'Wiedergabe',
'Please correct all errors.': 'Korrigieren Sie bitte alle Fehler.',
'Please enter a first name': 'Bitte geben Sie den Vornamen ein',
'Please enter a site OR a location': 'Bitte geben Sie eine Stelle oder einen Standort/Gebiet an',
'Please enter the first few letters of the Person/Group for the autocomplete.': 'Bitte geben Sie die ersten Buchstaben der Person/Gruppe ein, um die Autovervollständigung zu starten.',
'Please enter the recipient': 'Bitte geben Sie den Empfänger ein',
'Please fill this!': 'Bitte ausfüllen!',
'Please provide the URL of the page you are referring to, a description of what you expected to happen & what actually happened.': 'Bitte geben Sie die URL der Seite an, auf die Sie sich beziehen, eine Beschreibung dessen, was Sie erwartet haben, & was tatsächlich passiert ist.',
'Please report here where you are:': 'Bitte geben Sie hier an, wo Sie sich befinden:',
'Please select another level': 'Bitte wählen Sie eine andere Ebene',
'Please select': 'Treffen Sie eine Auswahl',
'Please sign-up with your Cell Phone as this allows us to send you Text messages. Please include full Area code.': 'Bitte melden Sie sich unter Angabe Ihrer Mobilfunknummer an. Das erlaubt uns, Ihnen Textnachrichten zu senden. Bitte geben Sie die internationale Vorwahl mit an (Deutschland: 0049.... - ohne führende 0).',
'Please specify any problems and obstacles with the proper handling of the disease, in detail (in numbers, where appropriate). You may also add suggestions the situation could be improved.': 'Bitte geben Sie alle Probleme und Hindernisse bei der korrekten Behandlung der Krankheit detailliert an (in Zahlen, falls zutreffend). Sie können auch Vorschläge machen, wie die Situation verbessert werden könnte.',
'Please use this field to record any additional information, including a history of the record if it is updated.': 'Bitte verwenden Sie dieses Feld, um zusätzliche Informationen zu hinterlegen, einschließlich der Datensatzhistorie, falls dieser aktualisiert wurde.',
'Please use this field to record any additional information, including any Special Needs.': 'Bitte verwenden Sie dieses Feld, um zusätzliche Informationen, einschließlich besonderer Anforderungen, zu hinterlegen.',
'Please use this field to record any additional information, such as Ushahidi instance IDs. Include a history of the record if it is updated.': 'Bitte verwenden Sie dieses Feld, um zusätzliche Informationen, wie die Ushahidi Vorgangs-ID, zu hinterlegen, einschließlich der Datensatzhistorie, falls dieser aktualisiert wurde.',
'Pledge Support': 'Zusage von Unterstützung',
'PO': 'PO',
'PO Number': 'PO-Nummer',
'P0 Number': 'P0-Nummer',
'PoI Types': 'PoI Typen',
'POIS': 'PoIs',
'Point': 'Punkt',
'Points of Interest': 'Points of Interest',
'Poisoning': 'Vergiftung',
'Poisonous Gas': 'Giftgas',
'Police': 'Polizei',
'Pollution and other environmental': 'Verschmutzung und andere Umwelt',
'Polygon reference of the rating unit': 'Polygonale Abgrenzung der Bewertungseinheit',
'Poor': 'Arm',
'Population (Day)': 'Belegungszahl (Tag)',
'Population (Night)': 'Belegungszahl (Nacht)',
'Population Statistic Details': 'Details zur Bevölkerungsstatistik',
'Population Statistic added': 'Bevölkerungsstatistik hinzugefügt',
'Population Statistic deleted': 'Bevölkerungsstatistik gelöscht',
'Population Statistic updated': 'Bevölkerungsstatistik aktualisiert',
'Population Statistics': 'Bevölkerungsstatistiken',
'Population and number of households': 'Bevölkerungs- und Haushaltsanzahl',
'Population': 'Belegung',
'Popup Fields': 'Popup Felder',
'Popup Label': 'Popup Beschriftung',
'Porridge': 'Haferbrei',
'Port Closure': 'Hafenschließung',
'Port': 'Port',
'Portable App': 'Portable App',
'Position Catalog': 'Positionskatalog',
'Position added': 'Position hinzugefügt',
'Position deleted': 'Position gelöscht',
'Position updated': 'Position aktualisiert',
'Positions': 'Positionen',
'Postcode': 'PLZ',
'Posted on': 'Gepostet am',
'Posts can be either full pages, embedded within other pages or part of a series (for use as news items or blog posts)': 'Posts können entweder vollständige Seiten sein, in andere Seiten eingebettet werden oder Teil einer Serie sein (z.B. zur Nutzung als Newseintrag oder Blog-Post)',
'Poultry restocking, Rank': 'Geflügel auffüllen, Rang',
'Poultry': 'Geflügel',
'Pounds': 'Pfund',
'Power Failure': 'Netzausfall',
'Power': 'Stromversorgung',
'Powered by Sahana': 'Powered by Sahana',
'Pre-cast connections': 'Fertigteil-Verbindungen',
'Preferred Name': 'Bevorzugter Name',
'Pregnant women': 'Schwangere Frauen',
'Preliminary': 'Vorläufig',
'Presence': 'Anwesenheit',
'Presence Condition': 'Anwesenheitsbedingung',
'Presence Log': 'Anwesenheitsprotokollierung',
'Presence in the shelter': 'Anwesend in Unterkunft',
'Presence required': 'Anwesenheit erforderlich',
'Previous': 'Vorherige',
'Previous Total': 'Vorherige Summe',
'Primary Occupancy': 'Primäre Belegung',
'Priority from 1 to 9. 1 is most preferred.': 'Priorität von 1 bis 9. 1 ist die am meisten bevorzugte.',
'Priority': 'Priorität',
'Privacy': 'Datenschutz',
'Private': 'Privat',
'Problem Administration': 'Verwaltung von Problemen',
'Problem Details': 'Problemdetails',
'Problem Group': 'Problemgruppe',
'Problem Title': 'Problemtitel',
'Problem added': 'Problem hinzugefügt',
'Problem connecting to twitter.com - please refresh': 'Verbindungsproblem zu twitter.com - bitte neu laden',
'Problem deleted': 'Problem gelöscht',
'Problem updated': 'Problem aktualisiert',
'Problem': 'Problem',
'Problems': 'Probleme',
'Procedure': 'Vorgehensweise',
'Process Received Shipment': 'Bearbeiten der erhaltenen Lieferung',
'Process Shipment to Send': 'Vorbereiten der Lieferung zum Versenden',
'Procurement & Logistics cost': 'Kosten für Beschaffung & Logistik',
'Profession': 'Beruf',
'Profile': 'Profil',
'Profile Details': 'Details zum Profil',
'Profile Picture?': 'Profilbild?',
'Program Hours (Month)': 'Programmstunden (Monat)',
'Program Hours (Year)': 'Programmstunden (Jahr)',
'Program': 'Programm',
'Programs': 'Programme',
'Proj4js definition': 'Proj4js Definition',
'Project Details': 'Details zum Projekt',
'Project Name': 'Name des Projekts',
'Project Status': 'Projektstatus',
'Project added': 'Projekt hinzugefügt',
'Project deleted': 'Projekt gelöscht',
'Project has no Lat/Lon': 'Projekt hat keine geographischen Koordinaten (Lat/Lon)',
'Project updated': 'Projekt aktualisiert',
'Project': 'Projekt',
'Projection Details': 'Details zur Kartenprojektion',
'Projection added': 'Kartenprojektion hinzugefügt',
'Projection deleted': 'Kartenprojektion gelöscht',
'Projection updated': 'Kartenprojektion aktualisiert',
'Projection': 'Kartenprojektion',
'Projections': 'Kartenprojektionen',
'Projects': 'Projekte',
'Property reference in the council system': 'Referenz der Liegenschaft im Behördensystem',
'Proposed': 'Vorgeschlagen',
'Protected resource': 'Geschützte Ressource',
'Protection': 'Schutz',
'Provide Metadata for your media files': 'Stellen Sie Metadaten für Ihre Mediendateien zur Verfügung',
'Provide an optional sketch of the entire building or damage points. Indicate damage points.': 'Stellen Sie optional eine Skizze des gesamten Gebäudes oder der Schadenspunkte bereit. Markieren Sie die beschädigten Stellen.',
'Psychiatrics/Adult': 'Psychiatrie/Erwachsene',
'Psychiatrics/Pediatric': 'Psychiatrie/Kinder',
'Public Event': 'Öffentliches Ereignis',
'Public and private transportation': 'Öffentlicher und privater Transport',
'Public assembly': 'Öffentliche Versammlung',
'Public': 'Öffentlich',
'Publish': 'Veröffentlichen',
'Published On': 'Veröffentlicht am',
'Pull tickets from external feed': 'Tickets von externen Feeds laden',
'Purchase Date': 'Kaufdatum',
'Purchase Price': 'Kaufpreis',
'Purchase': 'Kauf',
'Purpose': 'Zweck',
'Push tickets to external system': 'Tickets an ein externes System übertragen',
'Pyroclastic Flow': 'Pyroklastischer Strom',
'Pyroclastic Surge': 'Pyroklastische Welle',
'Python Serial module not available within the running Python - this needs installing to activate the Modem': 'Das Python-Modul Serial ist in der laufenden Python-Umgebung nicht verfügbar - es muss installiert werden, um das Modem zu aktivieren',
'Python needs the ReportLab module installed for PDF export': 'Python benötigt das installierte ReportLab-Modul für den PDF-Export',
'Quality/Mode': 'Qualität/Modus',
'Quantity Committed': 'Zugesagte Menge',
'Quantity Fulfilled': 'Erfüllte Menge',
'Quantity range': 'Mengenumfang',
'Quantity Received': 'Erhaltene Menge',
'Quantity Returned': 'Zurückgegebene Menge',
'Quantity Sent': 'Gesendete Menge',
'Quantity in Transit': 'Menge im Transit',
'Quantity': 'Menge',
'Quarantine': 'Quarantäne',
'Queries': 'Abfragen',
'Query': 'Abfrage',
'Queryable?': 'Abfragbar?',
'RC frame with masonry infill': 'Stahlbetonrahmen mit Mauerwerkfüllung',
'RECORD A': 'DATENSATZ A',
'RECORD B': 'DATENSATZ B',
'Race': 'Rasse',
'Radio Callsign': 'Funkrufzeichen',
'Radiological Hazard': 'Strahlungsgefahr',
'Radiology': 'Radiologie',
'Railway Accident': 'Eisenbahnunfall',
'Railway Hijacking': 'Eisenbahnentführung',
'Rain Fall': 'Niederschlag',
'Rank when ordering cases by status': 'Rang beim Sortieren von Fällen nach Status',
'Rapid Assessment Details': 'Details zur Schnell-Beurteilung',
'Rapid Assessment added': 'Schnell-Beurteilung hinzugefügt',
'Rapid Assessment deleted': 'Schnell-Beurteilung gelöscht',
'Rapid Assessment updated': 'Schnell-Beurteilung aktualisiert',
'Rapid Assessment': 'Schnell-Beurteilung',
'Rapid Assessments & Flexible Impact Assessments': 'Schnell-Beurteilungen & flexible Abschätzungen der Auswirkungen',
'Rapid Assessments': 'Schnell-Beurteilungen',
'Rapid Close Lead': 'Lead schnell schließen',
'Rapid Data Entry': 'Schnelle Dateneingabe',
'Raw Database access': 'Direkter Datenbankzugriff',
'Ready for Transfer': 'Transferbereit',
'Receive New Shipment': 'Neue Lieferung erhalten',
'Receive Shipment': 'Lieferung erhalten',
'Receive this shipment?': 'Lieferung erhalten?',
'Receive': 'Erhalten',
'Received By Person': 'Erhalten von einer Person',
'Received By': 'Erhalten von',
'Received Item Details': 'Details zum erhaltenen Artikel',
'Received Item deleted': 'Erhaltener Artikel gelöscht',
'Received Item updated': 'Erhaltener Artikel aktualisiert',
'Received Shipment Details': 'Details zur erhaltenen Lieferung',
'Received Shipment canceled and items removed from Inventory': 'Erhaltene Lieferung abgebrochen und Artikel aus dem Bestand entfernt',
'Received Shipment canceled': 'Erhaltene Lieferung abgebrochen',
'Received Shipment updated': 'Erhaltene Lieferung aktualisiert',
'Received Shipments': 'Erhaltene Lieferungen',
'Received': 'Erhalten',
'Received date': 'Eingangsdatum',
'Received/Incoming Shipments': 'Erhaltene/Eingehende Lieferungen',
'Receiving and Sending Items': 'Erhalten und Versenden von Artikeln',
'Recipient': 'Empfänger',
'Recipients': 'Empfänger',
'Recipient(s)': 'Empfänger',
'Recommendations for Repair and Reconstruction or Demolition': 'Empfehlungen für Reparatur und Wiederherstellung oder Abriss',
'Record Details': 'Details zum Datensatz',
'Record Saved': 'Datensatz gespeichert',
'Record added': 'Datensatz hinzugefügt',
'Record any restriction on use or entry': 'Erfassen Sie jegliche Einschränkung der Nutzung oder des Zutritts',
'Record deleted': 'Datensatz gelöscht',
'Record last updated': 'Datensatz zuletzt aktualisiert',
'Record not found!': 'Datensatz nicht gefunden!',
'Record not found': 'Datensatz nicht gefunden',
'Record updated': 'Datensatz aktualisiert',
'Record': 'Datensatz',
'Recording and Assigning Assets': 'Aufzeichnen und Zuweisen von Anlagen',
'Records': 'Datensätze',
'Recovery Request added': 'Bergungsanfrage hinzugefügt',
'Recovery Request deleted': 'Bergungsanfrage gelöscht',
'Recovery Request updated': 'Bergungsanfrage aktualisiert',
'Recovery Request': 'Bergungsanfrage',
'Recovery Requests': 'Bergungsanfragen',
'Recovery': 'Bergung',
'Recurring Cost': 'Wiederkehrende Kosten',
'Recurring Request?': 'Wiederkehrende Anfrage?',
'Recurring cost': 'Wiederkehrende Kosten',
'Recurring costs': 'Wiederkehrende Kosten',
'Recurring': 'Wiederkehrend',
'Red Cross / Red Crescent': 'Rotes Kreuz / Roter Halbmond',
'Red': 'Rot',
'Reference Document': 'Referenzdokument',
'Refresh Rate (seconds)': 'Aktualisierungsrate (Sekunden)',
'Refugees': 'Flüchtlinge',
'Refugee Support Database': 'Flüchtlingshilfe-Datenbank',
'Region': 'Regierungsbezirk',
'Region Location': 'Standort Region',
'Regional': 'Regional',
'Regions': 'Regionen',
'Register Person into this Camp': 'Registrieren der Person in dieses Camp',
'Register Person into this Shelter': 'Registrieren der Person in diese Unterkunft',
'Register Person': 'Registrieren einer Person',
'Register them as a volunteer': 'Als Freiwillige registrieren',
'Register': 'Registrieren',
'Register As': 'Registrieren als',
'Registered People': 'Registrierte Personen',
'Registered users can': 'Registrierte Benutzer können',
'Registered by': 'Registriert von',
'Registered on': 'Registriert am',
'Registration Date': 'Registrierungsdatum',
'Registration Details': 'Details zur Registrierung',
'Registration added': 'Registrierung hinzugefügt',
'Registration entry deleted': 'Anmeldungseintrag gelöscht',
'Registration is still pending approval from Approver (%s) - please wait until confirmation received.': 'Die Registrierung wartet noch auf die Genehmigung durch den Genehmiger (%s) - bitte warten Sie, bis Sie eine Bestätigung erhalten.',
'Registration not found': 'Registrierung nicht gefunden',
'Registration updated': 'Anmeldung aktualisiert',
'Registration': 'Registrierung',
'Rehabilitation/Long Term Care': 'Rehabilitation/Langfristige Pflege',
'Reinforced masonry': 'Verstärktes Mauerwerk',
'Rejected': 'Zurückgewiesen',
'Relationship': 'Beziehung',
'Relief Team': 'Unterstützungsteam',
'Relief': 'Unterstützung',
'Religious Leader': 'Religiöser Führer',
'Religious': 'Religiös',
'Relocate as instructed in the <instruction>': 'Verlagern wie in der <instruction> angewiesen',
'Remarks': 'Bemerkungen',
'Remove Asset from this event': 'Anlage von diesem Ereignis entfernen',
'Remove Asset from this scenario': 'Anlage von diesem Szenario entfernen',
'Remove Facility from this event': 'Einrichtung von diesem Ereignis entfernen',
'Remove Facility from this scenario': 'Einrichtung von diesem Szenario entfernen',
'Remove Family Member': 'Familienmitglied entfernen',
'Remove Human Resource from this event': 'Personelle Ressource von diesem Ereignis entfernen',
'Remove Human Resource from this scenario': 'Personelle Ressource von diesem Szenario entfernen',
'Remove Incident Type from this event': 'Vorfallstyp von diesem Ereignis entfernen',
'Remove Item from Inventory': 'Artikel aus Bestand entfernen',
'Remove Layer from Profile': 'Kartenebene aus dem Profil entfernen',
'Remove Map Profile from this event': 'Kartenkonfiguration von diesem Ereignis entfernen',
'Remove Map Profile from this scenario': 'Kartenkonfiguration von diesem Szenario entfernen',
'Remove Person from Group': 'Person aus Gruppe entfernen',
'Remove Person from Team': 'Person aus Team entfernen',
'Remove existing data before import': 'Vorhandene Daten vor dem Import entfernen',
'Remove this asset from this event': 'Diese Anlage vom Ereignis entfernen',
'Remove this asset from this scenario': 'Diese Anlage vom Szenario entfernen',
'Remove': 'Entfernen',
'Removed from Group': 'Aus Gruppe entfernt',
'Removed from Team': 'Aus Team entfernt',
'Repacked By': 'Umgepackt von',
'Repair': 'Reparieren',
'Repairs': 'Reparaturen',
'Repaired': 'Repariert',
'Repeat your password': 'Kennwort wiederholen',
'Replace if Master': 'Ersetzen, wenn Master',
'Replace if Newer': 'Ersetzen, wenn neuer',
'Replace': 'Ersetzen',
'Report Another Assessment...': 'Melde andere Beurteilung...',
'Report Details': 'Details zum Bericht',
'Report Options': 'Optionen zum Bericht',
'Report Options:': 'Optionen zum Bericht:',
'Report Types Include': 'Berichtstypen beinhalten',
'Report added': 'Bericht hinzugefügt',
'Report created': 'Bericht angelegt',
'Report deleted': 'Bericht gelöscht',
'Report my location': 'Meinen Standort melden',
'Report of': 'Bericht von',
'Report the contributing factors for the current EMS status.': 'Melden Sie die beitragenden Faktoren für den aktuellen EMS-Status.',
'Report the contributing factors for the current OR status.': 'Melden Sie die beitragenden Faktoren für den aktuellen OR-Status.',
'Report them as found': 'Als gefunden melden',
'Report them missing': 'Als vermisst melden',
'Report updated': 'Bericht aktualisiert',
'Report': 'Bericht',
'Report To': 'Melden bei',
'Reported To': 'Gemeldet bei',
'Reported Transferable': 'Transferierbar gemeldet',
'Reporter Name': 'Name des Meldenden',
'Reporter': 'Meldender',
'Reporting on the projects in the region': 'Berichterstattung über die Projekte in der Region',
'Reports': 'Berichte',
'Repositories': 'Repositories',
'REQ': 'Anfrage',
'REQ Number': 'Anfragenummer',
'RSS Channels': 'RSS Kanäle',
'RSS Posts': 'RSS Posts',
'Request Added': 'Anfrage hinzugefügt',
'Request Canceled': 'Anfrage storniert',
'Request Details': 'Details zur Anfrage',
'Request Templates': 'Anfragevorlagen',
'Requested For Facility': 'Angefragt für Einrichtung',
'Request From': 'Anfrage von',
'Request Item Details': 'Details zur Anfrage nach Artikel',
'Request Item added': 'Anfrage nach Artikel hinzugefügt',
'Request Item deleted': 'Anfrage nach Artikel entfernt',
'Request Item from Available Inventory': 'Anfrage nach Artikel aus verfügbarem Bestand',
'Request Item updated': 'Anfrage nach Artikel aktualisiert',
'Request Item': 'Angefragter Artikel',
'Request Items': 'Angefragte Artikel',
'Request Status': 'Anfragestatus',
'Request Type': 'Anfragetyp',
'Request Updated': 'Anfrage aktualisiert',
'Request added': 'Anfrage hinzugefügt',
'Request deleted': 'Anfrage gelöscht',
'Request for Role Upgrade': 'Rollenupgrade anfordern',
'Request updated': 'Anfrage aktualisiert',
'Request': 'Anfrage',
'Requests': 'Anfragen',
'Request, Response & Session': 'Anfrage, Antwort & Sitzung',
'Requested By Facility': 'Angefragt von Einrichtung',
'Requested By': 'Angefragt durch',
'Requested From': 'Angefragt von',
'Requested Items': 'Angefragte Artikel',
'Requested Skills': 'Angefragte Fähigkeiten',
'Requested by': 'Angefragt durch',
'Requested on': 'Angefragt am',
'Requested': 'Angefragt',
'Requester': 'Anfragender',
'Requests Management': 'Anfragenverwaltung',
'Required Skills': 'Benötigte Fähigkeiten',
'Requires Login!': 'Anmeldung erforderlich!',
'Rescue and recovery': 'Rettung und Bergung (SAR)',
'Reset Password': 'Kennwort zurücksetzen',
'Reset': 'Zurücksetzen',
'Residents': 'Bewohner',
'Residents Report': 'Bewohnerliste',
'Residents Reports': 'Bewohnerlisten',
'Residents Report created': 'Bewohnerliste angelegt',
'Residents Report updated': 'Bewohnerliste aktualisiert',
'Residents Report deleted': 'Bewohnerliste gelöscht',
'Resolve Conflict': 'Konflikt lösen',
'Resolve link brings up a new screen which helps to resolve these duplicate records and update the database.': 'Das Verfolgen des Links öffnet eine neue Anzeige, die hilft, diese doppelten Einträge aufzulösen und die Datenbank zu aktualisieren.',
'Resolve': 'Auflösen',
'Resource Details': 'Details zur Ressource',
'Resource Inventory': 'Ressourcenbestand',
'Resource Type': 'Ressourcentyp',
'Resource added': 'Ressource hinzugefügt',
'Resource deleted': 'Ressource gelöscht',
'Resource updated': 'Ressource aktualisiert',
'Resource': 'Ressource',
'Resources': 'Ressourcen',
'Respiratory Infections': 'Atemwegsinfektionen',
'Response': 'Antwort',
'Restricted Access': 'Eingeschränkter Zugriff',
'Restricted Use': 'Eingeschränkte Verwendung',
'Result': 'Ergebnis',
'Results': 'Ergebnisse',
'Retail Crime': 'Einzelhandelskriminalität',
'Retrieve Password': 'Kennwort abrufen',
'Return to Request': 'Zurück zur Anfrage',
'Return': 'Zurück',
'Returned From': 'Zurückgegeben von',
'Returned': 'Zurückgegeben',
'Review Incoming Shipment to Receive': 'Überprüfung der eingehenden Lieferung für die Annahme',
'Rice': 'Reis',
'Rich Text?': 'Rich Text?',
'Riot': 'Aufruhr',
'River Details': 'Details zum Fluss',
'River added': 'Fluss hinzugefügt',
'River deleted': 'Fluss gelöscht',
'River updated': 'Fluss aktualisiert',
'River': 'Fluss',
'Rivers': 'Flüsse',
'Road Accident': 'Verkehrsunfall',
'Road Closed': 'Straße gesperrt',
'Road Conditions': 'Zustand der Straßen',
'Road Delay': 'Verkehrsverzögerung',
'Road Hijacking': 'Straßenentführung',
'Road Usage Condition': 'Straßennutzungszustand',
'Role Details': 'Details zur Rolle',
'Role Name': 'Name der Rolle',
'Role Required': 'Erforderliche Rolle',
'Role Updated': 'Rolle aktualisiert',
'Role added': 'Rolle hinzugefügt',
'Role deleted': 'Rolle gelöscht',
'Role updated': 'Rolle aktualisiert',
'Role': 'Rolle',
'Role-based': 'Rollenbasiert',
'Roles Permitted': 'Zulässige Rollen',
'Roles': 'Rollen',
'Roll On Roll Off Berth': 'Fähranlegestelle',
'Roof tile': 'Dachziegel',
'Roofs, floors (vertical load)': 'Dächer, Böden (vertikale Belastung)',
'Room Details': 'Details zum Raum',
'Room added': 'Raum hinzugefügt',
'Room deleted': 'Raum gelöscht',
'Room updated': 'Raum aktualisiert',
'Room': 'Raum',
'Room No.': 'Raum-Nr.',
'Rooms': 'Räume',
'Rows in table': 'Zeilen in der Tabelle',
'Rows selected': 'Ausgewählte Zeilen',
'Run Interval': 'Intervall der Läufe',
'Runway Length (m)': 'Länge der Landebahn (m)',
'Runway Surface': 'Oberfläche der Landebahn',
'Runway Width (m)': 'Breite der Landebahn (m)',
'Running Cost': 'Laufende Kosten',
'SMS Modem Channels': 'SMS Modem Kanäle',
'SMS Outbound Gateways': 'SMS Ausgangsgateways',
'SMS SMTP Channels': 'SMS SMTP Kanäle',
'SMS WebAPI Channels': 'SMS WebAPI Kanäle',
'Safe environment for vulnerable groups': 'Sichere Umgebung für gefährdete Gruppen',
'Safety Assessment Form': 'Formular für Sicherheitsbeurteilung',
'Safety of children and women affected by disaster?': 'Ist die Sicherheit von Kindern und Frauen durch die Katastrophe beeinträchtigt?',
'Sahana Blue': 'Sahana Blau',
'Sahana Community Chat': 'Sahana-Community-Chat',
'Sahana Eden <=> Other': 'Sahana Eden <=> Andere',
'Sahana Eden Humanitarian Management Platform': 'Sahana Eden - OpenSource Management-Plattform für humanitäre Notsituationen',
'Sahana Eden Website': 'Sahana Eden Internetseite',
'Sahana Steel': 'Sahana Stahl',
'Sahana access granted': 'Sahana Zugriff gewährt',
'Salted Fish': 'Gesalzener Fisch',
'Sanitation problems': 'Sanitäre Probleme',
'Satellite': 'Satellit',
'Saturday': 'Samstag',
'Save: Default Lat, Lon & Zoom for the Viewport': 'Speichern: Standardwerte für Breite, Länge & Zoom des Kartenausschnitts',
'Save': 'Speichern',
'Saved.': 'Gespeichert.',
'Saved Filters': 'Gespeicherte Filter',
'Saving...': 'Wird gespeichert...',
'Scale of Results': 'Umfang der Ergebnisse',
'Scan with Zxing': 'Scannen mit Zxing',
'Scenario Details': 'Details zum Szenario',
'Scenario added': 'Szenario hinzugefügt',
'Scenario deleted': 'Szenario gelöscht',
'Scenario updated': 'Szenario aktualisiert',
'Scenario': 'Szenario',
'Scenarios': 'Szenarios',
'Schedule': 'Zeitplan',
'School Closure': 'Schulschließung',
'School Lockdown': 'Abriegelung der Schule',
'School Teacher': 'Schullehrer',
'School activities': 'Schulaktivitäten',
'School assistance': 'Schulunterstützung',
'School attendance': 'Schulbesuch',
'School destroyed': 'Schule zerstört',
'School heavily damaged': 'Schule stark beschädigt',
'School tents received': 'Schulzelte erhalten',
'School tents, source': 'Herkunft der Schulzelte',
'School used for other purpose': 'Schule wird für andere Zwecke verwendet',
'School': 'Schule',
'School/studying': 'Schule/lernen',
'Schools': 'Schulen',
'Seaports': 'Seehäfen',
'Search Activities': 'Aktivitäten suchen',
'Search Activity Report': 'Aktivitätsbericht suchen',
'Search Addresses': 'Suche nach Adressen',
'Search All Requested Items': 'Alle angefordeten Artikel durchsuchen',
'Search All Requested Skills': 'Alle angefragten Fähigkeiten durchsuchen',
'Search Alternative Items': 'Suche nach alternativen Artikeln',
'Search Assessment Summaries': 'Suche Beurteilungszusammenfassungen',
'Search Assessments': 'Suche Beurteilungen',
'Search Asset Log': 'Suche Anlageprotokoll',
'Search Assets': 'Suche Anlagen',
'Search Baseline Type': 'Referenzdatumstyp suchen',
'Search Baselines': 'Referenzdatum suchen',
'Search Brands': 'Marken suchen',
'Search Budgets': 'Budgets suchen',
'Search Bundles': 'Produktpakete suchen',
'Search Camp Services': 'Camp-Leistungen suchen',
'Search Camp Types': 'Camp-Typen suchen',
'Search Camps': 'Camps suchen',
'Search Catalog Items': 'Katalogeinträge suchen',
'Search Catalogs': 'Kataloge suchen',
'Search Certificates': 'Zertifikate suchen',
'Search Certifications': 'Zertifizierungen suchen',
'Search Checklists': 'Checklisten suchen',
'Search Cluster Subsectors': 'Cluster Teilbereiche suchen',
'Search Clusters': 'Cluster suchen',
'Search Commitment Items': 'Zugesagte Artikel suchen',
'Search Commitments': 'Zusagen suchen',
'Search Competencies': 'Kompetenzen suchen',
'Search Competency Ratings': 'Kompetenzeinstufungen suchen',
'Search Contact Information': 'Nach Kontaktinformationen suchen',
'Search Contacts': 'Nach Kontakten suchen',
'Search Course Certificates': 'Suchen nach Kurszertifikaten',
'Search Courses': 'Kurse suchen',
'Search Credentials': 'Qualifikationen suchen',
'Search Documents': 'Dokumente suchen',
'Search Donors': 'Spender suchen',
'Search Entries': 'Einträge suchen',
'Search Events': 'Ereignisse suchen',
'Search Facilities': 'Einrichtungen suchen',
'Search Feature Layers': 'Objekt-Ebenen suchen',
'Search Flood Reports': 'Flutberichte suchen',
'Search Groups': 'Gruppen suchen',
'Search Human Resources': 'Personelle Ressourcen suchen',
'Search Identity': 'Identität suchen',
'Search Images': 'Bilder suchen',
'Search Impact Type': 'Auswirkungstypen suchen',
'Search Impacts': 'Auswirkungen suchen',
'Search Incident Reports': 'Vorfallberichte suchen',
'Search Inventory Items': 'Bestandsartikel suchen',
'Search Inventory items': 'Bestandsartikel suchen',
'Search Item Categories': 'Artikelkategorien suchen',
'Search Item Packs': 'Artikelpakete suchen',
'Search Items': 'Artikel suchen',
'Search Job Roles': 'Tätigkeiten suchen',
'Search Keys': 'Schlüssel suchen',
'Search Kits': 'Ausstattungen (Kits) suchen',
'Search Layers': 'Kartenebenen suchen',
'Search Level 1 Assessments': 'Suche Stufe 1 Beurteilungen',
'Search Level 2 Assessments': 'Suche Stufe 2 Beurteilungen',
'Search Locations': 'Gebiet/Standort suchen',
'Search Log Entry': 'Protokolleintrag suchen',
'Search Map Profiles': 'Kartenkonfigurationen suchen',
'Search Markers': 'Marker/Symbol suchen',
'Search Members': 'Mitglied suchen',
'Search Membership': 'Mitgliedschaft suchen',
'Search Missions': 'Aufträge suchen',
'Search Need Type': 'Anforderungstyp suchen',
'Search Needs': 'Anforderungen suchen',
'Search Offices': 'Büros suchen',
'Search Organizations': 'Organisationen suchen',
'Search Peer': 'Peer suchen',
'Search Personal Effects': 'Persönliche Habe suchen',
'Search Persons': 'Personen suchen',
'Search Photos': 'Fotos suchen',
'Search Population Statistics': 'Bevölkerungsstatistiken suchen',
'Search Positions': 'Positionen suchen',
'Search Problems': 'Probleme suchen',
'Search Projections': 'Kartenprojektionen suchen',
'Search Projects': 'Projekte suchen',
'Search Queries': 'Suchabfragen',
'Search Rapid Assessments': 'Schnell-Beurteilung suchen',
'Search Received Items': 'Erhaltene Artikel suchen',
'Search Received Shipments': 'Erhaltene Lieferungen suchen',
'Search Records': 'Datensätze suchen',
'Search Registrations': 'Registrierungen suchen',
'Search Registration Request': 'Registrierungsanfragen suchen',
'Search Report': 'Berichte suchen',
'Search Request Items': 'Angefragte Artikel suchen',
'Search Request': 'Anfrage suchen',
'Search Requested Items': 'Angefragte Artikel suchen',
'Search Requests': 'Anfragen suchen',
'Search Resources': 'Ressourcen suchen',
'Search Rivers': 'Flüsse suchen',
'Search Roles': 'Rollen suchen',
'Search Rooms': 'Räume suchen',
'Search Scenarios': 'Szenarien suchen',
'Search Sections': 'Abschnitte suchen',
'Search Sectors': 'Bereiche suchen',
'Search Sent Items': 'Gesendete Artikel suchen',
'Search Sent Shipments': 'Gesendete Lieferungen suchen',
'Search Service Profiles': 'Leistungsprofile suchen',
'Search Settings': 'Sucheinstellungen',
'Search Shelter Services': 'Unterkunftsleistungen suchen',
'Search Shelter Types': 'Unterkunftsarten suchen',
'Search Shelters': 'Unterkünfte suchen',
'Search Shipped Items': 'Gelieferte Artikel suchen',
'Search Skill Equivalences': 'Fähigkeits-Vergleichbarkeiten suchen',
'Search Skill Provisions': 'Fähigkeits-Bereitstellungen suchen',
'Search Skill Types': 'Fähigkeitstypen suchen',
'Search Skills': 'Fähigkeiten suchen',
'Search Solutions': 'Lösungen suchen',
'Search Staff Types': 'Mitarbeitertypen suchen',
'Search Staff or Volunteer': 'Suche Mitarbeiter oder Freiwillige',
'Search Status': 'Status suchen',
'Search Subscriptions': 'Abonnement suchen',
'Search Subsectors': 'Teilbereiche suchen',
'Search Support Requests': 'Unterstützungsanfragen suchen',
'Search Tasks': 'Aufgaben suchen',
'Search Teams': 'Teams suchen',
'Search Themes': 'Themen suchen',
'Search Tickets': 'Tickets suchen',
'Search Tracks': 'Tracks suchen',
'Search Training Participants': 'Suche Kursteilnehmer',
'Search Trainings': 'Schulung suchen',
'Search Twitter Tags': 'Twitter-Tags suchen',
'Search Units': 'Einheiten suchen',
'Search Users': 'Benutzer suchen',
'Search Volunteer Availability': 'Verfügbarkeit von Freiwilligen suchen',
'Search Volunteers': 'Freiwillige suchen',
'Search Warehouses': 'Warenlager suchen',
'Search and Edit Group': 'Suchen und Bearbeiten von Gruppen',
'Search and Edit Individual': 'Suchen und Bearbeiten von einzelnen Personen',
'Search by Skills': 'Suche nach Fähigkeiten',
'Search by skills': 'Suche nach Fähigkeiten',
'Search for Staff or Volunteers': 'Suche nach Mitarbeitern oder Freiwilligen',
'Search for a Location by name, including local names.': 'Suchen nach Standortnamen, einschließlich lokaler Namen.',
'Search for a Person': 'Suche nach einer Person',
'Search for a Project': 'Suche nach einem Projekt',
'Search for a shipment by looking for text in any field.': 'Suche nach einer Lieferung (Volltextsuche)',
'Search for a shipment received between these dates': 'Suche nach einer erhaltenen Lieferung im Zeitraum',
'Search for an Organization by name or acronym': 'Suche nach einer Organisation nach Namen oder Abkürzung',
'Search for an Organization by name or acronym.': 'Suche nach einer Organisation nach Name oder Abkürzung.',
'Search for an asset by text.': 'Suche Anlage über Text.',
'Search for an item by category.': 'Suche Artikel nach Kategorie.',
'Search for an item by text.': 'Suche Artikel über Text.',
'Search for asset by country.': 'Suche Anlage nach Ländern.',
'Search for office by country.': 'Suche Büro nach Ländern.',
'Search for office by organization.': 'Suche Büro nach Organisation.',
'Search for office by text.': 'Suche Büro über Text.',
'Search for Persons': 'Suche nach Personen',
'Search for warehouse by country.': 'Suche Warenlager nach Ländern.',
'Search for warehouse by organization.': 'Suche Warenlager nach Organisation.',
'Search for warehouse by text.': 'Suche Warenlager über Text.',
'Search here for a person record in order to:': 'Hier nach einem Personendatensatz suchen, um zu:',
'Search location in Geonames': 'Ortssuche in Geonames',
'Search messages': 'Suche Nachrichten',
'Search': 'Suchen',
'Searching for different groups and individuals': 'Suche nach verschiedenen Gruppen und Einzelpersonen',
'Secondary Server (Optional)': 'Sekundärer Server (optional)',
'Seconds must be a number between 0 and 60': 'Sekunden müssen eine Zahl zwischen 0 und 60 sein',
'Section Details': 'Details zum Abschnitt',
'Section deleted': 'Abschnitt gelöscht',
'Section updated': 'Abschnitt aktualisiert',
'Sections': 'Abschnitte',
'Sector Details': 'Details zum Bereich',
'Sector added': 'Bereich hinzugefügt',
'Sector deleted': 'Bereich gelöscht',
'Sector updated': 'Bereich aktualisiert',
'Sector': 'Bereich',
'Sector(s)': 'Bereich(e)',
'Sectors': 'Bereiche',
'Secure Storage Capacity': 'Sichere Lagerkapazität',
'Security Status': 'Sicherheitsstatus',
'Security problems': 'Sicherheitsprobleme',
'Security': 'Sicherheit',
'See All Entries': 'Alle Einträge anzeigen',
'See all': 'Alles anzeigen',
'See unassigned recovery requests': 'Nicht zugeordnete Bergungsanfragen anzeigen',
'Select': 'Auswahl',
'Select All': 'Alles auswählen',
'Select Items from the Request': 'Wählen Sie Artikel aus der Anfrage',
'Select Items from this Inventory': 'Wählen Sie Artikel aus diesem Bestand',
'Select Land': 'Land auswählen',
'Select Modules for translation': 'Auswahl der Module zum Übersetzen',
'Select a location': 'Wählen Sie einen Ort aus',
'Select a question from the list': 'Wählen Sie eine Frage aus der Liste aus',
'Select a range for the number of total beds': 'Wählen Sie einen Bereich für die Gesamtanzahl von Betten',
'Select all that apply': 'Wählen Sie alles Zutreffende aus',
'Select an Organization to see a list of offices': 'Wählen Sie eine Organisation aus, um eine Liste der zugehörigen Büros anzuzeigen.',
'Select resources to import': 'Wählen Sie Ressourcen zum Importieren aus',
'Select the overlays for Assessments and Activities relating to each Need to identify the gap.': 'Wählen Sie die Overlays für Beurteilungen und Aktivitäten zu jeder Anforderung aus, um die Lücke zu identifizieren.',
'Select the person assigned to this role for this project.': 'Wählen Sie die Person aus, die in diesem Projekt dieser Rolle zugeordnet werden soll.',
'Select to show this configuration in the Regions menu.': "Auswählen, um diese Konfiguration im Menü 'Regionen' anzuzeigen.",
'Selects whether to use a Modem, Tropo or other Gateway for sending out SMS': 'Wählt aus, ob ein Modem, Tropo oder ein anderes Gateway zum Versand von SMS verwendet werden soll.',
'Send Alerts using Email &/or SMS': 'Senden von Alarmen unter Nutzung von E-Mail und/oder SMS',
'Send Commitment as Shipment': 'Zusage als Lieferung senden',
'Send Message': 'Nachricht senden',
'Send New Shipment': 'Neue Lieferung senden',
'Send Notification': 'Benachrichtigung senden',
'Send Shipment': 'Lieferung senden',
'Send Task Notification': 'Auftragsbenachrichtigung senden',
'Send a message to this person': 'Dieser Person eine Nachricht senden',
'Send a message to this team': 'Diesem Team eine Nachricht senden',
'Send from %s': 'Senden von %s',
'Send message': 'Nachricht senden',
'Send new message': 'Neue Nachricht senden',
'Send': 'Senden',
'Sends & Receives Alerts via Email & SMS': 'Schickt & empfängt Benachrichtigungen über Email und SMS',
'Sent By Person': 'Gesendet von einer Person',
'Sent By': 'Gesendet von',
'Sent Emails': 'Gesendete E-Mails',
'Sent Item Details': 'Details zum versendeten Artikel',
'Sent Item deleted': 'Gesendeter Artikel gelöscht',
'Sent Item updated': 'Gesendeter Artikel aktualisiert',
'Sent Posts': 'Gesendete Posts',
'Sent Shipment Details': 'Details zur gesendeten Lieferung',
'Sent Shipment canceled and items returned to Inventory': 'Gesendete Lieferung storniert und Artikel zum Lager zurückgebracht',
'Sent Shipment canceled': 'Gesendete Lieferung storniert',
'Sent Shipment updated': 'Gesendete Lieferung aktualisiert',
'Sent Shipments': 'Gesendete Lieferungen',
'Sent SMS': 'Gesendete SMS',
'Sent to RP': 'Zu RP geschickt',
'Sent date': 'Versanddatum',
'Sent': 'Gesendet',
'Separated children, caregiving arrangements': 'von Eltern getrennte Kinder, Pflegevereinbarungen',
'Serial Number': 'Seriennummer',
'Series': 'Serie',
'Server': 'Server',
'Service Catalog': 'Leistungskatalog',
'Service Record': 'Leistungseintrag',
'Service or Facility': 'Leistung oder Einrichtung',
'Service profile added': 'Leistungsprofil hinzugefügt',
'Service profile deleted': 'Leistungsprofil gelöscht',
'Service profile updated': 'Leistungsprofil aktualisiert',
'Service': 'Leistung',
'Services Available': 'Verfügbare Leistungen',
'Services': 'Leistungen',
'Set Base Site': 'Basisstandort festlegen',
'Set By': 'Definiert durch',
'Set True to allow editing this level of the location hierarchy by users who are not MapAdmins.': "Wählen Sie 'Wahr', um Benutzern, die keine Karten-Admins sind, das Bearbeiten dieser Ebene der Gebietshierarchie zu erlauben.",
'Setting Details': 'Details konfigurieren',
'Setting added': 'Einstellung hinzugefügt',
'Setting deleted': 'Einstellungen gelöscht',
'Setting updated': 'Einstellung aktualisiert',
'Settings updated': 'Einstellungen aktualisiert',
'Settings were reset because authenticating with Twitter failed': 'Einstellungen wurden zurückgesetzt, da die Authentifizierung mit Twitter fehlgeschlagen ist',
'Settings which can be configured through the web interface are available here.': 'Die Einstellungen, die über das Webinterface konfiguriert werden können, sind hier verfügbar.',
'Settings': 'Einstellungen',
'Severe': 'Ernsthaft',
'Severity': 'Schweregrad',
'Sex': 'Geschlecht',
'Share a common Marker (unless over-ridden at the Feature level)': 'Definiere einen allgemeinen Marker/Symbol (kann auf Objekt-Ebene überschrieben werden)',
'Shelter & Essential NFIs': 'Unterkünfte & Essentielle NFIs',
'Shelter Details': 'Details zur Unterkunft',
'Shelter Name': 'Name der Unterkunft',
'Shelter Registration Status': 'Registrierungsstatus',
'Shelter Registry': 'Unterkunftsregister',
'Shelter Service Details': 'Details zur Unterkunftsleistung',
'Shelter Service added': 'Unterkunftsleistung hinzugefügt',
'Shelter Service deleted': 'Unterkunftsleistung gelöscht',
'Shelter Service updated': 'Unterkunftsleistung aktualisiert',
'Shelter Service': 'Unterkunftsleistung',
'Shelter Services': 'Unterkunftsleistungen',
'Shelter Settings': 'Eigenschaften der Unterkunft',
'Shelter Type Details': 'Details zum Unterkunftstyp',
'Shelter Type added': 'Unterkunftstyp hinzugefügt',
'Shelter Type deleted': 'Unterkunftstyp gelöscht',
'Shelter Type updated': 'Unterkunftstyp aktualisiert',
'Shelter Type': 'Unterkunftstyp',
'Shelter Types and Services': 'Unterkunftstypen und -leistungen',
'Shelter Types': 'Unterkunftstypen',
'Shelter added': 'Unterkunft hinzugefügt',
'Shelter deleted': 'Unterkunft gelöscht',
'Shelter updated': 'Unterkunft aktualisiert',
'Shelter': 'Unterkunft',
'Shelter/NFI Assistance': 'Unterkunft/NFI-Hilfe',
'Shelters': 'Unterkünfte',
'Shipment Created': 'Lieferung erstellt',
'Shipment Items received by Inventory': 'Lieferungsartikel im Bestand empfangen',
'Shipment Items sent from Inventory': 'Lieferungsartikel aus dem Bestand gesendet',
'Shipment Items': 'Lieferungsartikel',
'Shipment Type': 'Typ der Lieferung',
'Shipment to Send': 'Zu sendende Lieferung',
'Shipments To': 'Lieferungen nach',
'Shipments': 'Lieferungen',
'Shipping cost': 'Lieferkosten',
'Shooting': 'Schießerei',
'Short Assessment': 'Kurzbeurteilung',
'Short Description': 'Kurzbeschreibung',
'Show %(number)s entries': 'Zeige %(number)s Einträge',
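# Translator note (illustrative, not a translatable string): keys such as
# 'Show %(number)s entries' use Python named placeholders. The %(name)s tokens
# must be kept verbatim in the translation; only the surrounding words change.
# A minimal sketch of how such a string is substituted at runtime:
#
#     'Zeige %(number)s Einträge' % {'number': 25}   # -> 'Zeige 25 Einträge'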
'Show Checklist': 'Checkliste anzeigen',
'Show Details': 'Details anzeigen',
'Show handling instructions at check-in': 'Handhabungshinweise bei Check-in anzeigen',
'Show handling instructions at check-out': 'Handhabungshinweise bei Check-out anzeigen',
'Show handling instructions at ID checks (e.g. for event registration, payments)': 'Handhabungshinweise bei ID Prüfungen anzeigen (z.B. Ereignisregistrierung, Auszahlungen)',
'Show Location?': 'Gebiet/Standort anzeigen?',
'Show Map': 'Karte anzeigen',
'Show Region in Menu?': 'Region im Menü anzeigen?',
'Show author picture?': 'Bild des Autors anzeigen?',
'Show on Map': 'Auf Karte anzeigen',
'Show on map': 'Auf Karte anzeigen',
'Show totals': 'Summen anzeigen',
'Show': 'Zeige',
'Shower Availability': 'Verfügbarkeit von Duschen',
'Shower Handicap Facilities': 'Behindertengerechte Dusche',
'Shower with handicap facilities': 'Dusche mit behindertengerechter Einrichtung',
'Showing _START_ to _END_ of _TOTAL_ entries': 'Einträge _START_ bis _END_ von _TOTAL_',
'Showing 0 to 0 of 0 entries': 'Keine Einträge',
'Sign-up as a volunteer': 'Als Freiwilliger anmelden',
'Sign-up for Account': 'Für ein Benutzerkonto registrieren',
'Sign-up succesful - you should hear from us soon!': 'Registrierung erfolgreich - Sie werden in Kürze von uns hören!',
'simplified/slow': 'vereinfacht/langsam',
'Site Administration': 'Administration der Seite',
'Site': 'Standort',
'Site Needs': 'Standortbedarf',
'Add Site Needs': 'Standortbedarf hinzufügen',
'Edit Site Needs': 'Standortbedarf ändern',
'Delete Site Needs': 'Standortbedarf löschen',
'Site Needs added': 'Standortbedarf hinzugefügt',
'Site Needs updated': 'Standortbedarf aktualisiert',
'Site Needs deleted': 'Standortbedarf gelöscht',
'Size of Family': 'Größe der Familie',
'Situation Awareness & Geospatial Analysis': 'Situationseinschätzung & Räumliche Analyse',
'Sketch': 'Skizze',
'Skill Catalog': 'Fähigkeitskatalog',
'Skill Details': 'Details zur Fähigkeit',
'Skill Equivalence Details': 'Details zur Fähigkeits-Vergleichbarkeit',
'Skill Equivalence added': 'Fähigkeits-Vergleichbarkeit hinzugefügt',
'Skill Equivalence deleted': 'Fähigkeits-Vergleichbarkeit gelöscht',
'Skill Equivalence updated': 'Fähigkeits-Vergleichbarkeit aktualisiert',
'Skill Equivalence': 'Fähigkeits-Vergleichbarkeit',
'Skill Equivalences': 'Fähigkeits-Vergleichbarkeiten',
'Skill Provision Catalog': 'Katalog der Fähigkeits-Bereitstellungen',
'Skill Provision Details': 'Details zur Fähigkeits-Bereitstellung',
'Skill Provision added': 'Fähigkeits-Bereitstellung hinzugefügt',
'Skill Provision deleted': 'Fähigkeits-Bereitstellung gelöscht',
'Skill Provision updated': 'Fähigkeits-Bereitstellung aktualisiert',
'Skill Provision': 'Fähigkeits-Bereitstellung',
'Skill Provisions': 'Fähigkeits-Bereitstellungen',
'Skill Status': 'Fähigkeitsstatus',
'Skill TYpe': 'Fähigkeitstyp',
'Skill Type Catalog': 'Fähigkeitstypen-Katalog',
'Skill Type Details': 'Details zum Fähigkeitstyp',
'Skill Type added': 'Fähigkeitstyp hinzugefügt',
'Skill Type deleted': 'Fähigkeitstyp gelöscht',
'Skill Type updated': 'Fähigkeitstyp aktualisiert',
'Skill Types': 'Fähigkeitstypen',
'Skill added': 'Fähigkeit hinzugefügt',
'Skill deleted': 'Fähigkeit gelöscht',
'Skill updated': 'Fähigkeit aktualisiert',
'Skill': 'Fähigkeit',
'Skills Catalog': 'Fähigkeitskatalog',
'Skills Management': 'Fähigkeitsmanagement',
'Skills': 'Fähigkeiten',
'Skype ID': 'Skype ID',
'Slope failure, debris': 'Hangrutschung, Geröll',
'Small Trade': 'Kleiner Handel',
'Smoke': 'Rauch',
'Snapshot Report': 'Bericht zur aktuellen Lage',
'Snapshot': 'Momentaufnahme',
'Snow Fall': 'Schneefall',
'Snow Squall': 'Schneeschauer',
'Soil bulging, liquefaction': 'Bodenaufwölbung, Bodenverflüssigung',
'Solid waste': 'Feste Abfälle',
'Solution Details': 'Details zur Lösung',
'Solution Item': 'Lösungselement',
'Solution added': 'Lösung hinzugefügt',
'Solution deleted': 'Lösung gelöscht',
'Solution updated': 'Lösung aktualisiert',
'Solution': 'Lösung',
'Solutions': 'Lösungen',
'Some': 'Einige',
'Sorry that location appears to be outside the area of the Parent.': 'Entschuldigung, diese Position scheint außerhalb des Bereichs des übergeordneten Elements zu liegen.',
'Sorry that location appears to be outside the area supported by this deployment.': 'Entschuldigung, diese Position scheint außerhalb des Bereichs zu liegen, der von dieser Anwendung unterstützt wird.',
'Sorry, I could not understand your request': 'Entschuldigung, leider konnte ich ihre Anfrage nicht verstehen',
'Sorry, only users with the MapAdmin role are allowed to create location groups.': 'Entschuldigung, nur Benutzer mit der Kartenadministrator-Rolle sind berechtigt Gruppen von Standorten/Gebieten zu erstellen.',
'Sorry, only users with the MapAdmin role are allowed to edit these locations': 'Entschuldigung, nur Benutzer mit der Kartenadministrator-Rolle sind berechtigt diese Standorte/Gebiete zu bearbeiten',
'Sorry, something went wrong.': 'Entschuldigung, leider ist etwas schiefgelaufen.',
'Sorry, that page is forbidden for some reason.': 'Entschuldigung, leider ist der Zugriff auf diese Seite aus irgendeinem Grund nicht zulässig.',
'Sorry, that service is temporary unavailable.': 'Entschuldigung, leider steht dieser Dienst vorübergehend nicht zur Verfügung.',
'Sorry, there are no addresses to display': 'Entschuldigung, leider sind keine Adressen zum Anzeigen vorhanden',
'Sought': 'Gesucht',
'Source ID': 'Quellen-ID',
'Source Time': 'Zeit der Quelle',
'Source': 'Quelle',
'Sources of income': 'Einkommensquellen',
'Space Debris': 'Weltraumschrott',
'Spanish': 'Spanisch',
'Special Ice': 'Besonderes Eis',
'Special Marine': 'Besondere Seewetterlage',
'Specialized Hospital': 'Spezialisiertes Krankenhaus',
'Specific Area (e.g. Building/Room) within the Location that this Person/Group is seen.': 'Bestimmter Bereich (z.B. Gebäude/Raum) innerhalb eines Ortes in der diese Person/Gruppe gefunden werden kann.',
'Specific locations need to have a parent of level': 'Bestimmte Orte benötigen ein übergeordnetes Element der Stufe',
'Specify a descriptive title for the image.': 'Geben Sie einen beschreibenden Titel für das Bild an.',
'Specify the bed type of this unit.': 'Geben Sie den Bettentyp für diese Einheit an.',
'Specify the number of available sets': 'Geben Sie die Anzahl der verfügbaren Sätze an',
'Specify the number of available units (adult doses)': 'Geben Sie die Anzahl der verfügbaren Einheiten ein (Dosis für Erwachsene)',
'Specify the number of available units (litres) of Ringer-Lactate or equivalent solutions': 'Geben Sie die Anzahl der verfügbaren Einheiten (in Liter) von Ringer-Lactat oder gleichwertige Lösungen ein',
'Specify the number of sets needed per 24h': 'Geben Sie die Anzahl der erforderlichen Sätze pro 24h ein',
'Specify the number of units (Erwachsenendosen) needed per 24h': 'Geben Sie die Anzahl der Einheiten (Dosis für Erwachsene) ein, die pro 24h benötigt werden.',
'Specify the number of units (litres) of Ringer-Lactate or equivalent solutions needed per 24h': 'Geben Sie die Anzahl der Einheiten (in Liter) von Ringer-Lactat oder gleichwertigen Lösungen ein, die man pro 24h braucht.',
'Spherical Mercator?': 'Sphärischer Mercator?',
'Spouse': 'Ehegatte',
'Spreadsheet Importer': 'Import von Tabellendokumenten',
'Spreadsheet uploaded': 'Tabellendokument hochgeladen',
'Squall': 'Sturmschauer',
'Staff & Volunteers': 'Mitarbeiter & Freiwillige',
'Staff & Volunteers (Combined)': 'Mitarbeiter & Freiwillige (kombiniert)',
'Staff ID': 'Mitarbeiter-ID',
'Staff Management': 'Mitarbeitermanagement',
'Staff Member Details': 'Details zum Mitarbeiter',
'Staff Member added': 'Mitarbeiter hinzugefügt',
'Staff Members': 'Mitarbeiter',
'Staff Record': 'Mitarbeiterakte',
'Staff Report': 'Mitarbeiterbericht',
'Staff Type Details': 'Details zum Mitarbeitertyp',
'Staff Type added': 'Mitarbeitertyp hinzugefügt.',
'Staff Type deleted': 'Mitarbeitertyp gelöscht',
'Staff Type updated': 'Mitarbeitertyp aktualisiert',
'Staff Types': 'Mitarbeitertypen',
'Staff and Volunteers': 'Mitarbeiter und Freiwillige',
'Staff & Volunteers (combined)': 'Mitarbeiter & Freiwillige (kombiniert)',
'Staff member added': 'Mitarbeiter hinzugefügt',
'Staff present and caring for residents': 'Mitarbeiter anwesend und versorgen die Bewohner',
'Staff with Contracts Expiring in the next Month': 'Mitarbeiter, deren Verträge im Laufe des nächsten Monats ablaufen',
'Staff': 'Mitarbeiter',
'Staffing': 'Mitarbeiterausstattung',
'Stairs': 'Treppen',
'Start Date': 'Startdatum',
'Start date': 'Startdatum',
'Start of Period': 'Beginn einer Periode',
'State': 'Bundesland',
'State / Province': 'Staat / Bundesland',
'State /Province': 'Staat / Bundesland',
'Stationery': 'Büromaterial',
'Status Code': 'Statuscode',
'Status Report': 'Statusbericht',
'Status Reports': 'Statusberichte',
'Status Update': 'Statusaktualisierung',
'Status Updated': 'Status aktualisiert',
'Status added': 'Status hinzugefügt',
'Status deleted': 'Status gelöscht',
'Status of clinical operation of the facility.': 'Status des klinischen Betriebs der Einrichtung.',
'Status of general operation of the facility.': 'Status des allgemeinen Betriebs der Einrichtung.',
'Status of morgue capacity.': 'Status der Leichenhallenkapazität.',
'Status of operations of the emergency department of this hospital.': 'Status des Betriebs der Notaufnahme dieses Krankenhauses.',
'Status of security procedures/access restrictions in the hospital.': 'Status der Sicherheitsverfahren/Zugangsbeschränkungen in diesem Krankenhaus.',
'Status of the operating rooms of this hospital.': 'Status der Operationssäle dieses Krankenhauses.',
'Status updated': 'Status aktualisiert',
'Status': 'Status',
'Stay Permit until': 'Aufenthaltsgestattung bis',
'Steel frame': 'Stahlrahmen',
'Stock': 'Bestand',
'Stock Counts': 'Bestandszahlen',
'Stock in Warehouse': 'Bestand im Warenlager',
'Stolen': 'Gestohlen',
'Store spreadsheets in the Eden database': 'Speichere Tabellendokument in die Eden Datenbank',
'Storeys at and above ground level': 'Stockwerke auf und über der Erdoberfläche',
'Storm Force Wind': 'Wind in Sturmstärke',
'Storm Surge': 'Sturmflut',
'Stowaway': 'Blinder Passagier',
'Street Address': 'Straßenadresse',
'Strong Wind': 'Starker Wind',
'Structural Hazards': 'Strukturelle Gefahren',
'Structural': 'Strukturell',
'Styles': 'Styles/Symbolisierungen',
'Style Field': 'Style-Feld',
'Style Values': 'Style-Werte',
'Sub-type': 'Unterart',
'Subject': 'Betreff',
'Submission successful - please wait': 'Absenden erfolgreich - bitte warten',
'Submission successful - please wait...': 'Absenden erfolgreich - bitte warten ...',
'Submit New (full form)': 'Neue Daten absenden (vollständiges Formular)',
'Submit New (triage)': 'Neue Daten absenden (Auswahl)',
'Submit New': 'Neue Daten absenden',
'Submit a request for recovery': 'Registrieren einer Bergungsanfrage',
'Submit new Level 1 assessment (full form)': 'Absenden einer neuen Stufe 1 Beurteilung (vollständiges Formular)',
'Submit new Level 1 assessment (triage)': 'Absenden einer neuen Stufe 1 Beurteilung (Auswahl)',
'Submit new Level 2 assessment': 'Absenden einer neuen Stufe 2 Beurteilung',
'Submit': 'Abschicken',
'Subscription Details': 'Details zum Abo',
'Subscription added': 'Abo hinzugefügt',
'Subscription deleted': 'Abo gelöscht',
'Subscription updated': 'Abo aktualisiert',
'Subscriptions': 'Abonnements',
'Subsector Details': 'Details zum Teilbereich',
'Subsector added': 'Teilbereich hinzugefügt',
'Subsector deleted': 'Teilbereich gelöscht',
'Subsector updated': 'Teilbereich aktualisiert',
'Subsector': 'Teilbereich',
'Subsectors': 'Teilbereiche',
'Subsistence Cost': 'Verpflegungskosten',
'Suburb': 'Vorort',
'Suggest not changing this field unless you know what you are doing.': 'Es wird empfohlen, dieses Feld nur zu ändern, wenn Sie genau wissen, was Sie tun.',
'Suitable': 'Geeignet',
'Summary by Administration Level': 'Zusammenfassung nach Verwaltungsstufe',
'Summary of Incoming Supplies': 'Zusammenfassung der eingehenden Vorräte',
'Summary of Releases': 'Zusammenfassung der abgegebenen Vorräte',
'Summary': 'Zusammenfassung',
'Sunday': 'Sonntag',
'Supermarket': 'Supermarkt',
'Supplier/Donor': 'Lieferant/Spender',
'Suppliers': 'Lieferanten',
'Supply Chain Management': 'Versorgungsketten-Management',
'Support provided': 'Durchgeführte Maßnahmen',
'Support Request': 'Unterstützungsanforderung',
'Support Requests': 'Unterstützungsanforderungen',
'Supports the decision making of large groups of Crisis Management Experts by helping the groups create ranked list.': 'Unterstützt die Entscheidungsfindung großer Gruppen von Krisenmanagement-Experten, indem es den Gruppen hilft, Prioritätenlisten zu erstellen.',
'Surgery': 'Chirurgie',
'Survey Answer Details': 'Details zur Umfrage-Antwort',
'Survey Answer added': 'Umfrage-Antwort hinzugefügt',
'Survey Answer deleted': 'Umfrage-Antwort gelöscht',
'Survey Answer updated': 'Umfrage-Antwort aktualisiert',
'Survey Answer': 'Umfrage-Antwort',
'Survey Module': 'Umfrage Modul',
'Survey Name': 'Name der Umfrage',
'Survey Question Details': 'Details zur Umfrage-Frage',
'Survey Question Display Name': 'Angezeigter Name der Umfrage-Frage',
'Survey Question added': 'Umfrage-Frage hinzugefügt',
'Survey Question deleted': 'Umfrage-Frage gelöscht',
'Survey Question updated': 'Umfrage-Frage aktualisiert',
'Survey Question': 'Umfrage-Frage',
'Survey Series Details': 'Details zur Umfragenserie',
'Survey Series Name': 'Angezeigter Name der Umfrageserie',
'Survey Series added': 'Umfrageserie hinzugefügt',
'Survey Series deleted': 'Umfrageserie gelöscht',
'Survey Series updated': 'Umfrageserie aktualisiert',
'Survey Series': 'Umfrageserien',
'Survey Template Details': 'Details zur Umfragenvorlage',
'Survey Template added': 'Umfragenvorlage hinzugefügt',
'Survey Template deleted': 'Umfragenvorlage gelöscht',
'Survey Template updated': 'Umfragevorlage aktualisiert',
'Survey Template': 'Umfragenvorlage',
'Survey Templates': 'Umfragenvorlagen',
'Surveys': 'Umfragen',
'Suspended': 'Gesperrt',
'Suspended Cases': 'Gesperrte Fälle',
'Switch to 3D': 'In Google Earth anzeigen',
'Symbology': 'Symbolisierung',
'Sync Conflicts': 'Synchronisierungskonflikte',
'Sync History': 'Synchronisierungshistorie',
'Sync Now': 'Jetzt synchronisieren',
'Sync Partners are instances or peers (SahanaEden, SahanaAgasti, Ushahidi, etc.) that you want to sync information with. Click on the link on the right to go the page where you can add sync partners, search for sync partners and modify them.': 'Partner für die Synchronisation sind Instanzen oder Peers (SahanaEden, SahanaAgasti, Ushahidi, etc.), mit denen Sie Informationen synchronisieren möchten. Ein Klick auf den Link rechts bringt Sie zur Seite, auf der Sie Partner hinzufügen, suchen und ändern können.',
'Sync Partners': 'Partner für die Synchronisation',
'Sync Pools': 'Synchronisierungspools',
'Sync Schedule': 'Synchronisierungszeitplan',
'Sync Settings': 'Synchronisierungseinstellungen',
'Sync process already started on': 'Sync-Prozess bereits gestartet am',
'Synchronisation': 'Synchronisierung',
'Synchronization Conflicts': 'Synchronisierungskonflikte',
'Synchronization Details': 'Synchronisierung - Details',
'Synchronization History': 'Synchronisierungshistorie',
'Synchronization Peers': 'Synchronisierung von Peers',
'Synchronization Settings': 'Synchronisierungseinstellungen',
'Synchronization allows you to share data that you have with others and update your own database with latest data from other peers. This page provides you with information about how to use the synchronization features of Sahana Eden': 'Die Synchronisierung erlaubt es Ihnen, Ihre Daten mit anderen zu teilen und Ihre eigene Datenbank mit den neuesten Daten anderer Peers zu aktualisieren. Diese Seite informiert Sie darüber, wie Sie die Synchronisierungsfunktionen von Sahana Eden verwenden.',
'Synchronization not configured.': 'Synchronisierung nicht konfiguriert.',
'Synchronization settings updated': 'Synchronisierungseinstellungen wurden aktualisiert',
'Synchronization': 'Synchronisierung',
'Syncronisation History': 'Synchronisierungshistorie',
'Table': 'Tabelle',
'Tags': 'Tags',
'Take shelter in place or per <instruction>': 'Unterkunft aufsuchen oder <instruction>',
'Task Details': 'Details zur Aufgabe',
'Task List': 'Aufgabenliste',
'Task Status': 'Aufgabenstatus',
'Task added': 'Aufgabe hinzugefügt',
'Task deleted': 'Aufgabe gelöscht',
'Task updated': 'Aufgabe aktualisiert',
'Tasks': 'Aufgaben',
'Team Description': 'Teambeschreibung',
'Team Details': 'Details zum Team',
'Team Id': 'Team-ID',
'Team Leader': 'Teamleiter',
'Team Member added': 'Teammitglied hinzugefügt',
'Team Members': 'Teammitglieder',
'Team Name': 'Name des Teams',
'Team Type': 'Typ des Teams',
'Team added': 'Team hinzugefügt',
'Team deleted': 'Team gelöscht',
'Team updated': 'Team aktualisiert',
'Technical testing only, all recipients disregard': 'Diese Benachrichtigung ist ein technischer Test, bitte ignorieren',
'Telecommunications': 'Telekommunikation',
'Telephone': 'Telefon',
'Telephony': 'Telefonie',
'Temp folder %s not writable - unable to apply theme!': 'Temporärer Ordner %s nicht beschreibbar - Layout (theme) kann nicht angewandt werden!',
'Template Name': 'Name der Vorlage',
'Template file %s not readable - unable to apply theme!': 'Template Datei %s nicht lesbar - Layout (theme) kann nicht angewandt werden!',
'Templates': 'Vorlagen',
'Term for the fifth-level within-country administrative division (e.g. a voting or postcode subdivision). This level is not often used.': 'Begriff für die 5. Ebene der Verwaltungshierarchie eines Landes (z.B. ein Wahl- oder Postleitzahlenbereich). Diese Stufe wird nicht oft verwendet.',
'Term for the fourth-level within-country administrative division (e.g. Village, Neighborhood or Precinct).': 'Begriff für die 4. Ebene der Verwaltungshierarchie eines Landes (z.B. Dorf, Stadtteil).',
'Term for the primary within-country administrative division (e.g. State or Province).': 'Begriff für die 1. Ebene der Verwaltungshierarchie eines Landes (z. B. Staat oder Bundesland).',
'Term for the secondary within-country administrative division (e.g. District or County).': 'Begriff für die 2. Ebene der Verwaltungshierarchie eines Landes (z. B. Regierungsbezirk oder Landkreis).',
'Term for the third-level within-country administrative division (e.g. City or Town).': 'Begriff für die 3. Ebene der Verwaltungshierarchie eines Landes (z. B. Ort oder Stadt).',
'Term for the top-level administrative division (i.e. Country).': 'Begriff für die Verwaltung der höchsten Ebene (d. h. Land).',
'Test Results': 'Testergebnisse',
'Territorial Authority': 'Territoriale Behörde',
'Terrorism': 'Terrorismus',
'Tertiary Server (Optional)': 'Tertiärer Server (Optional)',
'Text Color for Text blocks': 'Textfarbe für Textblöcke',
'Thank you for validating your email. Your user account is still pending for approval by the system administator (%s).You will get a notification by email when your account is activated.': 'Danke für die Validierung Ihrer E-Mail. Ihr Benutzerkonto wartet noch auf die Genehmigung durch den Systemadministrator (%s). Sie erhalten eine Benachrichtigung per E-Mail, sobald Ihr Konto aktiviert wurde.',
'Thanks for your assistance': 'Danke für Ihre Hilfe',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1 == db.table2.field2" results in a SQL JOIN.': 'Die "query" ist eine Bedingung für "db.table1.field1==\'value\'". Irgendetwas wie "db.table1.field1 == db.table2.field2" führt zu einem SQL JOIN.',
'The Area which this Site is located within.': 'Der Bereich, in dem sich dieser Ort befindet.',
'The Assessments module allows field workers to send in assessments.': 'Das Beurteilungsmodul erlaubt Außendienstmitarbeitern, ihre Beurteilungen einzusenden.',
'The Assessment Module stores assessment templates and allows responses to assessments for specific events to be collected and analyze': 'Das Beurteilungsmodul speichert Beurteilungsvorlagen und erlaubt Antworten auf Beurteilungen spezieller Ereignisse zu sammeln und auszuwerten',
'The Assessment Module stores assessment templates and allows responses to assessments for specific events to be collected and analyzed': 'Das Beurteilungsmodul speichert Beurteilungsvorlagen und erlaubt es Antworten zu speziellen Ereignissen zu sammeln und zu analysieren',
'The Author of this Document (optional)': 'Der Autor dieses Dokumentes (optional)',
'The Building Asssesments module allows building safety to be assessed, e.g. after an Earthquake.': 'Das Gebäudebeurteilungsmodul erlaubt die Sicherheit eines Gebäudes zu beurteilen, z. B. nach einem Erdbeben.',
'The Camp this Request is from': 'Das Camp von dem diese Anfrage stammt',
'The Camp this person is checking into.': 'Das Camp, in das diese Person eincheckt.',
'The Current Location of the Person/Group, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'Die aktuelle Position der Person/Gruppe, welche ungenau (für die Berichterstellung) oder genau (zur Anzeige auf einer Karte) sein kann. Geben Sie einige Zeichen ein, um aus verfügbaren Standorten auszuwählen.',
'The Email Address to which approval requests are sent (normally this would be a Group mail rather than an individual). If the field is blank then requests are approved automatically if the domain matches.': 'Die E-Mail-Adresse, an welche die Genehmigungsanfragen gesendet werden (normalerweise ist das eine Gruppen-Mail, keine Adresse einer Einzelperson). Wenn das Feld leer ist, werden Anforderungen automatisch genehmigt, wenn die Domänennamen übereinstimmen.',
'The Incident Reporting System allows the General Public to Report Incidents & have these Tracked.': 'Das Vorfall-Berichtssystem ermöglicht der Allgemeinheit, Vorfälle zu melden und diese verfolgen zu lassen.',
'The Location the Person has come from, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'Der Herkunftsort der Person kann ungenau (für die Berichterstellung) oder genau (zur Anzeige auf einer Karte) sein. Geben Sie einige Zeichen ein, um aus verfügbaren Standorten auszuwählen.',
'The Location the Person is going to, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'Der Ort, zu dem die Person gehen wird, welcher ungenau (für Berichte) oder genau (für die Darstellung auf einer Karte) sein kann. Geben Sie einige Zeichen ein, um aus verfügbaren Standorten auszuwählen.',
'The Media Library provides a catalog of digital media.': 'Das Medienverzeichnis bietet einen Katalog digitaler Medien',
'The Messaging Module is the main communications hub of the Sahana system. It is used to send alerts and/or messages using SMS & Email to various groups and individuals before, during and after a disaster.': 'Das Nachrichtenmodul ist der Hauptknotenpunkt der Kommunikation des Sahana Systems. Es wird verwendet, um Warnungen und/oder andere Nachrichten mit Hilfe von SMS & E-Mail an unterschiedliche Gruppen und Einzelpersonen vor, während und nach einem Katastrophenfall zu schicken.',
'The Organization Registry keeps track of all the relief organizations working in the area.': 'Das Organisationsregister gibt einen Überblick über alle Hilfsorganisationen, die in der Region arbeiten.',
'The Project Tracking module allows the creation of Activities to meet Gaps in Needs Assessments.': 'Das Projektüberwachungsmodul ermöglicht die Erstellung von Aktivitäten um Lücken in Anforderungsbewertungen zu füllen.',
'The Role this person plays within this hospital.': 'Die Rolle die diese Person im Krankenhaus übernimmt.',
'The Shelter Registry tracks all shelters and stores basic details regarding them. It collaborates with other modules to track people associated with a shelter, the services available etc.': 'Das Unterkunftsregister protokolliert alle Unterkünfte und speichert allgemeine Details. Es arbeitet mit anderen Modulen zusammen, um Menschen die sich in einer Unterkunft befinden, sowie die dort zur Verfügung stehenden Leistungen etc. zu dokumentieren.',
'The Shelter this Request is from': 'Die Unterkunft aus welcher diese Anforderung stammt',
'The Shelter this person is checking into.': 'Die Unterkunft in die diese Person eincheckt.',
'The URL for the GetCapabilities page of a Web Map Service (WMS) whose layers you want available via the Browser panel on the Map.': 'Die URL zur "GetCapabilities" Operation eines MapWebService (WMS), dessen Kartenbenen über die Anzeige verfügbar sein sollen.',
'The URL of your web gateway without the post parameters': 'Die URL Ihres Web-Gateways ohne die POST-Parameter',
'The URL to access the service.': 'Die URL für den Zugriff auf den Service.',
'The Unique Identifier (UUID) as assigned to this facility by the government.': 'Die eindeutige Kennung (UUID) die dieser Einrichtung von der Regierung zugeordnet wurde.',
'The asset must be assigned to a site OR location.': 'Die Anlage muss einem Standort ODER einem Gebiet zugeordnet werden.',
'The attribute which is used for the title of popups.': 'Das Attribut, welches für den Titel von Dialogfenstern verwendet wird.',
'The attribute within the KML which is used for the title of popups.': 'Das Attribut in der KML das für den Titel der Dialogfenster verwendet wird.',
'The attribute(s) within the KML which are used for the body of popups. (Use a space between attributes)': 'Die Attribute innerhalb der KML, die für den Inhalt der Dialogfenster verwendet werden. (Verwenden Sie ein Leerzeichen zwischen den Attributen)',
'The body height (crown to heel) in cm.': 'Die Körpergröße (Scheitel bis Ferse) in cm.',
'The country the person usually lives in.': 'Das Land, in dem die Person normalerweise lebt.',
'The default Organization for whom this person is acting.': 'Die Standardorganisation, für die diese Person agiert',
'The default Organization for whom you are acting.': 'Die Standardorganisation für welche Sie agieren',
'The duplicate record will be deleted': 'Der doppelte Datensatz wird gelöscht.',
'The first or only name of the person (mandatory).': 'Der erste oder einzige Name der Person (erforderlich)',
'The form of the URL is http://your/web/map/service?service=WMS&request=GetCapabilities where your/web/map/service stands for the URL path to the WMS.': 'Das Format der URL ist http://your/web/map/service?service=WMS&request=GetCapabilities wobei your/web/map/service für den Pfad der URL zum WMS steht',
'The language you wish the site to be displayed in.': 'Die Sprache in der die Seite angezeigt werden soll.',
'The list of Brands are maintained by the Administrators.': 'Die Liste der Marken wird von den Administratoren verwaltet.',
'The list of Catalogs are maintained by the Administrators.': 'Die Liste der Kataloge wird vom Administrator verwaltet.',
'The map will be displayed initially with this latitude at the center.': 'Die Karte wird zunächst auf diese Geographische Breite zentriert.',
'The map will be displayed initially with this longitude at the center.': 'Die Karte wird zunächst auf diese Geographische Länge zentriert.',
'The minimum number of features to form a cluster.': 'Die minimale Anzahl von Objekten, die einen Cluster bilden.',
'The name to be used when calling for or directly addressing the person (optional).': 'Der zu verwendende Name beim Anfragen oder direkten Ansprechen der Person (optional).',
'The next screen will allow you to detail the number of people here & their needs.': 'Der nächste Bildschirm erlaubt es, nähere Angaben zur Anzahl der Menschen hier und zu ihren Bedürfnissen zu machen.',
'The number of Units of Measure of the Alternative Items which is equal to One Unit of Measure of the Item': 'Die Anzahl der Maßeinheiten eines alternativen Artikels, welcher einer Maßeinheit von diesem Artikel entspricht',
'The number of pixels apart that features need to be before they are clustered.': 'Der Mindestabstand in Pixeln, den Objekte haben müssen, bevor sie zu Clustern zusammengefasst werden.',
'The number of tiles around the visible map to download. Zero means that the 1st page loads faster, higher numbers mean subsequent panning is faster.': 'Die Anzahl der Teilbilder rund um den sichtbaren Kartenausschnitt die heruntergeladen werden. Null bedeutet, dass die erste Seite schneller geladen wird, höhere Zahlen bedeuten dass nachfolgendes Schwenken schneller ist.',
'The person at the location who is reporting this incident (optional)': 'Die Person vor Ort welche das Ereignis meldet (optional)',
'The post variable containing the phone number': 'Der POST-Parameter, der die Telefonnummer beinhaltet',
'The post variable on the URL used for sending messages': 'Der POST-Parameter der URL, der zum Senden von Nachrichten verwendet wird',
'The post variables other than the ones containing the message and the phone number': 'Die POST-Parameter, die nicht die Nachricht oder Telefonnummer beinhalten',
'The serial port at which the modem is connected - /dev/ttyUSB0, etc on linux and com1, com2, etc on Windows': 'Der serielle Anschluss mit dem das Modem verbunden ist - /dev/ttyUSB0, etc unter linux und com1, com2, etc unter Windows',
'The server did not receive a timely response from another server that it was accessing to fill the request by the browser.': 'Der Server hat keine rechtzeitige Antwort von einem anderen Server erhalten, um die Anfrage des Clients beantworten zu können.',
'The server received an incorrect response from another server that it was accessing to fill the request by the browser.': 'Der Server hat eine ungültige Antwort von einem anderen Server erhalten, auf den er zugegriffen hat, um die Anfrage des Browsers zu beantworten.',
'The site where this position is based.': 'Der Standort, an dem diese Position angesiedelt ist.',
'The staff responsibile for Facilities can make Requests for assistance. Commitments can be made against these Requests however the requests remain open until the requestor confirms that the request is complete.': 'Die für Einrichtungen zuständigen Mitarbeiter können Unterstützungsanfragen stellen. Zu diesen Anfragen können Zusagen gemacht werden; die Anfragen bleiben jedoch offen, bis der Anfordernde bestätigt, dass die Anfrage erfüllt ist.',
'The subject event no longer poses a threat or concern and any follow on action is described in <instruction>': 'Das genannte Ereignis stellt keine Bedrohung oder Sorge mehr dar und jede nachfolgende Aktion ist unter <instruction> beschrieben.',
'The time at which the Event started.': 'Die Zeit zu der das Ereignis startete.',
'The total number of family members including this person.': 'Die Gesamtanzahl der Familienmitglieder einschließlich dieser Person.',
'The token associated with this application on': 'Das Token, welches mit dieser Anwendung verbunden ist, auf',
'The type of appointments which are completed with this type of event': 'Die Art von Terminen die mit Ereignissen dieses Typs abgeschlossen werden',
'The unique identifier which identifies this instance to other instances.': 'Die eindeutige Kennung (UUID), die diese Instanz bei der Kommunikation mit anderen Instanzen identifiziert.',
"The volunteer's role": "Rolle des Freiwilligen",
'The way in which an item is normally distributed': 'Die Art, in der ein Artikel normalerweise verteilt wird.',
'The weight in kg.': 'Das Gewicht in kg.',
'The': 'Das',
'Thematic Mapping': 'Thematische Kartendarstellung',
'Theme Details': 'Details zum Thema',
'Theme added': 'Thema hinzugefügt',
'Theme deleted': 'Thema gelöscht',
'Theme updated': 'Thema aktualisiert',
'Theme': 'Thema',
'Themes': 'Themen',
'There are errors': 'Es sind Fehler aufgetreten',
'There are insufficient items in the Inventory to send this shipment': 'Es sind nicht genügend Artikel im Bestand, um diese Lieferung abzusenden.',
'There are more than %(max)s results, please input more characters.': 'Mehr als %(max)s Treffer gefunden, bitte geben Sie mehr Zeichen ein.',
'There are multiple records at this location': 'An dieser Stelle gibt es mehrere Datensätze',
'There is no address for this person yet. Add new address.': 'Für diese Person gibt es noch keine Adresse. Fügen Sie eine neue Adresse hinzu.',
'These are settings for Inbound Mail.': 'Dies sind Einstellungen für eingehende Mail.',
'These are the Incident Categories visible to normal End-Users': 'Dies sind die für alle Endbenutzer sichtbaren Kategorien von Vorfällen',
'These need to be added in Decimal Degrees.': 'Diese müssen in Dezimalgrad hinzugefügt werden.',
'They': 'Sie',
'This appointment is mandatory before transfer': 'Dieser Termin ist zwingend erforderlich vor Transfer',
'This appointment requires the presence of the person concerned': 'Dieser Termin erfordert die Anwesenheit der betroffenen Person',
'This flag indicates that the person is currently accommodated/being held externally (e.g. in Hospital or with Police)': 'Dieses Flag zeigt an dass die Person momentan extern untergebracht ist oder festgehalten wird (z.B. im Krankenhaus, oder bei der Polizei)',
'This Group has no Members yet': 'Diese Gruppe hat noch keine Mitglieder',
'This Team has no Members yet': 'Dieses Team hat noch keine Mitglieder',
'This appears to be a duplicate of': 'Dies scheint ein Duplikat zu sein von',
'This file already exists on the server as': 'Diese Datei existiert bereits auf dem Server als',
'This is appropriate if this level is under construction. To prevent accidental modification after this level is complete, this can be set to False.': "Dies ist zulässig, wenn sich die Stufe noch im Aufbau befindet. Um unbeabsichtigte Änderungen zu verhindern, nachdem diese Stufe abgeschlossen ist, kann dies auf 'False' gesetzt werden.",
'This is the way to transfer data between machines as it maintains referential integrity.': 'Auf diese Weise werden Daten zwischen Maschinen übertragen, da so die referenzielle Integrität erhalten bleibt.',
'This is the way to transfer data between machines as it maintains referential integrity...duplicate data should be removed manually 1st!': 'Auf diese Weise werden Daten zwischen Maschinen übertragen, da so die referenzielle Integrität erhalten bleibt. Doppelte Daten sollten zuerst manuell entfernt werden!',
'This level is not open for editing.': 'Diese Stufe ist nicht zum Bearbeiten freigegeben.',
'This might be due to a temporary overloading or maintenance of the server.': 'Dies wurde möglicherweise durch eine vorübergehende Überlastung oder Wartung des Servers ausgelöst.',
'This module allows Inventory Items to be Requested & Shipped between the Inventories of Facilities.': 'Dieses Modul ermöglicht es, Bestandsartikel zwischen den Beständen verschiedener Einrichtungen anzufragen und zu liefern.',
'This module allows the editing of page content using a web browser.': 'Dieses Modul ermöglicht das Editieren der Webseite unter Verwendung des Browsers.',
'This module allows you to plan scenarios for both Exercises & Events. You can allocate appropriate Resources (Human, Assets & Facilities) so that these can be mobilized easily.': 'Mit diesem Modul können Sie Szenarien sowohl für Übungen als auch für Ereignisse planen. Sie können geeignete Ressourcen (Menschen, Anlagen & Einrichtungen) zuordnen, damit diese leicht mobilisiert werden können.',
'This page shows you logs of past syncs. Click on the link below to go to this page.': 'Diese Seite zeigt ihnen die Protokolle von vorherigen Syncs. Klicken Sie auf den Link unten um auf diese Seite zu gelangen.',
'This person already belongs to this group': 'Diese Person gehört bereits zu dieser Gruppe',
'This person already belongs to another case group': 'Diese Person gehört bereits zu einer anderen Fallgruppe',
'This screen allows you to upload a collection of photos to the server.': 'Diese Seite ermöglicht ihnen eine Sammlung von Fotos zum Server hochzuladen.',
'This setting can only be controlled by the Administrator.': 'Diese Einstellung kann nur vom Systemverwalter vorgenommen werden.',
'This shipment has already been received.': 'Diese Lieferung wurde bereits empfangen.',
'This shipment has already been sent.': 'Diese Lieferung wurde bereits abgeschickt.',
'This shipment has not been received - it has NOT been canceled because it can still be edited.': 'Diese Lieferung wurde noch nicht empfangen - sie wurde NICHT storniert, da sie noch bearbeitet werden kann.',
'This shipment has not been sent - it has NOT been canceled because it can still be edited.': 'Diese Lieferung wurde noch nicht gesendet - sie wurde NICHT storniert, da sie noch bearbeitet werden kann.',
'This shipment will be confirmed as received.': 'Der Empfang dieser Lieferung wird bestätigt.',
'This status applies for new cases unless specified otherwise': 'Dieser Status gilt für neue Fälle, sofern nicht anders angegeben',
'This unit is for transitory accommodation upon arrival.': 'Diese Einheit dient zur kurzfristigen Unterbringung bei Ankunft.',
'This process can take a couple of minutes': 'Dieser Vorgang kann einige Minuten dauern',
'Thunderstorm': 'Gewitter',
'Thursday': 'Donnerstag',
'Ticket Details': 'Details zum Ticket',
'Ticket ID': 'Ticket-ID',
'Ticket added': 'Ticket hinzugefügt',
'Ticket deleted': 'Ticket gelöscht',
'Ticket updated': 'Ticket aktualisiert',
'Ticketing Module': 'Ticket Modul',
'Tile Mapping Service': 'TileMapService',
'Tilt-up concrete': 'Betonfertigteilbauweise (Tilt-up)',
'Timber frame': 'Holzrahmen',
'Timeline Report': 'Bericht zum Zeitplan',
'Timeline': 'Zeitplan',
'Time Out': 'Ausgangszeit',
'Time Question': 'Zeit-Frage',
'Title': 'Titel',
'Title to show for the Web Map Service panel in the Tools panel.': 'Titel, mit dem die Web Map Service-Leiste in der Werkzeugleiste angezeigt wird.',
'To Location': 'Zum Standort',
'To Organization': 'Zur Organisation',
'To Person': 'Zu Händen von',
'To begin the sync process, click the button on the right =>': 'Zum Starten der Synchronisierung, klicken Sie auf die Schaltfläche auf der rechten Seite =>',
'To begin the sync process, click this button =>': 'Um den Synchronisierungsprozess zu starten, klicken Sie diese Schaltfläche =>',
'To create a personal map configuration, click': 'Um eine persönliche Kartenkonfiguration zu erstellen, klicken Sie auf',
'To edit OpenStreetMap, you need to edit the OpenStreetMap settings in models/000_config.py': 'Zum Bearbeiten von OpenStreetMap müssen Sie die Einstellungen in models/000_config.py anpassen',
'To move the Timeline: use the mouse scroll wheel, the arrow keys or grab and drag the Timeline.': "Um die Zeitachse zu verschieben nutzen Sie bitte das Mausrad, die Pfeiltasten oder verschieben Sie sie per Drag'n Drop",
'To search by job title, enter any portion of the title. You may use % as wildcard.': 'Um nach einer Jobbezeichnung zu suchen, geben sie einen beliebigen Teil des Namens ein. Sie können % als Wildcard verwenden.',
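# Translator note (illustrative sketch): the '%' wildcard mentioned above is
# passed through to a SQL LIKE pattern. In web2py DAL terms, roughly:
#
#     db(db.hrm_job_title.name.like('%coord%')).select()
#
# where hrm_job_title is a hypothetical table name used only for illustration.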
'To variable': 'Empfängervariable ("To")',
'To': 'Bis',
'To Address': 'Empfängeradresse',
'Tools': 'Arbeitsmittel',
'Tornado': 'Wirbelsturm',
'Total # of Target Beneficiaries': 'Gesamtzahl der Zielbegünstigten',
'Total # of households of site visited': 'Gesamtzahl der Haushalte des besuchten Geländes',
'Total Beds': 'Betten insgesamt',
'Total Beneficiaries': 'Gesamtzahl der Begünstigten',
'Total Budget': 'Gesamtbudget',
'Total Capacity': 'Gesamtkapazität',
'Total Capacity (Night)': 'Gesamtkapazität (Nacht)',
'Total Cost per Megabyte': 'Gesamtkosten pro Megabyte',
'Total Cost per Minute': 'Gesamtkosten pro Minute',
'Total Cost': 'Gesamtkosten',
'Total Monthly Cost': 'Gesamte monatliche Kosten',
'Total Monthly': 'Insgesamt Monatlich',
'Total One-time Costs': 'Summe einmaliger Kosten',
'Total Persons': 'Gesamtzahl an Personen',
'Total Records: %(numrows)s': 'Gesamtzahl an Datensätzen %(numrows)s',
'Total Recurring Costs': 'Gesamte wiederkehrende Kosten',
'Total Unit Cost': 'Gesamtstückkosten',
'Total Units': 'Summe Einheiten',
'Total Value': 'Gesamtwert',
'Total Volume (m3)': 'Gesamtvolumen (m3)',
'Total Weight (kg)': 'Gesamtgewicht (kg)',
'Total gross floor area (square meters)': 'Gesamtbruttogeschossfläche (Quadratmeter)',
'Total number of beds in this hospital. Automatically updated from daily reports.': 'Gesamtzahl der Betten in diesem Krankenhaus. Automatisch aktualisiert über die täglichen Berichte.',
'Total number of houses in the area': 'Gesamtzahl der Häuser im Gebiet',
'Total number of schools in affected area': 'Gesamtzahl der Schulen im betroffenen Gebiet',
'Total population of site visited': 'Gesamtzahl der Bevölkerung des besuchten Gebietes',
'Total': 'Summe',
'Tourist Group': 'Touristengruppe',
'Town': 'Stadt',
'Town / Municipality': 'Ort / Stadtbezirk',
'Traces internally displaced people (IDPs) and their needs': 'Verfolgung von Binnenflüchtlingen (IDP) und deren Bedürfnisse',
'Tracing': 'Verfolgung',
'Track Details': 'Details zum Track',
'Track deleted': 'Track gelöscht',
'Track updated': 'Track aktualisiert',
'Track uploaded': 'Track hochgeladen',
'Track with this Person?': 'Diese Person verfolgen?',
'Track': 'Track',
'Tracking of Projects, Activities and Tasks': 'Verfolgen von Projekten, Aktivitäten und Aufgaben',
'Tracking of basic information on the location, facilities and size of the Shelters': 'Verfolgung von Basisinformationen über Ort, Einrichtungen und Größe von Unterkünften',
'Tracks the location, distibution, capacity and breakdown of victims in Shelters': 'Verfolgung der Position, Verteilung, Kapazität und Aufteilung der Opfer auf Unterkünfte',
'Tracks': 'Verfolgungen',
'Traffic Report': 'Datenverkehrsbericht',
'Training Course Catalog': 'Schulungskurs-Katalog',
'Training Details': 'Details zur Schulung',
'Training Event': 'Schulungskurs',
'Training Events': 'Schulungskurse',
'Training Facility': 'Schulungseinrichtung',
'Training Hours (Month)': 'Trainingsstunden (Monat)',
'Training Hours (Year)': 'Trainingsstunden (Jahr)',
'Training Report': 'Schulungsbericht',
'Training added': 'Schulung hinzugefügt',
'Training deleted': 'Schulung gelöscht',
'Training updated': 'Schulung aktualisiert',
'Training': 'Schulung',
'Trainings': 'Weiterbildungen / Übungen',
'Transferable': 'Transferierbar',
'Transferred': 'Transferiert',
'Transfer Completed': 'Transfer Erledigt',
'Transfer to': 'Transfer nach',
'Transition Effect': 'Übergangseffekt',
'Transit Status': 'Transitstatus',
'Transitory Accommodation': 'Durchgangsunterkunft',
'Translation': 'Übersetzung',
'Transportation assistance, Rank': 'Transport-Unterstützung, Rang',
'Trauma Center': 'Trauma Zentrum',
'Travel Cost': 'Reisekosten',
'Tropical Storm': 'Tropischer Sturm',
'Tropo Messaging Token': 'Tropo Nachrichten Token',
'Tropo Settings': 'Tropo Einstellungen',
'Tropo settings updated': 'Tropo Einstellungen aktualisiert',
'Truck': 'Lastwagen',
'Try checking the URL for errors, maybe it was mistyped.': 'Untersuchen Sie die URL auf Fehler, vielleicht war sie falsch geschrieben.',
'Try hitting refresh/reload button or trying the URL from the address bar again.': "Versuchen Sie den Knopf 'Aktualisieren/Erneut Laden' oder versuchen Sie nochmals die URL aus der Adresszeile.",
'Try refreshing the page or hitting the back button on your browser.': "Versuchen Sie die Seite zu aktualisieren oder den 'Zurück'-Knopf im Browser zu nutzen.",
'Tuesday': 'Dienstag',
'Tugboat Capacity': 'Schlepperkapazität',
'Tweeted by': 'Getwittert von',
'Tweeted on': 'Getwittert am',
'Twilio Channels': 'Twilio Kanäle',
'Twitter Channels': 'Twitter Kanäle',
'Twitter ID or #hashtag': 'Twitter-ID oder #hashtag',
'Twitter InBox': 'Twitter Eingang',
'Twitter Search': 'Twitter Suche',
'Twitter Search Results': 'Twitter Suchergebnisse',
'Twitter Settings': 'Einstellungen für Twitter',
'Type of Construction': 'Bautyp',
'Type of water source before the disaster': 'Typ der Wasserquelle vor der Katastrophe',
'Type': 'Typ',
'Types': 'Typen',
'UN': 'UN',
'Un-Repairable': 'Nicht zu reparieren',
'Unable to parse CSV file!': 'CSV Datei kann nicht analysiert werden!',
'Understaffed': 'Unterbesetzt',
'Unidentified': 'Nicht identifiziert',
'Unit Cost': 'Kosten für Einheit',
'Unit Value': 'Einheitswert',
'Unit added': 'Einheit hinzugefügt',
'Unit deleted': 'Einheit gelöscht',
'Unit of Measure': 'Maßeinheit',
'Unit updated': 'Einheit aktualisiert',
'Unit': 'Einheit',
'Units': 'Einheiten',
'Unknown Peer': 'Unbekannter Peer',
'Unknown type of facility': 'Unbekannter Einrichtungstyp',
'Unknown': 'Unbekannt',
'Unmark as duplicate': 'Duplikatsmarkierung entfernen',
'Unreinforced masonry': 'Nicht verstärktes Mauerwerk',
'Unresolved Conflicts': 'Ungelöste Konflikte',
'Unsafe': 'Unsicher',
'Unselect to disable the modem': 'Abwählen um das Modem zu deaktivieren',
'Unsent': 'Nicht gesendet',
'Unspecified': 'Unspezifiziert',
'Unsupported data format!': 'Nicht unterstütztes Datenformat!',
'Unsupported method!': 'Nicht unterstützte Methode!',
'Update Activity Report': 'Aktivitätsbericht aktualisieren',
'Update Allowance Status': 'Taschengeld Status Aktualisierung',
'Update Cholera Treatment Capability Information': 'Aktualisieren der Informationen zu den Cholera Behandlungsmöglichkeiten',
'Update Request': 'Anfrage Aktualisieren',
'Update Service Profile': 'Leistungsprofil aktualisieren',
'Update Status': 'Status aktualisieren',
'Update Task Status': 'Status der Aufgabe aktualisieren',
'Update Unit': 'Einheit aktualisieren',
'Update if Master': 'Aktualisiere wenn Master',
'Update if Newer': 'Aktualisiere falls neuer',
'Update your current ordered list': 'Aktualisieren Sie Ihre aktuelle geordnete Liste',
'Update': 'Aktualisieren',
'Updated By': 'Aktualisiert von',
'Update now': 'Jetzt aktualisieren',
'Upload Photos': 'Fotos hochladen',
'Upload Spreadsheet': 'Tabellendokument hochladen',
'Upload Track': 'Track hochladen',
'Upload a Spreadsheet': 'Ein Tabellendokument hochladen',
'Upload a file formatted according to the Template.': 'Laden Sie eine entsprechend der Vorlage formatierte Datei hoch.',
'Upload an Assessment Template import file': 'Importdatei für eine Beurteilungsvorlage hochladen',
'Upload an image file (bmp, gif, jpeg or png), max. 300x300 pixels!': 'Grafikdatei hochladen (bmp, gif, jpeg-oder png), max. 300x300 Pixel!',
'Upload an image file here.': 'Laden Sie hier die Grafikdatei hoch.',
'Upload an image, such as a photo': 'Laden Sie eine Grafikdatei hoch, wie beispielsweise ein Foto',
'Uploaded Image': 'Hochgeladenes Bild',
'Upload translated files': 'Übersetzte Dateien hochladen',
'Upon Request': 'Auf Anfrage',
'Urban Fire': 'Siedlungsfeuer',
'Urban area': 'Stadtgebiet / Ballungsgebiet',
'Urgent': 'Dringend',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Verwende (...)&(...) für UND, (...)|(...) für ODER und ~(...) für NICHT um komplexere Abfragen zu erstellen.',
'Use Geocoder for address lookups?': "Verwendung von 'Geocoder' für Adressenüberprüfung?",
'Use deg, min, sec': 'Nutze Grad, Minuten, Sekunden',
'Use decimal': 'Nutze Dezimalgrad',
'Use default': 'Standardwert verwenden',
'Use for Login?': 'Für Login verwenden?',
'Use these links to download data that is currently in the database.': 'Verwenden Sie diese Links um Daten, die derzeit in der Datenbank liegen herunterzuladen.',
'Used by IRS & Assess': 'Verwendet vom IRS & Assess',
'Used in onHover Tooltip & Cluster Popups to differentiate between types.': 'Verwendet in onHover Tooltip & Cluster Popups um verschiedene Typen zu unterscheiden.',
'Used to build onHover Tooltip & 1st field also used in Cluster Popups to differentiate between records.': 'Verwendet um onHover Tooltip zu erstellen & das 1. Feld wird ebenfalls im Cluster Dialogfeld benutzt um zwischen verschiedenen Datensätzen zu unterscheiden.',
'Used to check that latitude of entered locations is reasonable. May be used to filter lists of resources that have locations.': 'Wird zur Überprüfung genutzt, dass die eingegebene geographische Breite für den Ort sinnvoll ist. Kann verwendet werden, um Listen von Ressourcen mit Standorten zu filtern.',
'Used to check that longitude of entered locations is reasonable. May be used to filter lists of resources that have locations.': 'Wird zur Überprüfung genutzt, dass die eingegebene geographische Länge für den Ort sinnvoll ist. Kann verwendet werden, um Listen von Ressourcen mit Standorten zu filtern.',
'Used to import data from spreadsheets into the database': 'Dient dazu Daten aus Tabellendokumenten in die Datenbank zu übertragen.',
'Used within Inventory Management, Request Management and Asset Management': 'Verwendung bei der Bestands-, Anfrage- und Anlagenverwaltung',
'User Account': 'Benutzerkonto',
'User Account has been Disabled': 'Das Benutzerkonto wurde deaktiviert',
'User Details': 'Details zum Benutzer',
'User Management': 'Benutzerverwaltung',
'User Profile': 'Benutzerprofil',
'User Requests': 'Benutzeranfragen',
'User Updated': 'Benutzer aktualisiert',
'User added': 'Benutzer hinzugefügt',
'User already has this role': 'Der Benutzer hat bereits diese Rolle',
'User deleted': 'Benutzer gelöscht',
'User updated': 'Benutzer aktualisiert',
'User': 'Benutzer',
'Username': 'Benutzername',
'User Role Required': 'Erforderliche Benutzerrolle',
'User role required to register events of this type': 'Erforderliche Benutzerrolle um Ereignisse dieses Typs registrieren zu dürfen',
'Users removed': 'Benutzer entfernt',
'Users': 'Benutzer',
'Uses the REST Query Format defined in': 'Verwendet das REST-Abfrageformat das definiert ist in',
'Utilities': 'Dienstprogramme',
'Utility, telecommunication, other non-transport infrastructure': 'Dienstprogramm, Telekommunikation, andere nicht-Verkehrsinfrastruktur',
'Utilization Report': 'Verwendungsbericht',
'Valid until': 'Gültig bis',
'Value per Pack': 'Wert pro Paket',
'Value': 'Wert',
'Various Reporting functionalities': 'Verschiedene Funktionalitäten für das Berichtswesen',
'Vehicle Categories': 'Fahrzeugkategorien',
'Vehicle Crime': 'Fahrzeug Kriminalität',
'Vehicle Height (m)': 'Höhe des Fahrzeugs (m)',
'Vehicle Management': 'Fahrzeugmanagement',
'Vehicle Plate Number': 'Fahrzeugnummernschild',
'Vehicle Type': 'Fahrzeugtyp',
'Vehicle Types': 'Fahrzeugtypen',
'Vehicle Weight (kg)': 'Gewicht des Fahrzeugs (kg)',
'Vehicle': 'Fahrzeug',
'Vehicles': 'Fahrzeuge',
'Vehicles are assets with some extra details.': 'Fahrzeuge sind Anlagen mit einigen zusätzlichen Details.',
'Venue': 'Örtlichkeit',
'Verification Status': 'Prüfstatus',
'Verified?': 'Geprüft?',
'Verify password': 'Passwortprüfung',
'Very Good': 'Sehr gut',
'Very High': 'Sehr hoch',
'Vessel Max Length': 'Maximale Länge des Wasserfahrzeugs',
'View Alerts received using either Email or SMS': 'Über E-Mail oder SMS empfangene Warnungen anzeigen',
'View All': 'Alles anzeigen',
'View Error Tickets': 'Fehler Tickets ansehen',
'View Fullscreen Map': 'Vollbild Karte anzeigen',
'View Image': 'Bild anzeigen',
'View Items': 'Artikel anzeigen',
'View On Map': 'Auf Karte anzeigen',
'View Outbox': 'Postausgang anzeigen',
'View Picture': 'Bild anzeigen',
'View Settings': 'Einstellungen anzeigen',
'View Test Result Reports': 'Zeige Berichte der Testergebnisse',
'View Tickets': 'Tickets anzeigen',
'View Translation Percentage': 'Zeige Übersetzungsstatistik',
'View and/or update their details': 'Anzeige und/oder Aktualisieren Ihrer Detailinformationen',
'View as Pages': 'Anzeige als Seiten',
'View or update the status of a hospital.': 'Anzeige oder Aktualisieren des Status eines Krankenhauses.',
'View pending requests and pledge support.': 'Anstehende Anfragen anzeigen und Unterstützung zusagen.',
'View the hospitals on a map.': 'Krankenhäuser auf einer Karte anzeigen',
'View/Edit the Database directly': 'Die Datenbank direkt anzeigen/bearbeiten',
'Village Leader': 'Dorfvorsteher',
'Village / Suburb': 'Ortschaft / Vorort',
'Village': 'Dorf',
'Visible?': 'Sichtbar?',
'Visual Recognition': 'Visuelle Erkennung',
'Volcanic Ash Cloud': 'Wolke vulkanischer Asche',
'Volcanic Event': 'Vulkanisches Ereignis',
'Volume (m3)': 'Volumen (m3)',
'Volunteer Availability': 'Verfügbarkeit von Freiwilligen',
'Volunteer Contact': 'Kontaktdaten des Freiwilligen',
'Volunteer Details': 'Details zu Freiwilligen',
'Volunteer Information': 'Freiwilligeninformation',
'Volunteer Management': 'Management von Freiwilligen',
'Volunteer Project': 'Freiwilligen Projekt',
'Volunteer Record': 'Freiwilligen Datensatz',
'Volunteer Report': 'Freiwilligen Bericht',
'Volunteer Request': 'Freiwilligen Anforderung',
'Volunteer Role': 'Rolle des Freiwilligen',
'Volunteer Role Catalog': 'Rollenkatalog für Freiwillige',
'Volunteer added': 'Freiwilliger hinzugefügt',
'Volunteer availability added': 'Freiwilligen Verfügbarkeit hinzugefügt',
'Volunteer availability deleted': 'Freiwilligen Verfügbarkeit gelöscht',
'Volunteer availability updated': 'Freiwilligen Verfügbarkeit aktualisiert',
'Volunteer deleted': 'Freiwilliger gelöscht',
'Volunteer details updated': 'Details zu Freiwilligen aktualisiert',
'Volunteers were notified!': 'Freiwillige wurden unterrichtet!',
'Volunteers': 'Freiwillige',
'Volunteer': 'Freiwilliger',
'Vote': 'Abstimmung',
'Votes': 'Abstimmungen',
'WASH': 'WASH',
'Walking Only': 'Nur zu Fuß',
'Wall or other structural damage': 'Wand oder andere Gebäudeschäden',
'Warehouse Details': 'Details zu Warenlager',
'Warehouse Stock': 'Lagerbestand',
'Warehouse Stock Report': 'Bericht zum Warenlagerbestand',
'Warehousing Storage Capacity': 'Warenlager Ablagekapazität',
'Warehouse Type': 'Warenlagertyp',
'Warehouse Types': 'Warenlagertypen',
'Warehouse added': 'Warenlager hinzugefügt',
'Warehouse deleted': 'Warenlager gelöscht',
'Warehouse updated': 'Warenlager aktualisiert',
'Warehouse': 'Warenlager',
'Warehouses': 'Warenlager',
'Water Sanitation Hygiene': 'Wasser Abwasserentsorgung Hygiene',
'Water collection': 'Wassersammlung',
'Water gallon': 'Wasser Gallonen',
'Water storage containers in households': 'Wasser-Behälter in Haushalten',
'Water supply': 'Wasserversorgung',
'Waybill Number': 'Frachtbriefnummer',
'WB': 'Frachtbriefnr.',
'Web Feature Service': 'WebFeatureService',
'Web Map Service': 'WebMapService',
'Web Map Service Browser Name': 'WebMapService Browser Name',
'Web Map Service Browser URL': 'WebMapService Browser URL',
'Website': 'Webseite',
'Wednesday': 'Mittwoch',
'Weight (kg)': 'Gewicht (kg)',
'Weight': 'Gewicht',
'Welcome to the Sahana Portal at': 'Willkommen beim Sahana Portal auf',
'Well-Known Text': 'WellKnownText (OGC-WKT)',
'What the Items will be used for': 'Beabsichtigte Verwendung der Artikel',
'Wheat': 'Weizen',
'When reports were entered': 'Wann die Berichte eingegeben wurden',
'Whiskers': 'Barthaare',
'Who is doing what and where': 'Wer macht was und wo',
'Who usually collects water for the family?': 'Wer sammelt normalerweise Wasser für die Familie?',
'Width': 'Breite',
'Width (m)': 'Breite (m)',
'Wild Fire': 'Wildfeuer',
'Wind Chill': 'Windchill (gefühlte Temperatur)',
'Window frame': 'Fensterrahmen',
'Winter Storm': 'Wintersturm',
'Women of Child Bearing Age': 'Frauen im gebärfähigen Alter',
'Women participating in coping activities': 'Frauen, die sich an den Hilfsaktivitäten beteiligen',
'Women who are Pregnant or in Labour': 'Frauen, die schwanger sind oder in den Wehen liegen',
'Womens Focus Groups': 'Fokusgruppen für Frauen',
'Wooden plank': 'Hölzerne Planke',
'Wooden poles': 'Holzmasten',
'Workflow Position': 'Position im Ablauf',
'Working hours end': 'Arbeitszeit Ende',
'Working hours start': 'Arbeitszeit Beginn',
'Working or other to provide money/food': 'Arbeiten oder etwas anderes um Geld/Lebensmittel zur Verfügung zu stellen.',
'written-only': 'nur schriftlich',
'XYZ Tiles': 'XYZ Tiles',
'X-Ray': 'Röntgen',
'X-Ray Done': 'Röntgen erledigt',
'X-Ray Place': 'Röntgen Ort',
'YES': 'JA',
'Year built': 'Baujahr',
'Year of Manufacture': 'Herstellungsjahr',
'Year': 'Jahr',
'Yellow': 'Gelb',
'Yes': 'Ja',
'yes': 'ja',
'You are a recovery team?': 'Sind Sie ein Bergungsteam?',
'You are attempting to delete your own account - are you sure you want to proceed?': 'Sie versuchen Ihr eigenes Konto zu löschen - sind Sie sicher, dass Sie fortfahren möchten?',
'You are currently reported missing!': 'Sie sind derzeit als vermisst gemeldet!',
'You can change the configuration of synchronization module in the Settings section. This configuration includes your UUID (unique identification number), sync schedules, beacon service and so on. Click the following link to go to the Sync Settings page.': 'Sie können die Konfiguration des Synchronisierungsmoduls unter Einstellungen anpassen. Diese Konfiguration enthält Ihre UUID (eindeutige Identifikationsnummer), Synchronisierungszeitpläne, Beacon-Service usw. Klicken Sie auf den folgenden Link, um zu den Einstellungen für die Synchronisierung zu gelangen.',
'You can click on the map below to select the Lat/Lon fields': 'Sie können auf die untenstehende Karte klicken, um die Felder für geographische Breite und Länge auszuwählen',
'You can search by name, ID or case number': 'Sie können nach Namen, ID oder Fallnummer recherchieren',
'You can search by name, ID, EasyOpt number and comments': 'Sie können nach Namen, ID, EasyOpt Nummer oder Kommentaren recherchieren',
'You can select the Draw tool': 'Sie können das Zeichen Tool verwenden',
'You can set the modem settings for SMS here.': 'Sie können die Modemeinstellungen für SMS hier festlegen.',
'You can use the Conversion Tool to convert from either GPS coordinates or Degrees/Minutes/Seconds.': 'Sie können das Konvertierungsprogramm verwenden, um GPS-Koordinaten oder Grad/Minuten/Sekunden umzuwandeln.',
'You do not have permission for any facility to make a commitment.': 'Sie haben keine Berechtigung für irgendeine Einrichtung eine Zusage zu machen.',
'You do not have permission for any facility to make a request.': 'Sie haben keine Berechtigung für irgendeine Einrichtung eine Anfrage zu starten.',
'You do not have permission for any site to add an inventory item.': 'Sie haben keine Berechtigung für irgendein Gelände einen Bestandsartikel hinzuzufügen.',
'You do not have permission for any site to receive a shipment.': 'Sie haben keine Berechtigung für irgendein Gelände eine Lieferung anzunehmen.',
'You do not have permission for any site to send a shipment.': 'Sie haben keine Berechtigung für irgendein Gelände eine Lieferung abzusenden.',
'You do not have permission to cancel this received shipment.': 'Sie haben keine Berechtigung diese erhaltene Lieferung zu löschen.',
'You do not have permission to cancel this sent shipment.': 'Sie haben keine Berechtigung diese gesendete Lieferung zu löschen.',
'You do not have permission to make this commitment.': 'Sie haben keine Berechtigung diese Zusage zu machen.',
'You do not have permission to receive this shipment.': 'Sie haben keine Berechtigung diese Lieferung entgegenzunehmen.',
'You do not have permission to send a shipment from this site.': 'Sie haben keine Berechtigung Lieferungen von diesem Gelände zu senden.',
'You do not have permission to send messages': 'Sie haben keine Berechtigung, Nachrichten zu versenden',
'You do not have permission to send this shipment.': 'Sie haben keine Berechtigung diese Lieferung zu senden.',
'You have a personal map configuration. To change your personal configuration, click': 'Sie haben eine persönliche Kartenkonfiguration. Um ihre persönliche Konfiguration zu ändern, klicken Sie hier',
'You have found a dead body?': 'Sie haben eine Leiche gefunden?',
'You must be logged in to register volunteers.': 'Sie müssen angemeldet sein, um Freiwillige zu registrieren.',
'You must be logged in to report persons missing or found.': 'Sie müssen angemeldet sein, um fehlende oder gefundene Personen zu melden.',
'You must provide a series id to proceed.': 'Sie müssen eine Serien-ID angeben, um fortzufahren.',
'You should edit Twitter settings in models/000_config.py': 'Sie sollten die Twitter Einstellungen unter models/000_config.py bearbeiten',
'Your current ordered list of solution items is shown below. You can change it by voting again.': 'Ihre aktuelle geordnete Liste der Lösungselemente wird unten angezeigt. Sie können sie durch erneutes Abstimmen verändern.',
'Your post was added successfully.': 'Der Eintrag wurde erfolgreich hinzugefügt.',
'Your system has been assigned a unique identification (UUID), which other computers around you can use to identify you. To view your UUID, you may go to Synchronization -> Sync Settings. You can also see other settings on that page.': 'Ihr System verfügt über eine eindeutige ID (UUID), die andere Computer in Ihrer Umgebung nutzen können, um Sie zu identifizieren. Zum Anzeigen Ihrer UUID können Sie zu Synchronisierung -> Sync Einstellungen gehen. Auf dieser Seite können Sie auch andere Einstellungen einsehen.',
'Zero Hour': 'Stunde null',
'Zinc roof': 'Zinkdach',
'Zoom Levels': 'Zoomebenen',
'Zoom in': 'Hineinzoomen',
'Zoom to Current Location': 'Auf aktuelles Gebiet/Standort fokussieren',
'Zoom to maximum map extent': 'Auf maximale Kartenausdehnung fokussieren',
'Zoom': 'Zoomen',
'active': 'aktiv',
'added': 'hinzugefügt',
'all records': 'Alle Datensätze',
'allows a budget to be developed based on staff & equipment costs, including any admin overheads.': 'Ermöglicht ein Budget zu entwickeln, basierend auf Mitarbeiter- und Gerätekosten, einschließlich aller administrativen Gemeinkosten.',
'allows for creation and management of surveys to assess the damage following a natural disaster.': 'Ermöglicht die Erstellung und Verwaltung von Umfragen zur Beurteilung von Schäden nach einer Naturkatastrophe.',
'an individual/team to do in 1-2 days': 'Ein Aufwand von 1-2 Tagen für eine Einzelperson/ein Team',
'assigned': 'zugewiesen',
'average': 'Durchschnitt',
'black': 'schwarz',
'blue': 'blau',
'brown': 'braun',
'business_damaged': 'Business_beschädigt',
'by': 'durch',
'can be used to extract data from spreadsheets and put them into database tables.': 'Kann verwendet werden um Daten von einer Tabelle zu extrahieren und diese in Datenbanktabellen einzutragen.',
'check all': 'Alles markieren',
'click for more details': 'hier klicken, um mehr Details zu erhalten',
'consider': 'Berücksichtigen',
'curly': 'lockig',
'currently registered': 'derzeitig registriert',
'daily': 'täglich',
'dark': 'dunkel',
'data uploaded': 'hochgeladene Daten',
'database %s select': 'Datenbank %s ausgewählt',
'database': 'Datenbank',
'deceased': 'Verstorbene',
'delete all checked': 'Alle Ausgewählten löschen',
'deleted': 'gelöscht',
'design': 'Design',
'diseased': 'erkrankt',
'displaced': 'vertrieben',
'divorced': 'geschieden',
'done!': 'fertig!',
'duplicate': 'Dublette',
'eg. gas, electricity, water': 'zum Beispiel Gas, Strom, Wasser',
'enclosed area': 'eingeschlossener Bereich',
'export as csv file': 'Exportieren als CSV-Datei',
'fat': 'fett',
'feedback': 'Rückmeldung',
'female': 'weiblich',
'flush latrine with septic tank': 'Spültoilette mit Klärgrube',
'food_sources': 'lebensmittel_quellen',
'forehead': 'Stirn',
'found': 'gefunden',
'from Twitter': 'aus Twitter',
'green': 'Grün',
'grey': 'grau',
'here': 'hier',
'high': 'hoch',
'hourly': 'stündlich',
'households': 'Haushalte',
'identified': 'identifiziert',
'ignore': 'ignorieren',
'in Deg Min Sec format': 'im Format Grad Minuten Sekunden',
'inactive': 'inaktiv',
'injured': 'verletzt',
'insert new %s': 'neue %s hinzufügen',
'insert new': 'neu einfügen',
'invalid request': 'Ungültige Anfrage',
'invalid': 'ungültig',
'is a central online repository where information on all the disaster victims and families, especially identified casualties, evacuees and displaced people can be stored. Information like name, age, contact number, identity card number, displaced location, and other details are captured. Picture and finger print details of the people can be uploaded to the system. People can also be captured by group for efficiency and convenience.': 'ist ein zentrales Online-Verzeichnis, in dem Informationen zu allen Opfern und Familien der Katastrophe gespeichert werden können, insbesondere identifizierte Opfer, Evakuierte und Vertriebene. Informationen wie Name, Alter, Kontaktnummer, Ausweisnummer, Aufenthaltsort und andere Details werden erfasst. Fotos und Fingerabdrücke der Personen können in das System hochgeladen werden. Personen können aus Gründen der Effizienz und Einfachheit auch in Gruppen erfasst werden.',
'is envisioned to be composed of several sub-modules that work together to provide complex functionality for the management of relief and project items by an organization. This includes an intake system, a warehouse management system, commodity tracking, supply chain management, fleet management, procurement, financial tracking and other asset and resource management capabilities': 'ist so konzipiert, dass es aus mehreren Untermodulen besteht. Diese arbeiten zusammen, um Organisationen komplexe Funktionalitäten zur Unterstützung von Hilfen und Durchführung von Projekten zur Verfügung zu stellen. Dies beinhaltet ein Aufnahmesystem, ein Warenlager-Management-System, Produkt-Tracking, Versorgungsketten-Management, Fahrzeugbestand-Management, Beschaffungswesen, Finanz-Tracking und andere Bestands- und Ressourcen-Management-Einsatzmöglichkeiten.',
'keeps track of all incoming tickets allowing them to be categorised & routed to the appropriate place for actioning.': 'Überwacht alle eingehenden Tickets, so dass diese entsprechend eingestuft und an die entsprechende Stelle zur Bearbeitung geleitet werden können.',
'latrines': 'Toiletten',
'leave empty to detach account': 'Leer lassen, um das Konto zu trennen',
'legend URL': 'URL zur Legende',
'less': 'weniger',
'light': 'hell',
'login': 'Anmeldung',
'long': 'lang',
'long>12cm': 'lang > 12cm',
'low': 'niedrig',
'male': 'männlich',
'manual': 'manuell',
'married': 'verheiratet',
'medium': 'mittel',
'medium<12cm': 'mittel < 12 cm',
'meters': 'Meter',
'missing': 'fehlend',
'module allows the site administrator to configure various options.': 'Modul, das dem Seitenadministrator ermöglicht, verschiedene Optionen zu konfigurieren.',
'module helps monitoring the status of hospitals.': 'Modul, das hilft, den Status von Krankenhäusern zu überwachen.',
'module provides a mechanism to collaboratively provide an overview of the developing disaster, using online mapping (GIS).': 'Modul, das einen Mechanismus bietet, um mittels Online-Kartierung (GIS) gemeinschaftlich einen Überblick über die sich entwickelnde Katastrophe zu erhalten.',
'more': 'mehr',
'n/a': 'nicht zutreffend',
'negroid': 'Negroid',
'never': 'nie',
'new record inserted': 'Neuen Datensatz eingefügt',
'new': 'neu',
'next 100 rows': 'Nächste 100 Zeilen',
'no': 'nein',
'none': 'nichts',
'not accessible - no cached version available!': 'Nicht verfügbar - keine zwischengespeicherte Version verfügbar!',
'not accessible - using cached version from': 'Nicht verfügbar - benutze zwischengespeicherte Version von',
'not specified': 'nicht angegeben',
'obsolete': 'obsolet',
'on': 'ein',
'once': 'einmal',
'open defecation': 'Verrichtung der Bedürfnisse im Freien',
'or import from csv file': 'oder aus CSV-Datei importieren',
'other': 'sonstige',
'over one hour': 'über eine Stunde',
'or drop here': "oder hier per Drag'n Drop ablegen",
'paid': 'bezahlt',
'people': 'Personen',
'pending': 'anstehend',
'piece': 'Stück',
'pit latrine': 'Grubenlatrine',
'pit': 'Grube',
'postponed': 'zurückgestellt',
'preliminary template or draft, not actionable in its current form': 'vorläufige Vorlage oder Entwurf, in der jetzigen Form nicht umsetzbar',
'previous 100 rows': 'Vorherige 100 Zeilen',
'record does not exist': 'Datensatz ist nicht vorhanden',
'record id': 'Datensatz ID',
'red': 'rot',
'refused': 'zurückgewiesen',
'reports successfully imported.': 'Berichte erfolgreich importiert.',
'representation of the Polygon/Line.': 'Darstellung der Fläche/Linie.',
'retired': 'Außer Dienst',
'river': 'Fluss',
'see comment': 'siehe Kommentar',
'selected': 'ausgewählt',
'separated from family': 'von Familie getrennt',
'separated': 'getrennt',
'shaved': 'rasiert',
'short': 'kurz',
'short<6cm': 'kurz < 6cm',
'sides': 'Seiten',
'sign-up now': 'Jetzt Registrieren',
'single': 'alleinstehend',
'slim': 'dünn',
'specify': 'genauer beschreiben',
'staff members': 'Mitarbeiter',
'staff': 'Personal',
'state location': 'Beschaffenheit des Standorts',
'state': 'Zustand',
'straight': 'gerade',
'suffered financial losses': 'Finanzielle Verluste erlitten',
'table': 'Tabelle',
'tall': 'groß',
'this': 'Dieses',
'to access the system': 'um auf das System zuzugreifen',
'tonsure': 'Tonsur',
'total': 'Summe',
'tweepy module not available within the running Python - this needs installing for non-Tropo Twitter support!': 'Tweepy-Modul ist in der laufenden Python-Umgebung nicht verfügbar - es muss für die Twitter-Unterstützung ohne Tropo installiert werden!',
'unable to parse csv file': 'CSV Datei kann nicht analysiert werden',
'uncheck all': 'Alles deselektieren',
'unidentified': 'nicht identifiziert',
'unknown': 'unbekannt',
'unspecified': 'unspezifiziert',
'unverified': 'ungeprüft',
'updated': 'aktualisiert',
'updates only': 'nur Aktualisierungen',
'verified': 'verifiziert',
'volunteer': 'Freiwilliger',
'volunteers': 'Freiwillige',
'wavy': 'wellig',
'weekly': 'wöchentlich',
'white': 'weiß',
'wider area, longer term, usually contain multiple Activities': 'Größerer Bereich, längere Sicht, enthält normalerweise mehrere Aktivitäten',
'widowed': 'verwitwet',
'within human habitat': 'Im menschlichen Lebensraum',
'xlwt module not available within the running Python - this needs installing for XLS output!': 'xlwt-Modul ist in der laufenden Python-Umgebung nicht verfügbar - es muss für die XLS-Ausgabe installiert werden!'
}
|
flavour/ifrc_qa
|
languages/de.py
|
Python
|
mit
| 303,278
|
[
"VisIt"
] |
1b7cdb264de06f904ad3952780f9afda2308ddcc1f2d22a767c6f2892f75d45a
|
#!/usr/bin/python
import numpy as np
import matplotlib.pyplot as plt
import scipy.ndimage as ndim
import scipy.misc as spm
import random,sys,time,os
import datetime
import multiprocessing as multi
import ctypes
import logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
#importing scene
try:
import ConfigParser as configparser
except ImportError: # we're on python 3
import configparser
import blackbody as bb
import bloom
import gc
import curses
#enums
METH_LEAPFROG = 0
METH_RK4 = 1
#rough option parsing
LOFI = False
DISABLE_DISPLAY = 0
DISABLE_SHUFFLING = 0
NTHREADS = 4
DRAWGRAPH = True
OVERRIDE_RES = False
SCENE_FNAME = 'scenes/default.scene'
CHUNKSIZE = 9000
for arg in sys.argv[1:]:
if arg == '-d':
LOFI = True
continue
if (arg == '--no-graph'):
DRAWGRAPH = False
continue
if arg == '--no-display':
DISABLE_DISPLAY = 1
continue
if arg == '--no-shuffle':
DISABLE_SHUFFLING = 1
continue
if (arg == '-o') or (arg == '--no-bs'):
DRAWGRAPH = False
DISABLE_DISPLAY = True
DISABLE_SHUFFLING = True
continue
if (arg[0:2] == '-c'):
CHUNKSIZE = int(arg[2:])
continue
if arg[0:2] == "-j":
NTHREADS = int(arg[2:])
continue
if arg[0:2] == "-r":
RESOLUTION = [int(x) for x in arg[2:].split('x')]
OVERRIDE_RES = True
if (len(RESOLUTION) != 2):
logger.error('''error: resolution "%s" unreadable''', arg[2:])
logger.error("please format resolution correctly (e.g.: -r640x480)")
exit()
continue
if arg[0] == '-':
logger.error("unrecognized option: %s", arg)
exit()
SCENE_FNAME = arg
if not os.path.isfile(SCENE_FNAME):
logger.error("scene file \"%s\" does not exist", SCENE_FNAME)
sys.exit(1)
defaults = {
"Distort":"1",
"Fogdo":"1",
"Blurdo":"1",
"Fogmult":"0.02",
"Diskinner":"1.5",
"Diskouter":"4",
"Resolution":"160,120",
"Diskmultiplier":"100.",
"Gain":"1",
"Normalize":"-1",
"Blurdo":"1",
"Bloomcut":"2.0",
"Airy_bloom":"1",
"Airy_radius":"1.",
"Iterations":"1000",
"Stepsize":"0.02",
"Cameraposition":"0.,1.,-10",
"Fieldofview":1.5,
"Lookat":"0.,0.,0.",
"Horizongrid":"1",
"Redshift":"1",
"sRGBOut":"1",
"Diskintensitydo":"1",
"sRGBIn":"1",
}
cfp = configparser.ConfigParser(defaults)
logger.debug("Reading scene %s...", SCENE_FNAME)
cfp.read(SCENE_FNAME)
FOGSKIP = 1
METHOD = METH_RK4
#enums to avoid per-iteration string comparisons
ST_NONE = 0
ST_TEXTURE = 1
ST_FINAL = 2
st_dict = {
"none":ST_NONE,
"texture":ST_TEXTURE,
"final":ST_FINAL
}
DT_NONE = 0
DT_TEXTURE = 1
DT_SOLID = 2
DT_GRID = 3
DT_BLACKBODY = 4
dt_dict = {
"none":DT_NONE,
"texture":DT_TEXTURE,
"solid":DT_SOLID,
"grid":DT_GRID,
"blackbody":DT_BLACKBODY
}
#this section works, but only if the .scene file is good
#if there's anything wrong, it's a trainwreck
#must rewrite
try:
if not OVERRIDE_RES:
RESOLUTION = [int(x) for x in cfp.get('lofi','Resolution').split(',')]
NITER = int(cfp.get('lofi','Iterations'))
STEP = float(cfp.get('lofi','Stepsize'))
except (KeyError, configparser.NoSectionError):
logger.debug("error reading scene file: insufficient data in lofi section")
logger.debug("using defaults.")
if not LOFI:
try:
if not OVERRIDE_RES:
RESOLUTION = [int(x) for x in cfp.get('hifi','Resolution').split(',')]
NITER = int(cfp.get('hifi','Iterations'))
STEP = float(cfp.get('hifi','Stepsize'))
except (KeyError, configparser.NoSectionError):
logger.debug("no data in hifi section. Using lofi/defaults.")
try:
CAMERA_POS = [float(x) for x in cfp.get('geometry','Cameraposition').split(',')]
TANFOV = float(cfp.get('geometry','Fieldofview'))
LOOKAT = np.array([float(x) for x in cfp.get('geometry','Lookat').split(',')])
UPVEC = np.array([float(x) for x in cfp.get('geometry','Upvector').split(',')])
DISTORT = int(cfp.get('geometry','Distort'))
DISKINNER = float(cfp.get('geometry','Diskinner'))
DISKOUTER = float(cfp.get('geometry','Diskouter'))
#options for 'blackbody' disktexture
DISK_MULTIPLIER = float(cfp.get('materials','Diskmultiplier'))
#DISK_ALPHA_MULTIPLIER = float(cfp.get('materials','Diskalphamultiplier'))
DISK_INTENSITY_DO = int(cfp.get('materials','Diskintensitydo'))
REDSHIFT = float(cfp.get('materials','Redshift'))
GAIN = float(cfp.get('materials','Gain'))
NORMALIZE = float(cfp.get('materials','Normalize'))
BLOOMCUT = float(cfp.get('materials','Bloomcut'))
except (KeyError, configparser.NoSectionError):
logger.debug("error reading scene file: insufficient data in geometry section")
logger.debug("using defaults.")
try:
HORIZON_GRID = int(cfp.get('materials','Horizongrid'))
DISK_TEXTURE = cfp.get('materials','Disktexture')
SKY_TEXTURE = cfp.get('materials','Skytexture')
SKYDISK_RATIO = float(cfp.get('materials','Skydiskratio'))
FOGDO = int(cfp.get('materials','Fogdo'))
BLURDO = int(cfp.get('materials','Blurdo'))
AIRY_BLOOM = int(cfp.get('materials','Airy_bloom'))
AIRY_RADIUS = float(cfp.get('materials','Airy_radius'))
FOGMULT = float(cfp.get('materials','Fogmult'))
#perform linear rgb->srgb conversion
SRGBOUT = int(cfp.get('materials','sRGBOut'))
SRGBIN = int(cfp.get('materials','sRGBIn'))
except (KeyError, configparser.NoSectionError):
logger.debug("error reading scene file: insufficient data in materials section")
logger.debug("using defaults.")
# converting mode strings to mode ints
try:
DISK_TEXTURE_INT = dt_dict[DISK_TEXTURE]
except KeyError:
logger.debug("Error: %s is not a valid accretion disc rendering mode", DISK_TEXTURE)
sys.exit(1)
try:
SKY_TEXTURE_INT = st_dict[SKY_TEXTURE]
except KeyError:
logger.debug("Error: %s is not a valid sky rendering mode", SKY_TEXTURE)
sys.exit(1)
logger.debug("%dx%d", RESOLUTION[0], RESOLUTION[1])
#just ensuring it's an np.array() and not a tuple/list
CAMERA_POS = np.array(CAMERA_POS)
#ensure the observer's 4-velocity is timelike
#since as of now the observer is schwarzschild stationary, we just need to check
#whether he's outside the horizon.
if np.linalg.norm(CAMERA_POS) <= 1.:
logger.debug("Error: the observer's 4-velocity is not timelike.")
logger.debug("(try placing the observer outside the event horizon)")
sys.exit(1)
DISKINNERSQR = DISKINNER*DISKINNER
DISKOUTERSQR = DISKOUTER*DISKOUTER
#ensuring existence of tests directory
if not os.path.exists("tests"):
os.makedirs("tests")
#GRAPH
if DRAWGRAPH:
logger.debug("Drawing schematic graph...")
g_diskout = plt.Circle((0,0),DISKOUTER, fc='0.75')
g_diskin = plt.Circle((0,0),DISKINNER, fc='white')
g_photon = plt.Circle((0,0),1.5,ec='y',fc='none')
g_horizon = plt.Circle((0,0),1,color='black')
g_cameraball = plt.Circle((CAMERA_POS[2],CAMERA_POS[0]),0.2,color='black')
figure = plt.gcf()
ax = plt.gca()
ax.cla()
gscale = 1.1*np.linalg.norm(CAMERA_POS)
ax.set_xlim((-gscale,gscale))
ax.set_ylim((-gscale,gscale))
ax.set_aspect('equal')
l = 100
ax.plot([CAMERA_POS[2],LOOKAT[2]] , [CAMERA_POS[0],LOOKAT[0]] , color='0.05', linestyle='-')
figure.gca().add_artist(g_diskout)
figure.gca().add_artist(g_diskin)
figure.gca().add_artist(g_horizon)
figure.gca().add_artist(g_photon)
figure.gca().add_artist(g_cameraball)
logger.debug("Saving diagram...")
figure.savefig('tests/graph.png')
ax.cla()
# these need to be here
# convert from linear rgb to srgb
def rgbtosrgb(arr):
logger.debug("RGB -> sRGB...")
#see https://en.wikipedia.org/wiki/SRGB#Specification_of_the_transformation
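    # piecewise sRGB transfer: values above the 0.0031308 cutoff get the
    # gamma branch 1.055*x^(1/2.4) - 0.055, the rest stay linear (x*12.92)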
mask = arr > 0.0031308
arr[mask] **= 1/2.4
arr[mask] *= 1.055
arr[mask] -= 0.055
    arr[~mask] *= 12.92
# convert from srgb to linear rgb
def srgbtorgb(arr):
logger.debug("sRGB -> RGB...")
mask = arr > 0.04045
arr[mask] += 0.055
arr[mask] /= 1.055
arr[mask] **= 2.4
    arr[~mask] /= 12.92
logger.debug("Loading textures...")
if SKY_TEXTURE == 'texture':
texarr_sky = spm.imread('textures/bgedit.jpg')
# must convert to float here so we can work in linear colour
texarr_sky = texarr_sky.astype(float)
texarr_sky /= 255.0
if SRGBIN:
# must do this before resizing to get correct results
srgbtorgb(texarr_sky)
if not LOFI:
# maybe doing this manually and then loading is better.
logger.debug("(zooming sky texture...)")
texarr_sky = spm.imresize(texarr_sky,2.0,interp='bicubic')
# imresize converts back to uint8 for whatever reason
texarr_sky = texarr_sky.astype(float)
texarr_sky /= 255.0
texarr_disk = None
if DISK_TEXTURE == 'texture':
texarr_disk = spm.imread('textures/adisk.jpg')
if DISK_TEXTURE == 'test':
texarr_disk = spm.imread('textures/adisktest.jpg')
if texarr_disk is not None:
# must convert to float here so we can work in linear colour
texarr_disk = texarr_disk.astype(float)
texarr_disk /= 255.0
if SRGBIN:
srgbtorgb(texarr_disk)
#defining texture lookup
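# lookup() is a nearest-neighbour texture fetch: UVs are clamped just below
# 1.0, scaled to pixel coordinates, truncated to ints and then used to
# fancy-index the texture array in one vectorized operation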
def lookup(texarr,uvarrin): #uvarrin is an array of uv coordinates
uvarr = np.clip(uvarrin,0.0,0.999)
uvarr[:,0] *= float(texarr.shape[1])
uvarr[:,1] *= float(texarr.shape[0])
uvarr = uvarr.astype(int)
return texarr[ uvarr[:,1], uvarr[:,0] ]
logger.debug("Computing rotation matrix...")
# this is just standard CGI vector algebra
FRONTVEC = (LOOKAT-CAMERA_POS)
FRONTVEC = FRONTVEC / np.linalg.norm(FRONTVEC)
LEFTVEC = np.cross(UPVEC,FRONTVEC)
LEFTVEC = LEFTVEC/np.linalg.norm(LEFTVEC)
NUPVEC = np.cross(FRONTVEC,LEFTVEC)
viewMatrix = np.zeros((3,3))
viewMatrix[:,0] = LEFTVEC
viewMatrix[:,1] = NUPVEC
viewMatrix[:,2] = FRONTVEC
#array [0,1,2,...,numPixels]
pixelindices = np.arange(0,RESOLUTION[0]*RESOLUTION[1],1)
#total number of pixels
numPixels = pixelindices.shape[0]
logger.debug("Generated %d pixel flattened array.", numPixels)
#useful constant arrays
ones = np.ones((numPixels))
ones3 = np.ones((numPixels,3))
UPFIELD = np.outer(ones,np.array([0.,1.,0.]))
#random sample of floats
ransample = np.random.random_sample((numPixels))
def vec3a(vec): #returns a constant 3-vector array (don't use for varying vectors)
return np.outer(ones,vec)
def vec3(x,y,z):
return vec3a(np.array([x,y,z]))
def norm(vec):
# you might not believe it, but this is the fastest way of doing this
# there's a stackexchange answer about this
return np.sqrt(np.einsum('...i,...i',vec,vec))
def normalize(vec):
#return vec/ (np.outer(norm(vec),np.array([1.,1.,1.])))
return vec / (norm(vec)[:,np.newaxis])
# an efficient way of computing the sixth power of r
# much faster than pow!
# np has this optimization for power(a,2)
# but not for power(a,3)!
def sqrnorm(vec):
return np.einsum('...i,...i',vec,vec)
def sixth(v):
tmp = sqrnorm(v)
return tmp*tmp*tmp
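# RK4f returns the derivative of the state y = [position | velocity]:
# dx/dt = v, dv/dt = -(3/2) h^2 x / r^5 -- the same "-3/2 r^(-5) potential"
# acceleration used in the leapfrog branch of the integrator below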
def RK4f(y,h2):
f = np.zeros(y.shape)
f[:,0:3] = y[:,3:6]
f[:,3:6] = - 1.5 * h2 * y[:,0:3] / np.power(sqrnorm(y[:,0:3]),2.5)[:,np.newaxis]
return f
# this blends colours ca and cb by placing ca in front of cb
def blendcolors(cb,balpha,ca,aalpha):
#* np.outer(aalpha, np.array([1.,1.,1.])) + \
#return ca + cb * np.outer(balpha*(1.-aalpha),np.array([1.,1.,1.]))
return ca + cb * (balpha*(1.-aalpha))[:,np.newaxis]
# this is for the final alpha channel after blending
def blendalpha(balpha,aalpha):
return aalpha + balpha*(1.-aalpha)
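# together, blendcolors/blendalpha implement standard front-to-back "over"
# compositing: colour_out = ca + cb*balpha*(1-aalpha),
# alpha_out = aalpha + balpha*(1-aalpha)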
def saveToImg(arr,fname):
logger.debug(" - saving %s...", fname)
#copy
imgout = np.array(arr)
#clip
imgout = np.clip(imgout,0.0,1.0)
#rgb->srgb
if SRGBOUT:
rgbtosrgb(imgout)
#unflattening
imgout = imgout.reshape((RESOLUTION[1],RESOLUTION[0],3))
plt.imsave(fname,imgout)
# this is not just for bool, also for floats (as grayscale)
def saveToImgBool(arr,fname):
saveToImg(np.outer(arr,np.array([1.,1.,1.])),fname)
#for shared arrays
def tonumpyarray(mp_arr):
a = np.frombuffer(mp_arr.get_obj(), dtype=np.float32)
a.shape = ((numPixels,3))
return a
#PARTITIONING
#partition viewport in contiguous chunks
#CHUNKSIZE = 9000
if not DISABLE_SHUFFLING:
np.random.shuffle(pixelindices)
chunks = np.array_split(pixelindices,numPixels/CHUNKSIZE + 1)
NCHUNKS = len(chunks)
logger.debug("Split into %d chunks of %d pixels each", NCHUNKS, chunks[0].shape[0])
total_colour_buffer_preproc_shared = multi.Array(ctypes.c_float, numPixels * 3)
total_colour_buffer_preproc = tonumpyarray(total_colour_buffer_preproc_shared)
#open preview window
if not DISABLE_DISPLAY:
logger.debug("Opening display...")
plt.ion()
plt.imshow(total_colour_buffer_preproc.reshape((RESOLUTION[1],RESOLUTION[0],3)))
plt.draw()
#shuffle chunk list (does very good for equalizing load)
random.shuffle(chunks)
#partition chunk list in schedules for single threads
schedules = []
#from http://stackoverflow.com/questions/2659900/python-slicing-a-list-into-n-nearly-equal-length-partitions
q,r = divmod(NCHUNKS, NTHREADS)
indices = [q*i + min(i,r) for i in range(NTHREADS+1)]
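# each of the NTHREADS schedules gets q chunks; the first r schedules get one extra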
for i in range(NTHREADS):
schedules.append(chunks[ indices[i]:indices[i+1] ])
logger.debug("Split list into %d schedules with %s chunks each", NTHREADS, ", ".join([str(len(s)) for s in schedules]))
# global clock start
start_time = time.time()
itcounters = [0 for i in range(NTHREADS)]
chnkcounters = [0 for i in range(NTHREADS)]
#killers
killers = [False for i in range(NTHREADS)]
# command line output
class Outputter:
def name(self,num):
if num == -1:
return "M"
else:
return str(num)
def __init__(self):
self.message = {}
self.queue = multi.Queue()
self.stdscr = curses.initscr()
curses.noecho()
for i in range(NTHREADS):
self.message[i] = "..."
self.message[-1] = "..."
def doprint(self):
for i in range(NTHREADS + 1):
self.stdscr.addstr(
i, 0, self.name(i - 1) + "] " + self.message[i - 1])
self.stdscr.refresh()
def parsemessages(self):
doref = False
while not self.queue.empty():
i,m = self.queue.get()
self.setmessage(m, i)
doref = True
if doref:
self.doprint()
def setmessage(self,mess,i):
self.message[i] = mess.ljust(60)
#self.doprint()
def __del__(self):
try:
curses.echo()
curses.endwin()
print('\n'*(NTHREADS+1))
        except Exception:
pass
output = Outputter()
def format_time(secs):
if secs < 60:
return "%d s"%secs
if secs < 60*3:
return "%d m %d s"%divmod(secs,60)
return "%d min"%(secs/60)
def showprogress(messtring,i,queue):
global start_time
elapsed_time = time.time() - start_time
progress = float(itcounters[i])/(len(schedules[i])*NITER)
try:
ETA = elapsed_time / progress * (1-progress)
except ZeroDivisionError:
ETA = 0
mes = "%d%%, %s remaining. Chunk %d/%d, %s"%(
int(100*progress),
format_time(ETA),
chnkcounters[i],
len(schedules[i]),
messtring.ljust(30)
)
queue.put((i,mes))
#def showprogress(m,i):
# pass
def raytrace_schedule(i,schedule,total_shared,q): # this is the function running on each thread
#global schedules,itcounters,chnkcounters,killers
if len(schedule) == 0:
return
total_colour_buffer_preproc = tonumpyarray(total_shared)
#schedule = schedules[i]
itcounters[i] = 0
chnkcounters[i]= 0
for chunk in schedule:
#if killers[i]:
# break
chnkcounters[i]+=1
#number of chunk pixels
numChunk = chunk.shape[0]
#useful constant arrays
ones = np.ones((numChunk))
ones3 = np.ones((numChunk,3))
UPFIELD = np.outer(ones,np.array([0.,1.,0.]))
BLACK = np.outer(ones,np.array([0.,0.,0.]))
#arrays of integer pixel coordinates
x = chunk % RESOLUTION[0]
        y = chunk // RESOLUTION[0] # floor division: integer row index (correct on both Python 2 and 3)
showprogress("Generating view vectors...",i,q)
#the view vector in 3D space
view = np.zeros((numChunk,3))
view[:,0] = x.astype(float)/RESOLUTION[0] - .5
view[:,1] = ((-y.astype(float)/RESOLUTION[1] + .5)*RESOLUTION[1])/RESOLUTION[0] #(inverting y coordinate)
view[:,2] = 1.0
view[:,0]*=TANFOV
view[:,1]*=TANFOV
#rotating through the view matrix
view = np.einsum('jk,ik->ij',viewMatrix,view)
#original position
point = np.outer(ones, CAMERA_POS)
normview = normalize(view)
velocity = np.copy(normview)
# initializing the colour buffer
object_colour = np.zeros((numChunk,3))
object_alpha = np.zeros(numChunk)
#squared angular momentum per unit mass (in the "Newtonian fantasy")
#h2 = np.outer(sqrnorm(np.cross(point,velocity)),np.array([1.,1.,1.]))
h2 = sqrnorm(np.cross(point,velocity))[:,np.newaxis]
pointsqr = np.copy(ones3)
for it in range(NITER):
itcounters[i]+=1
if it%150 == 1:
if killers[i]:
break
showprogress("Raytracing...",i,q)
# STEPPING
oldpoint = np.copy(point) #not needed for tracing. Useful for intersections
if METHOD == METH_LEAPFROG:
#leapfrog method here feels good
point += velocity * STEP
if DISTORT:
#this is the magical - 3/2 r^(-5) potential...
accel = - 1.5 * h2 * point / np.power(sqrnorm(point),2.5)[:,np.newaxis]
velocity += accel * STEP
elif METHOD == METH_RK4:
if DISTORT:
#simple step size control
rkstep = STEP
# standard Runge-Kutta
y = np.zeros((numChunk,6))
y[:,0:3] = point
y[:,3:6] = velocity
k1 = RK4f( y, h2)
k2 = RK4f( y + 0.5*rkstep*k1, h2)
k3 = RK4f( y + 0.5*rkstep*k2, h2)
k4 = RK4f( y + rkstep*k3, h2)
increment = rkstep/6. * (k1 + 2*k2 + 2*k3 + k4)
velocity += increment[:,3:6]
point += increment[:,0:3]
#useful precalcs
pointsqr = sqrnorm(point)
#phi = np.arctan2(point[:,0],point[:,2]) #too heavy. Better an instance wherever it's needed.
#normvel = normalize(velocity) #never used! BAD BAD BAD!!
# FOG
if FOGDO and (it%FOGSKIP == 0):
phsphtaper = np.clip(0.8*(pointsqr - 1.0),0.,1.0)
fogint = np.clip(FOGMULT * FOGSKIP * STEP / pointsqr,0.0,1.0) * phsphtaper
fogcol = ones3
object_colour = blendcolors(fogcol,fogint,object_colour,object_alpha)
object_alpha = blendalpha(fogint, object_alpha)
# CHECK COLLISIONS
# accretion disk
if DISK_TEXTURE_INT != DT_NONE:
mask_crossing = np.logical_xor( oldpoint[:,1] > 0., point[:,1] > 0.) #whether it just crossed the horizontal plane
mask_distance = np.logical_and((pointsqr < DISKOUTERSQR), (pointsqr > DISKINNERSQR)) #whether it's close enough
diskmask = np.logical_and(mask_crossing,mask_distance)
if (diskmask.any()):
#actual collision point by intersection
lambdaa = - point[:,1]/velocity[:,1]
colpoint = point + lambdaa[:,np.newaxis] * velocity
colpointsqr = sqrnorm(colpoint)
if DISK_TEXTURE_INT == DT_GRID:
phi = np.arctan2(colpoint[:,0],point[:,2])
theta = np.arctan2(colpoint[:,1],norm(point[:,[0,2]]))
diskcolor = np.outer(
np.mod(phi,0.52359) < 0.261799,
np.array([1.,1.,0.])
) + \
np.outer(ones,np.array([0.,0.,1.]) )
diskalpha = diskmask
elif DISK_TEXTURE_INT == DT_SOLID:
diskcolor = np.array([1.,1.,.98])
diskalpha = diskmask
elif DISK_TEXTURE_INT == DT_TEXTURE:
phi = np.arctan2(colpoint[:,0],point[:,2])
uv = np.zeros((numChunk,2))
uv[:,0] = ((phi+2*np.pi)%(2*np.pi))/(2*np.pi)
uv[:,1] = (np.sqrt(colpointsqr)-DISKINNER)/(DISKOUTER-DISKINNER)
diskcolor = lookup ( texarr_disk, np.clip(uv,0.,1.))
#alphamask = (2.0*ransample) < sqrnorm(diskcolor)
#diskmask = np.logical_and(diskmask, alphamask )
diskalpha = diskmask * np.clip(sqrnorm(diskcolor)/3.0,0.0,1.0)
elif DISK_TEXTURE_INT == DT_BLACKBODY:
temperature = np.exp(bb.disktemp(colpointsqr,9.2103))
if REDSHIFT:
R = np.sqrt(colpointsqr)
disc_velocity = 0.70710678 * \
np.power((np.sqrt(colpointsqr)-1.).clip(0.1),-.5)[:,np.newaxis] * \
np.cross(UPFIELD, normalize(colpoint))
gamma = np.power( 1 - sqrnorm(disc_velocity).clip(max=.99), -.5)
# opz = 1 + z
opz_doppler = gamma * ( 1. + np.einsum('ij,ij->i',disc_velocity,normalize(velocity)))
opz_gravitational = np.power(1.- 1/R.clip(1),-.5)
# (1+z)-redshifted Planck spectrum is still Planckian at temperature T
temperature /= (opz_doppler*opz_gravitational).clip(0.1)
intensity = bb.intensity(temperature)
if DISK_INTENSITY_DO:
diskcolor = np.einsum('ij,i->ij', bb.colour(temperature),DISK_MULTIPLIER*intensity)#np.maximum(1.*ones,DISK_MULTIPLIER*intensity))
else:
diskcolor = bb.colour(temperature)
iscotaper = np.clip((colpointsqr-DISKINNERSQR)*0.3,0.,1.)
outertaper = np.clip(temperature/1000. ,0.,1.)
diskalpha = diskmask * iscotaper * outertaper#np.clip(diskmask * DISK_ALPHA_MULTIPLIER *intensity,0.,1.)
object_colour = blendcolors(diskcolor,diskalpha,object_colour,object_alpha)
object_alpha = blendalpha(diskalpha, object_alpha)
# event horizon
oldpointsqr = sqrnorm(oldpoint)
            mask_horizon = np.logical_and((pointsqr < 1),(oldpointsqr > 1) )
if mask_horizon.any() :
lambdaa = 1. - ((1.-oldpointsqr)/((pointsqr - oldpointsqr)))[:,np.newaxis]
colpoint = lambdaa * point + (1-lambdaa)*oldpoint
if HORIZON_GRID:
phi = np.arctan2(colpoint[:,0],point[:,2])
theta = np.arctan2(colpoint[:,1],norm(point[:,[0,2]]))
horizoncolour = np.outer( np.logical_xor(np.mod(phi,1.04719) < 0.52359,np.mod(theta,1.04719) < 0.52359), np.array([1.,0.,0.]))
else:
horizoncolour = BLACK#np.zeros((numPixels,3))
horizonalpha = mask_horizon
object_colour = blendcolors(horizoncolour,horizonalpha,object_colour,object_alpha)
object_alpha = blendalpha(horizonalpha, object_alpha)
showprogress("generating sky layer...",i,q)
vphi = np.arctan2(velocity[:,0],velocity[:,2])
vtheta = np.arctan2(velocity[:,1],norm(velocity[:,[0,2]]) )
vuv = np.zeros((numChunk,2))
vuv[:,0] = np.mod(vphi+4.5,2*np.pi)/(2*np.pi)
vuv[:,1] = (vtheta+np.pi/2)/(np.pi)
        if SKY_TEXTURE_INT == ST_TEXTURE:
col_sky = lookup(texarr_sky,vuv)[:,0:3]
showprogress("generating debug layers...",i,q)
##debug color: direction of view vector
#dbg_viewvec = np.clip(view + vec3(.5,.5,0.0),0.0,1.0)
##debug color: direction of final ray
##debug color: grid
#dbg_grid = np.abs(normalize(velocity)) < 0.1
if SKY_TEXTURE_INT == ST_TEXTURE:
col_bg = col_sky
elif SKY_TEXTURE_INT == ST_NONE:
col_bg = np.zeros((numChunk,3))
elif SKY_TEXTURE_INT == ST_FINAL:
dbg_finvec = np.clip(normalize(velocity) + np.array([.5,.5,0.0])[np.newaxis,:],0.0,1.0)
col_bg = dbg_finvec
else:
col_bg = np.zeros((numChunk,3))
showprogress("blending layers...",i,q)
col_bg_and_obj = blendcolors(SKYDISK_RATIO*col_bg, ones ,object_colour,object_alpha)
showprogress("beaming back to mothership.",i,q)
# copy back in the buffer
if not DISABLE_SHUFFLING:
total_colour_buffer_preproc[chunk] = col_bg_and_obj
else:
total_colour_buffer_preproc[chunk[0]:(chunk[-1]+1)] = col_bg_and_obj
#refresh display
# NO: plt does not allow drawing outside main thread
#if not DISABLE_DISPLAY:
# showprogress("updating display...")
# plt.imshow(total_colour_buffer_preproc.reshape((RESOLUTION[1],RESOLUTION[0],3)))
# plt.draw()
showprogress("garbage collection...",i,q)
gc.collect()
showprogress("Done.",i,q)
# Threading
process_list = []
for i in range(NTHREADS):
p = multi.Process(target=raytrace_schedule,args=(i,schedules[i],total_colour_buffer_preproc_shared,output.queue))
process_list.append(p)
logger.debug("Starting threads...")
for proc in process_list:
proc.start()
try:
refreshcounter = 0
while True:
refreshcounter+=1
time.sleep(0.1)
output.parsemessages()
if not DISABLE_DISPLAY and (refreshcounter%40 == 0):
output.setmessage("Updating display...",-1)
plt.imshow(total_colour_buffer_preproc.reshape((RESOLUTION[1],RESOLUTION[0],3)))
plt.draw()
output.setmessage("Idle.", -1)
alldone = True
for i in range(NTHREADS):
if process_list[i].is_alive():
alldone = False
if alldone:
break
except KeyboardInterrupt:
for i in range(NTHREADS):
killers[i] = True
sys.exit()
del output
logger.debug("Done tracing.")
logger.debug("Total raytracing time: %s", datetime.timedelta(seconds=(time.time() - start_time)))
logger.debug("Postprocessing...")
#gain
logger.debug("- gain...")
total_colour_buffer_preproc *= GAIN
# airy bloom
if AIRY_BLOOM:
logger.debug("-computing Airy disk bloom...")
#blending bloom
#colour = total_colour_buffer_preproc + 0.3*blurd #0.2*dbg_grid + 0.8*dbg_finvec
#airy disk bloom
colour_bloomd = np.copy(total_colour_buffer_preproc)
colour_bloomd = colour_bloomd.reshape((RESOLUTION[1],RESOLUTION[0],3))
# the float constant is 1.22 * 650nm / (4 mm), the typical diffractive resolution
# of the human eye for red light. It's in radians, so we rescale using field of view.
radd = 0.00019825 * RESOLUTION[0] / np.arctan(TANFOV)
# the user is allowed to rescale the resolution, though
radd*=AIRY_RADIUS
# the pixel size of the kernel:
# 25 pixels radius is ok for 5.0 bright source pixel at 1920x1080, so...
    # remembering that airy ~ 1/x^3, so if we want intensity/x^3 < threshold =>
# => max_x = (intensity/threshold)^1/3
# so it scales with
# - the cube root of maximum intensity
# - linear in resolution
mxint = np.amax(colour_bloomd)
    kern_radius = 25 * np.power( mxint / 5.0 , 1./3.) * RESOLUTION[0]/1920.
logger.debug("--(radius: %3f, kernel pixel radius: %3f, maximum source brightness: %3f)", radd, kern_radius, mxint)
colour_bloomd = bloom.airy_convolve(colour_bloomd,radd)
colour_bloomd = colour_bloomd.reshape((numPixels,3))
colour_pb = colour_bloomd
else:
colour_pb = total_colour_buffer_preproc
# wide gaussian (lighting dust effect)
if BLURDO:
logger.debug("-computing wide gaussian blur...")
#hipass = np.outer(sqrnorm(total_colour_buffer_preproc) > BLOOMCUT, np.array([1.,1.,1.])) * total_colour_buffer_preproc
blurd = np.copy(total_colour_buffer_preproc)
blurd = blurd.reshape((RESOLUTION[1],RESOLUTION[0],3))
for i in range(2):
logger.debug("- gaussian blur pass %d...", i)
blurd = ndim.gaussian_filter(blurd,int(0.05*RESOLUTION[0]))
blurd = blurd.reshape((numPixels,3))
colour = colour_pb + 0.2 * blurd
else:
colour = colour_pb
#normalization
if NORMALIZE > 0:
logger.debug("- normalizing...")
colour *= 1 / (NORMALIZE * np.amax(colour.flatten()) )
#final colour
colour = np.clip(colour,0.,1.)
logger.debug("Conversion to image and saving...")
saveToImg(colour,"tests/out.png")
saveToImg(total_colour_buffer_preproc,"tests/preproc.png")
if BLURDO:
saveToImg(colour_pb,"tests/postbloom.png")
|
rantonels/starless
|
tracer.py
|
Python
|
gpl-3.0
| 30,291
|
[
"Gaussian"
] |
efa683b271628c563dd2508d23d71692c2b62e5f324940a50cdce26f7b739608
|
# texttable - module for creating simple ASCII tables
# Copyright (C) 2003-2015 Gerome Fournier <jef(at)foutaise.org>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""module for creating simple ASCII tables
Example:
table = Texttable()
table.set_cols_align(["l", "r", "c"])
table.set_cols_valign(["t", "m", "b"])
table.add_rows([["Name", "Age", "Nickname"],
["Mr\\nXavier\\nHuon", 32, "Xav'"],
["Mr\\nBaptiste\\nClement", 1, "Baby"],
["Mme\\nLouise\\nBourgeau", 28, "Lou\\n\\nLoue"]])
print table.draw() + "\\n"
table = Texttable()
table.set_deco(Texttable.HEADER)
table.set_cols_dtype(['t', # text
'f', # float (decimal)
'e', # float (exponent)
'i', # integer
'a']) # automatic
table.set_cols_align(["l", "r", "r", "r", "l"])
table.add_rows([["text", "float", "exp", "int", "auto"],
["abcd", "67", 654, 89, 128.001],
["efghijk", 67.5434, .654, 89.6, 12800000000000000000000.00023],
["lmn", 5e-78, 5e-78, 89.4, .000000000000128],
["opqrstu", .023, 5e+78, 92., 12800000000000000000000]])
    print(table.draw())
Result:
+----------+-----+----------+
| Name | Age | Nickname |
+==========+=====+==========+
| Mr | | |
| Xavier | 32 | |
| Huon | | Xav' |
+----------+-----+----------+
| Mr | | |
| Baptiste | 1 | |
| Clement | | Baby |
+----------+-----+----------+
| Mme | | Lou |
| Louise | 28 | |
| Bourgeau | | Loue |
+----------+-----+----------+
text float exp int auto
===========================================
abcd 67.000 6.540e+02 89 128.001
efgh 67.543 6.540e-01 90 1.280e+22
ijkl 0.000 5.000e-78 89 0.000
mnop 0.023 5.000e+78 92 1.280e+22
"""
from __future__ import division
__all__ = ["Texttable", "ArraySizeError"]
__author__ = 'Gerome Fournier <jef(at)foutaise.org>'
__license__ = 'LGPL'
__version__ = '0.8.8'
__credits__ = """\
Jeff Kowalczyk:
- textwrap improved import
- comment concerning header output
Anonymous:
- add_rows method, for adding rows in one go
Sergey Simonenko:
- redefined len() function to deal with non-ASCII characters
Roger Lew:
- columns datatype specifications
Brian Peterson:
- better handling of unicode errors
Frank Sachsenheim:
- add Python 2/3-compatibility
Maximilian Hils:
- fix minor bug for Python 3 compatibility
frinkelpi:
- preserve empty lines
"""
import sys
import string
import unicodedata
try:
if sys.version >= '2.3':
import textwrap
elif sys.version >= '2.2':
from optparse import textwrap
else:
from optik import textwrap
except ImportError:
sys.stderr.write("Can't import textwrap module!\n")
raise
if sys.version >= '2.7':
from functools import reduce
if sys.version >= '3.0':
unicode_type = str
bytes_type = bytes
else:
unicode_type = unicode
bytes_type = str
def obj2unicode(obj):
"""Return a unicode representation of a python object
"""
if isinstance(obj, unicode_type):
return obj
elif isinstance(obj, bytes_type):
try:
return unicode_type(obj, 'utf-8')
except UnicodeDecodeError as strerror:
sys.stderr.write("UnicodeDecodeError exception for string '%s': %s\n" % (obj, strerror))
return unicode_type(obj, 'utf-8', 'replace')
else:
return unicode_type(obj)
def len(iterable):
"""Redefining len here so it will be able to work with non-ASCII characters
"""
if isinstance(iterable, bytes_type) or isinstance(iterable, unicode_type):
unicode_data = obj2unicode(iterable)
if hasattr(unicodedata, 'east_asian_width'):
w = unicodedata.east_asian_width
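            # wide (W) and fullwidth (F) East Asian characters occupy two
            # terminal columns, so they count double toward the length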
return sum([w(c) in 'WF' and 2 or 1 for c in unicode_data])
else:
return unicode_data.__len__()
else:
return iterable.__len__()
class ArraySizeError(Exception):
"""Exception raised when specified rows don't fit the required size
"""
def __init__(self, msg):
self.msg = msg
Exception.__init__(self, msg, '')
def __str__(self):
return self.msg
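# Illustrative sketch: the first row fixes the row size, and any later
# mismatch raises ArraySizeError (see _check_row_size below).
#   t = Texttable()
#   t.add_row(['a', 'b'])        # row size is now 2
#   t.add_row(['a', 'b', 'c'])   # raises ArraySizeError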
class Texttable:
BORDER = 1
HEADER = 1 << 1
HLINES = 1 << 2
VLINES = 1 << 3
def __init__(self, max_width=80):
"""Constructor
- max_width is an integer, specifying the maximum width of the table
- if set to 0, size is unlimited, therefore cells won't be wrapped
"""
if max_width <= 0:
max_width = False
self._max_width = max_width
self._precision = 3
self._deco = Texttable.VLINES | Texttable.HLINES | Texttable.BORDER | \
Texttable.HEADER
self.set_chars(['-', '|', '+', '='])
self.reset()
def reset(self):
"""Reset the instance
- reset rows and header
"""
self._hline_string = None
self._row_size = None
self._header = []
self._rows = []
def set_chars(self, array):
"""Set the characters used to draw lines between rows and columns
- the array should contain 4 fields:
[horizontal, vertical, corner, header]
- default is set to:
['-', '|', '+', '=']
"""
if len(array) != 4:
raise ArraySizeError("array should contain 4 characters")
array = [ x[:1] for x in [ str(s) for s in array ] ]
(self._char_horiz, self._char_vert,
self._char_corner, self._char_header) = array
def set_deco(self, deco):
"""Set the table decoration
        - 'deco' can be a combination of:
Texttable.BORDER: Border around the table
Texttable.HEADER: Horizontal line below the header
Texttable.HLINES: Horizontal lines between rows
Texttable.VLINES: Vertical lines between columns
All of them are enabled by default
- example:
Texttable.BORDER | Texttable.HEADER
"""
self._deco = deco
def set_cols_align(self, array):
"""Set the desired columns alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right
"""
self._check_row_size(array)
self._align = array
def set_cols_valign(self, array):
"""Set the desired columns vertical alignment
- the elements of the array should be either "t", "m" or "b":
* "t": column aligned on the top of the cell
* "m": column aligned on the middle of the cell
* "b": column aligned on the bottom of the cell
"""
self._check_row_size(array)
self._valign = array
def set_cols_dtype(self, array):
"""Set the desired columns datatype for the cols.
- the elements of the array should be either "a", "t", "f", "e" or "i":
* "a": automatic (try to use the most appropriate datatype)
* "t": treat as text
* "f": treat as float in decimal format
* "e": treat as float in exponential format
* "i": treat as int
- by default, automatic datatyping is used for each column
"""
self._check_row_size(array)
self._dtype = array
def set_cols_width(self, array):
"""Set the desired columns width
- the elements of the array should be integers, specifying the
width of each column. For example:
[10, 20, 5]
"""
self._check_row_size(array)
try:
array = list(map(int, array))
if reduce(min, array) <= 0:
raise ValueError
except ValueError:
sys.stderr.write("Wrong argument in column width specification\n")
raise
self._width = array
def set_precision(self, width):
"""Set the desired precision for float/exponential formats
- width must be an integer >= 0
- default value is set to 3
"""
        if not type(width) is int or width < 0:
            raise ValueError('width must be an integer greater than or equal to 0')
self._precision = width
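    # Illustrative effect (values are ours) of the precision on 'f' and 'e'
    # columns, given the '%.*f' / '%.*e' formatting in _str below:
    #   t.set_precision(2)  ->  3.14159 renders as '3.14'     (dtype 'f')
    #                           3.14159 renders as '3.14e+00' (dtype 'e')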
def header(self, array):
"""Specify the header of the table
"""
self._check_row_size(array)
self._header = list(map(obj2unicode, array))
def add_row(self, array):
"""Add a row in the rows stack
- cells can contain newlines and tabs
"""
self._check_row_size(array)
if not hasattr(self, "_dtype"):
self._dtype = ["a"] * self._row_size
cells = []
for i, x in enumerate(array):
cells.append(self._str(i, x))
self._rows.append(cells)
def add_rows(self, rows, header=True):
"""Add several rows in the rows stack
        - The 'rows' argument can be either an iterator returning arrays,
          or a two-dimensional array
        - 'header' specifies if the first row should be used as the header
          of the table
        """
        # nb: don't use 'iter' on two-dimensional arrays, to keep this
        # code usable on python 2.1
        if header:
            if hasattr(rows, '__iter__') and \
                    (hasattr(rows, 'next') or hasattr(rows, '__next__')):
                self.header(next(rows))
            else:
                self.header(rows[0])
                rows = rows[1:]
for row in rows:
self.add_row(row)
def draw(self):
"""Draw the table
- the table is returned as a whole string
"""
if not self._header and not self._rows:
return
self._compute_cols_width()
self._check_align()
out = ""
if self._has_border():
out += self._hline()
if self._header:
out += self._draw_line(self._header, isheader=True)
if self._has_header():
out += self._hline_header()
length = 0
for row in self._rows:
length += 1
out += self._draw_line(row)
if self._has_hlines() and length < len(self._rows):
out += self._hline()
if self._has_border():
out += self._hline()
return out[:-1]
def _str(self, i, x):
"""Handles string formatting of cell data
i - index of the cell datatype in self._dtype
x - cell data to format
"""
        try:
            f = float(x)
        except (TypeError, ValueError):
            return obj2unicode(x)
n = self._precision
dtype = self._dtype[i]
if dtype == 'i':
return str(int(round(f)))
elif dtype == 'f':
return '%.*f' % (n, f)
elif dtype == 'e':
return '%.*e' % (n, f)
elif dtype == 't':
return obj2unicode(x)
else:
if f - round(f) == 0:
if abs(f) > 1e8:
return '%.*e' % (n, f)
else:
return str(int(round(f)))
else:
if abs(f) > 1e8:
return '%.*e' % (n, f)
else:
return '%.*f' % (n, f)
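    # Illustrative outputs of _str for one cell value under each dtype,
    # with the default precision of 3 (examples are ours):
    #   _str(i, 3.6) -> '4'          (dtype 'i': rounded to int)
    #   _str(i, 3.6) -> '3.600'      (dtype 'f')
    #   _str(i, 3.6) -> '3.600e+00'  (dtype 'e')
    #   _str(i, 3.6) -> '3.6'        (dtype 't': kept as text)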
def _check_row_size(self, array):
"""Check that the specified array fits the previous rows size
"""
if not self._row_size:
self._row_size = len(array)
elif self._row_size != len(array):
raise ArraySizeError("array should contain %d elements" \
% self._row_size)
def _has_vlines(self):
"""Return a boolean, if vlines are required or not
"""
return self._deco & Texttable.VLINES > 0
def _has_hlines(self):
"""Return a boolean, if hlines are required or not
"""
return self._deco & Texttable.HLINES > 0
def _has_border(self):
"""Return a boolean, if border is required or not
"""
return self._deco & Texttable.BORDER > 0
def _has_header(self):
"""Return a boolean, if header line is required or not
"""
return self._deco & Texttable.HEADER > 0
def _hline_header(self):
"""Print header's horizontal line
"""
return self._build_hline(True)
def _hline(self):
"""Print an horizontal line
"""
if not self._hline_string:
self._hline_string = self._build_hline()
return self._hline_string
def _build_hline(self, is_header=False):
"""Return a string used to separated rows or separate header from
rows
"""
horiz = self._char_horiz
if (is_header):
horiz = self._char_header
# compute cell separator
s = "%s%s%s" % (horiz, [horiz, self._char_corner][self._has_vlines()],
horiz)
# build the line
l = s.join([horiz * n for n in self._width])
# add border if needed
if self._has_border():
l = "%s%s%s%s%s\n" % (self._char_corner, horiz, l, horiz,
self._char_corner)
else:
l += "\n"
return l
def _len_cell(self, cell):
"""Return the width of the cell
        Special characters such as newlines and tabs are taken into account
        when computing the width of the cell
"""
cell_lines = cell.split('\n')
maxi = 0
for line in cell_lines:
length = 0
parts = line.split('\t')
for part, i in zip(parts, list(range(1, len(parts) + 1))):
length = length + len(part)
if i < len(parts):
length = (length//8 + 1) * 8
maxi = max(maxi, length)
return maxi
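    # Illustrative (examples are ours): tabs advance to the next multiple of
    # 8 columns, and the widest line of the cell wins.
    #   _len_cell('ab\tcd')   -> 10  ('ab' padded to 8 columns, plus 'cd')
    #   _len_cell('ab\ncdef') -> 4   (widest of 'ab' and 'cdef')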
def _compute_cols_width(self):
"""Return an array with the width of each column
        If a specific width has been specified, return immediately. If the
        total of the column widths exceeds the desired table width, new
        widths are computed to fit and cells are wrapped.
"""
if hasattr(self, "_width"):
return
maxi = []
if self._header:
maxi = [ self._len_cell(x) for x in self._header ]
for row in self._rows:
for cell,i in zip(row, list(range(len(row)))):
try:
maxi[i] = max(maxi[i], self._len_cell(cell))
except (TypeError, IndexError):
maxi.append(self._len_cell(cell))
ncols = len(maxi)
content_width = sum(maxi)
deco_width = 3*(ncols-1) + [0,4][self._has_border()]
if self._max_width and (content_width + deco_width) > self._max_width:
""" content too wide to fit the expected max_width
let's recompute maximum cell width for each cell
"""
if self._max_width < (ncols + deco_width):
raise ValueError('max_width too low to render data')
available_width = self._max_width - deco_width
newmaxi = [0] * ncols
i = 0
while available_width > 0:
if newmaxi[i] < maxi[i]:
newmaxi[i] += 1
available_width -= 1
i = (i + 1) % ncols
maxi = newmaxi
self._width = maxi
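    # Illustrative redistribution (numbers are ours): with the default border
    # on, deco_width is 3*(ncols-1) + 4. Given natural widths [10, 10] and
    # max_width=15, deco_width is 7, available_width is 8, and the
    # round-robin loop above yields self._width == [4, 4], so both columns
    # get wrapped.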
def _check_align(self):
"""Check if alignment has been specified, set default one if not
"""
if not hasattr(self, "_align"):
self._align = ["l"] * self._row_size
if not hasattr(self, "_valign"):
self._valign = ["t"] * self._row_size
def _draw_line(self, line, isheader=False):
"""Draw a line
        Iterate over the wrapped lines of the row, across all of its cells
"""
line = self._splitit(line, isheader)
space = " "
out = ""
for i in range(len(line[0])):
if self._has_border():
out += "%s " % self._char_vert
length = 0
for cell, width, align in zip(line, self._width, self._align):
length += 1
cell_line = cell[i]
fill = width - len(cell_line)
if isheader:
align = "c"
if align == "r":
out += fill * space + cell_line
elif align == "c":
out += (int(fill/2) * space + cell_line \
+ int(fill/2 + fill%2) * space)
else:
out += cell_line + fill * space
if length < len(line):
out += " %s " % [space, self._char_vert][self._has_vlines()]
out += "%s\n" % ['', space + self._char_vert][self._has_border()]
return out
def _splitit(self, line, isheader):
"""Split each element of line to fit the column width
Each element is turned into a list, result of the wrapping of the
string to the desired width
"""
line_wrapped = []
for cell, width in zip(line, self._width):
array = []
for c in cell.split('\n'):
if c.strip() == "":
array.append("")
else:
array.extend(textwrap.wrap(c, width))
line_wrapped.append(array)
max_cell_lines = reduce(max, list(map(len, line_wrapped)))
for cell, valign in zip(line_wrapped, self._valign):
if isheader:
valign = "t"
if valign == "m":
missing = max_cell_lines - len(cell)
cell[:0] = [""] * int(missing / 2)
cell.extend([""] * int(missing / 2 + missing % 2))
elif valign == "b":
cell[:0] = [""] * (max_cell_lines - len(cell))
else:
cell.extend([""] * (max_cell_lines - len(cell)))
return line_wrapped
if __name__ == '__main__':
table = Texttable()
table.set_cols_align(["l", "r", "c"])
table.set_cols_valign(["t", "m", "b"])
table.add_rows([["Name", "Age", "Nickname"],
["Mr\nXavier\nHuon", 32, "Xav'"],
["Mr\nBaptiste\nClement", 1, "Baby"],
["Mme\nLouise\nBourgeau", 28, "Lou\n \nLoue"]])
print(table.draw() + "\n")
table = Texttable()
table.set_deco(Texttable.HEADER)
table.set_cols_dtype(['t', # text
'f', # float (decimal)
'e', # float (exponent)
'i', # integer
'a']) # automatic
table.set_cols_align(["l", "r", "r", "r", "l"])
table.add_rows([["text", "float", "exp", "int", "auto"],
["abcd", "67", 654, 89, 128.001],
["efghijk", 67.5434, .654, 89.6, 12800000000000000000000.00023],
["lmn", 5e-78, 5e-78, 89.4, .000000000000128],
["opqrstu", .023, 5e+78, 92., 12800000000000000000000]])
print(table.draw())
| hanak/artshow-keeper | artshowkeeper/common/texttable.py | Python | gpl-3.0 | 20,175 | ["Brian"] | 2ac9ca232d5b7a2cbbb7816c5dc33e378308fb6d0b2aa75240e548e82d63b1df |
""" A viewer for mlab scene. Adds a button to open up the engine.
"""
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Copyright (c) 2008, Enthought, Inc.
# License: BSD Style.
# Standard library imports
from os.path import join
# Enthought library imports
from tvtk.tools.ivtk import IVTK
from tvtk.pyface.api import DecoratedScene
from traits.api import Callable
from pyface.api import ImageResource
from pyface.action.api import Action, Group
from pyface.resource.api import resource_path
# Local imports
from mayavi.core.common import error
from mayavi.preferences.api import set_scene_preferences, \
get_scene_preferences
###############################################################################
# A decorated scene with an additional button.
###############################################################################
class MayaviScene(DecoratedScene):
""" A scene UI, similar to a decorated scene, but with more buttons.
"""
image_search_path = [join(resource_path(), 'images'), ]
##########################################################################
# Non-public interface.
##########################################################################
def show_engine(self):
""" Open the engine view corresponding to the engine of the
scene.
"""
from mayavi.core.registry import registry
from mayavi.core.ui.engine_rich_view import EngineRichView
        try:
            engine = registry.find_scene_engine(self)
        except TypeError:
            error('This scene is not managed by Mayavi')
            return
        return EngineRichView(engine=engine).scene_editing_view(scene=self)
######################################################################
# Trait handlers.
######################################################################
def _actions_default(self):
actions = [ Group(
Action(tooltip="View the Mayavi pipeline",
image=ImageResource('m2',
search_path=self.image_search_path),
on_perform=self.show_engine,
),
),
]
actions.extend(DecoratedScene._actions_default(self))
return actions
def mayavi_scene_factory(parent):
"""A mayavi scene factory that creates a scene with preferences
appropriately set."""
p = get_scene_preferences()
s = MayaviScene(parent, stereo=p['stereo'])
set_scene_preferences(s, p)
return s
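# Illustrative note (ours): any code expecting a scene-factory callable can
# use mayavi_scene_factory in place of a plain DecoratedScene factory;
# MayaviViewer below wires it in through its _scene_factory trait.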
###############################################################################
# A viewer making use of the MayaviScene
###############################################################################
class MayaviViewer(IVTK):
""" A viewer window for mlab.
"""
_scene_factory = Callable(mayavi_scene_factory)
def _size_default(self):
return (400, 300)
def viewer_factory(size=(400, 350)):
viewer = MayaviViewer()
viewer.menu_bar_manager = None
    viewer.size = size
viewer.open()
return viewer
if __name__ == '__main__':
from mayavi.tools.show import show
viewer_factory()
show()
| dmsurti/mayavi | mayavi/core/ui/mayavi_scene.py | Python | bsd-3-clause | 3,226 | ["Mayavi"] | 34087cddb57b42ebbc3c7c034e0e940443c0c55fa3ec3a8385812be2a321958c |