text stringlengths 12 1.05M | repo_name stringlengths 5 86 | path stringlengths 4 191 | language stringclasses 1 value | license stringclasses 15 values | size int32 12 1.05M | keyword listlengths 1 23 | text_hash stringlengths 64 64 |
|---|---|---|---|---|---|---|---|
#!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 16.3.8
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'version': '1.0'}
DOCUMENTATION = '''
---
module: avi_sslkeyandcertificate
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of SSLKeyAndCertificate Avi RESTful Object
description:
- This module is used to configure SSLKeyAndCertificate object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
ca_certs:
description:
- Ca certificates in certificate chain.
certificate:
description:
- Sslcertificate settings for sslkeyandcertificate.
required: true
certificate_management_profile_ref:
description:
- It is a reference to an object of type certificatemanagementprofile.
created_by:
description:
- Creator name.
dynamic_params:
description:
- Dynamic parameters needed for certificate management profile.
enckey_base64:
description:
- Encrypted private key corresponding to the private key (e.g.
- Those generated by an hsm such as thales nshield).
enckey_name:
description:
- Name of the encrypted private key (e.g.
- Those generated by an hsm such as thales nshield).
hardwaresecuritymodulegroup_ref:
description:
- It is a reference to an object of type hardwaresecuritymodulegroup.
key:
description:
- Private key.
key_params:
description:
- Sslkeyparams settings for sslkeyandcertificate.
name:
description:
- Name of the object.
required: true
status:
description:
- Status of sslkeyandcertificate.
- Default value when not specified in API or module is interpreted by Avi Controller as SSL_CERTIFICATE_FINISHED.
tenant_ref:
description:
- It is a reference to an object of type tenant.
type:
description:
- Type of sslkeyandcertificate.
- Default value when not specified in API or module is interpreted by Avi Controller as SSL_CERTIFICATE_TYPE_VIRTUALSERVICE.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = '''
- name: Create a SSL Key and Certificate
avi_sslkeyandcertificate:
controller: 10.10.27.90
username: admin
password: AviNetworks123!
key: |
-----BEGIN PRIVATE KEY-----
....
-----END PRIVATE KEY-----
certificate:
self_signed: true
certificate: |
-----BEGIN CERTIFICATE-----
....
-----END CERTIFICATE-----
type: SSL_CERTIFICATE_TYPE_VIRTUALSERVICE
name: MyTestCert
'''
RETURN = '''
obj:
description: SSLKeyAndCertificate (api/sslkeyandcertificate) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi_ansible_utils import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
    """Ansible entry point: create/update/delete an SSLKeyAndCertificate
    object on an Avi controller through the Avi REST API helper."""
    # Options specific to this object type; the shared controller/credential
    # options are merged in from avi_common_argument_spec() below.
    argument_specs = {
        'state': dict(default='present', choices=['absent', 'present']),
        'ca_certs': dict(type='list'),
        'certificate': dict(type='dict', required=True),
        'certificate_management_profile_ref': dict(type='str'),
        'created_by': dict(type='str'),
        'dynamic_params': dict(type='list'),
        'enckey_base64': dict(type='str'),
        'enckey_name': dict(type='str'),
        'hardwaresecuritymodulegroup_ref': dict(type='str'),
        'key': dict(type='str'),
        'key_params': dict(type='dict'),
        'name': dict(type='str', required=True),
        'status': dict(type='str'),
        'tenant_ref': dict(type='str'),
        'type': dict(type='str'),
        'url': dict(type='str'),
        'uuid': dict(type='str'),
    }
    argument_specs.update(avi_common_argument_spec())
    module = AnsibleModule(
        argument_spec=argument_specs, supports_check_mode=True)
    if not HAS_AVI:
        return module.fail_json(msg=(
            'Avi python API SDK (avisdk>=16.3.5.post1) is not installed. '
            'For more details visit https://github.com/avinetworks/sdk.'))
    # 'key' is passed as a sensitive field -- presumably excluded from
    # change comparison/logging by avi_ansible_api; confirm there.
    return avi_ansible_api(module, 'sslkeyandcertificate',
                           set(['key']))


if __name__ == '__main__':
    main()
| 0x46616c6b/ansible | lib/ansible/modules/network/avi/avi_sslkeyandcertificate.py | Python | gpl-3.0 | 5,603 | [
"VisIt"
] | 41e72dad41389b2e2561cc05f5af044577b1aea0eed2db8481df6a764ee47bfa |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# Licensed under a 3-clause BSD license.
#
# @Author: Brian Cherinka
# @Date: 2017-02-22 10:38:28
# @Last modified by: Brian Cherinka
# @Last modified time: 2017-07-31 12:07:00
from __future__ import print_function, division, absolute_import
from marvin.web.controllers.galaxy import make_nsa_dict
from marvin.web.controllers.galaxy import getWebMap
from marvin.tools.cube import Cube
from marvin.tests.conftest import set_the_config
import pytest
@pytest.fixture()
def cube(galaxy, mode):
    """Return a Cube for the requested galaxy/mode, annotated with the
    expected NSA plotting columns for later comparison."""
    set_the_config(galaxy.release)
    galaxy_cube = Cube(plateifu=galaxy.plateifu, mode=mode,
                       release=galaxy.release)
    galaxy_cube.exp_nsa_plotcols = galaxy.nsa_data
    return galaxy_cube
@pytest.fixture()
def params(galaxy):
    """Common request parameters: just the release of the galaxy under test."""
    return dict(release=galaxy.release)
@pytest.mark.parametrize('page', [('galaxy_page', 'Galaxy:index')], ids=['galaxy'], indirect=True)
class TestGalaxyPage(object):
    """Smoke tests for the galaxy index page."""

    def test_assert_galaxy_template_used(self, page, get_templates):
        # the index route should render galaxy.html with an empty body payload
        page.load_page('get', page.url)
        assert page.data == ''
        template, context = get_templates[0]
        assert template.name == 'galaxy.html', 'Template used should be galaxy.html'
@pytest.mark.parametrize('page', [('galaxy_page', 'initnsaplot')], ids=['initnsa'], indirect=True)
class TestNSA(object):
    """Tests for the initnsaplot endpoint and the make_nsa_dict helper."""

    #@marvin_test_if(mark='skip', cube=dict(nsa=[None]))
    def test_nsadict_correct(self, cube, page):
        # every expected column set must be contained in what the helper returns
        nsa, cols = make_nsa_dict(cube.nsa)
        for expected in cube.exp_nsa_plotcols.values():
            assert set(expected.keys()).issubset(set(cols))
            page.assert_dict_contains_subset(expected, nsa)
            page.assertListIn(expected.keys(), cols)

    @pytest.mark.skip('these magically worked when they should not have and now they actually do not')
    def test_initnsa_method_not_allowed(self, page, params, get_templates):
        page.load_page('get', page.url, params=params)
        template, context = get_templates[0]
        assert 'errors/method_not_allowed.html' == template.name

    def test_initnsa_no_plateifu(self, page, get_templates):
        # posting without a plateifu must trigger the null-field validation error
        errmsg = 'Field may not be null.'
        page.load_page('post', page.url)
        template, context = get_templates[0]
        page.route_no_valid_webparams(template, context, 'plateifu', reqtype='post', errmsg=errmsg)
class TestWebMap(object):
    """Tests for the getWebMap helper used by the galaxy web view."""

    @pytest.mark.parametrize('parameter, channel',
                             [('emline_gflux', 'ha_6564'),
                              ('emline_gsigma', 'ha_6564'),
                              ('stellar_sigma', None)],
                             ids=['gflux', 'gsigma', 'stellarsigma'])
    def test_getmap(self, cube, parameter, channel):
        themap, msg = getWebMap(cube, parameter=parameter, channel=channel)
        assert isinstance(themap, dict)
        assert 'values' in themap
        values = themap['values']
        assert isinstance(values, list)
        assert parameter in msg
        # sigma-type maps get an instrumental correction outside MPL-6
        is_sigma = 'sigma' in parameter
        if is_sigma and cube.release != 'MPL-6':
            assert 'Corrected' in msg

    def test_getmap_failed(self, cube):
        # an unknown parameter name yields no map plus an explanatory message
        themap, msg = getWebMap(cube, parameter='crap')
        assert themap is None
        assert 'Could not get map' in msg
| albireox/marvin | python/marvin/tests/web/test_galaxy.py | Python | bsd-3-clause | 3,224 | [
"Brian",
"Galaxy"
] | e6589ae36db72b7ff438f741efa1e2be295f26694b8519f1a3303a140a45b87b |
#! /usr/bin/python
# -*- coding: utf-8 -*-
"""versioneer.py
(like a rocketeer, but for versions)
* https://github.com/warner/python-versioneer
* Brian Warner
* License: Public Domain
* Version: 0.7+
This file helps distutils-based projects manage their version number by just
creating version-control tags.
For developers who work from a VCS-generated tree (e.g. 'git clone' etc),
each 'setup.py version', 'setup.py build', 'setup.py sdist' will compute a
version number by asking your version-control tool about the current
checkout. The version number will be written into a generated _version.py
file of your choosing, where it can be included by your __init__.py
For users who work from a VCS-generated tarball (e.g. 'git archive'), it will
compute a version number by looking at the name of the directory created when
the tarball is unpacked. This conventionally includes both the name of the
project and a version number.
For users who work from a tarball built by 'setup.py sdist', it will get a
version number from a previously-generated _version.py file.
As a result, loading code directly from the source tree will not result in a
real version. If you want real versions from VCS trees (where you frequently
update from the upstream repository, or do new development), you will need to
do a 'setup.py version' after each update, and load code from the build/
directory.
You need to provide this code with a few configuration values:
versionfile_source:
A project-relative pathname into which the generated version strings
should be written. This is usually a _version.py next to your project's
main __init__.py file. If your project uses src/myproject/__init__.py,
this should be 'src/myproject/_version.py'. This file should be checked
in to your VCS as usual: the copy created below by 'setup.py
update_files' will include code that parses expanded VCS keywords in
generated tarballs. The 'build' and 'sdist' commands will replace it with
a copy that has just the calculated version string.
versionfile_build:
Like versionfile_source, but relative to the build directory instead of
the source directory. These will differ when your setup.py uses
'package_dir='. If you have package_dir={'myproject': 'src/myproject'},
then you will probably have versionfile_build='myproject/_version.py' and
versionfile_source='src/myproject/_version.py'.
tag_prefix: a string, like 'PROJECTNAME-', which appears at the start of all
VCS tags. If your tags look like 'myproject-1.2.0', then you
should use tag_prefix='myproject-'. If you use unprefixed tags
like '1.2.0', this should be an empty string.
parentdir_prefix: a string, frequently the same as tag_prefix, which
appears at the start of all unpacked tarball filenames. If
your tarball unpacks into 'myproject-1.2.0', this should
be 'myproject-'.
To use it:
1: include this file in the top level of your project
2: make the following changes to the top of your setup.py:
import versioneer
versioneer.versionfile_source = 'src/myproject/_version.py'
versioneer.versionfile_build = 'myproject/_version.py'
versioneer.tag_prefix = '' # tags are like 1.2.0
versioneer.parentdir_prefix = 'myproject-' # dirname like 'myproject-1.2.0'
3: add the following arguments to the setup() call in your setup.py:
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
4: run 'setup.py update_files', which will create _version.py, and will
append the following to your __init__.py:
from _version import __version__
5: modify your MANIFEST.in to include versioneer.py
6: add both versioneer.py and the generated _version.py to your VCS
"""
from __future__ import print_function, division, absolute_import
import os
import sys
import re
import subprocess
from distutils.core import Command
from distutils.command.sdist import sdist as _sdist
from distutils.command.build import build as _build
# Per-project configuration: setup.py must assign the four values below
# before calling get_version()/get_cmdclass() (see module docstring).
versionfile_source = None
versionfile_build = None
tag_prefix = None
parentdir_prefix = None
# Only git is supported. GIT is the executable name passed to run_command
# (shell=False, so Windows callers would need "git.cmd" -- see run_command).
VCS = "git"
# False here; the copy of this code embedded in LONG_VERSION_PY sets it True
# so the path-inversion logic knows which file it is running from.
IN_LONG_VERSION_PY = False
GIT = "git"
LONG_VERSION_PY = '''
IN_LONG_VERSION_PY = True
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by github's download-from-tag
# feature). Distribution tarballs (build by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.7+ (https://github.com/warner/python-versioneer)
# these strings will be replaced by git during git-archive
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
import subprocess
import sys
def run_command(args, cwd=None, verbose=False):
try:
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen(args, stdout=subprocess.PIPE, cwd=cwd)
except EnvironmentError:
e = sys.exc_info()[1]
if verbose:
print("unable to run %%s" %% args[0])
print(e)
return None
stdout = p.communicate()[0].strip()
if sys.version >= '3':
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% args[0])
return None
return stdout
import sys
import re
import os.path
def get_expanded_variables(versionfile_source):
# the code embedded in _version.py can just fetch the value of these
# variables. When used from setup.py, we don't want to import
# _version.py, so we do it with a regexp instead. This function is not
# used from _version.py.
variables = {}
try:
for line in open(versionfile_source,"r").readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["full"] = mo.group(1)
except EnvironmentError:
pass
return variables
def versions_from_expanded_variables(variables, tag_prefix, verbose=False):
refnames = variables["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("variables are unexpanded, not using")
return {} # unexpanded, so not in an unpacked git-archive tarball
refs = set([r.strip() for r in refnames.strip("()").split(",")])
for ref in list(refs):
if not re.search(r'\d', ref):
if verbose:
print("discarding '%%s', no digits" %% ref)
refs.discard(ref)
# Assume all version tags have a digit. git's %%d expansion
# behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us
# distinguish between branches and tags. By ignoring refnames
# without digits, we filter out many common branch names like
# "release" and "stabilization", as well as "HEAD" and "master".
if verbose:
print("remaining refs: %%s" %% ",".join(sorted(refs)))
for ref in sorted(refs):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return { "version": r,
"full": variables["full"].strip() }
# no suitable tags, so we use the full revision id
if verbose:
print("no suitable tags, using full revision id")
return { "version": variables["full"].strip(),
"full": variables["full"].strip() }
def versions_from_vcs(tag_prefix, versionfile_source, verbose=False):
# this runs 'git' from the root of the source tree. That either means
# someone ran a setup.py command (and this code is in versioneer.py, so
# IN_LONG_VERSION_PY=False, thus the containing directory is the root of
# the source tree), or someone ran a project-specific entry point (and
# this code is in _version.py, so IN_LONG_VERSION_PY=True, thus the
# containing directory is somewhere deeper in the source tree). This only
# gets called if the git-archive 'subst' variables were *not* expanded,
# and _version.py hasn't already been rewritten with a short version
# string, meaning we're inside a checked out source tree.
try:
here = os.path.abspath(__file__)
except NameError:
# some py2exe/bbfreeze/non-CPython implementations don't do __file__
return {} # not always correct
# versionfile_source is the relative path from the top of the source tree
# (where the .git directory might live) to this file. Invert this to find
# the root from __file__.
root = here
if IN_LONG_VERSION_PY:
for i in range(len(versionfile_source.split("/"))):
root = os.path.dirname(root)
else:
root = os.path.dirname(here)
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %%s" %% root)
return {}
stdout = run_command([GIT, "describe", "--tags", "--dirty", "--always"],
cwd=root)
if stdout is None:
return {}
if not stdout.startswith(tag_prefix):
if verbose:
print("tag '%%s' doesn't start with prefix '%%s'" %% (stdout, tag_prefix))
return {}
tag = stdout[len(tag_prefix):]
stdout = run_command([GIT, "rev-parse", "HEAD"], cwd=root)
if stdout is None:
return {}
full = stdout.strip()
if tag.endswith("-dirty"):
full += "-dirty"
return {"version": tag, "full": full}
def versions_from_parentdir(parentdir_prefix, versionfile_source, verbose=False):
if IN_LONG_VERSION_PY:
# We're running from _version.py. If it's from a source tree
# (execute-in-place), we can work upwards to find the root of the
# tree, and then check the parent directory for a version string. If
# it's in an installed application, there's no hope.
try:
here = os.path.abspath(__file__)
except NameError:
# py2exe/bbfreeze/non-CPython don't have __file__
return {} # without __file__, we have no hope
# versionfile_source is the relative path from the top of the source
# tree to _version.py. Invert this to find the root from __file__.
root = here
for i in range(len(versionfile_source.split("/"))):
root = os.path.dirname(root)
else:
# we're running from versioneer.py, which means we're running from
# the setup.py in a source tree. sys.argv[0] is setup.py in the root.
here = os.path.abspath(sys.argv[0])
root = os.path.dirname(here)
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%%s', but '%%s' doesn't start with prefix '%%s'" %%
(root, dirname, parentdir_prefix))
return None
return {"version": dirname[len(parentdir_prefix):], "full": ""}
tag_prefix = "%(TAG_PREFIX)s"
parentdir_prefix = "%(PARENTDIR_PREFIX)s"
versionfile_source = "%(VERSIONFILE_SOURCE)s"
def get_versions(default={"version": "unknown", "full": ""}, verbose=False):
variables = { "refnames": git_refnames, "full": git_full }
ver = versions_from_expanded_variables(variables, tag_prefix, verbose)
if not ver:
ver = versions_from_vcs(tag_prefix, versionfile_source, verbose)
if not ver:
ver = versions_from_parentdir(parentdir_prefix, versionfile_source,
verbose)
if not ver:
ver = default
return ver
'''
def run_command(args, cwd=None, verbose=False):
    """Run *args* as a subprocess and return its stripped stdout as a str,
    or None when the command cannot be started or exits non-zero."""
    try:
        # remember shell=False, so use git.cmd on windows, not just git
        proc = subprocess.Popen(args, stdout=subprocess.PIPE, cwd=cwd)
    except EnvironmentError:
        err = sys.exc_info()[1]
        if verbose:
            print("unable to run %s" % args[0])
            print(err)
        return None
    output = proc.communicate()[0].strip()
    if sys.version >= '3':
        # Popen gives bytes on py3; decode to match py2's str behavior
        output = output.decode()
    if proc.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % args[0])
        return None
    return output
def get_expanded_variables(versionfile_source):
    """Scrape the git_refnames/git_full string values out of a generated
    _version.py without importing it; return {} when the file is missing.

    Used from setup.py, where importing _version.py would be undesirable.
    """
    variables = {}
    try:
        with open(versionfile_source, "r") as fobj:
            for line in fobj:
                stripped = line.strip()
                if stripped.startswith("git_refnames ="):
                    match = re.search(r'=\s*"(.*)"', line)
                    if match:
                        variables["refnames"] = match.group(1)
                elif stripped.startswith("git_full ="):
                    match = re.search(r'=\s*"(.*)"', line)
                    if match:
                        variables["full"] = match.group(1)
    except EnvironmentError:
        pass
    return variables
def versions_from_expanded_variables(variables, tag_prefix, verbose=False):
    """Turn expanded git-archive keyword values into a version dict.

    Returns {} when the keywords were never expanded (i.e. not a
    git-archive tarball).
    """
    refnames = variables["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("variables are unexpanded, not using")
        return {}  # unexpanded, so not in an unpacked git-archive tarball
    candidates = set(part.strip() for part in refnames.strip("()").split(","))
    # git's %d expansion strips the refs/heads/ and refs/tags/ prefixes, so
    # branches and tags are indistinguishable here. Requiring a digit filters
    # out common branch names ("master", "release", "HEAD", ...), assuming
    # every real version tag contains one.
    for name in list(candidates):
        if not re.search(r'\d', name):
            if verbose:
                print("discarding '%s', no digits" % name)
            candidates.discard(name)
    if verbose:
        print("remaining refs: %s" % ",".join(sorted(candidates)))
    for name in sorted(candidates):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if name.startswith(tag_prefix):
            version = name[len(tag_prefix):]
            if verbose:
                print("picking %s" % version)
            return {"version": version,
                    "full": variables["full"].strip()}
    # no suitable tags, so fall back to the full revision id
    if verbose:
        print("no suitable tags, using full revision id")
    full = variables["full"].strip()
    return {"version": full, "full": full}
def versions_from_vcs(tag_prefix, versionfile_source, verbose=False):
    """Ask 'git describe' for a version from a checked-out source tree.

    Returns {} when __file__ is unavailable, there is no .git directory,
    git cannot be run, or the described tag lacks *tag_prefix*.
    """
    # this runs 'git' from the root of the source tree. That either means
    # someone ran a setup.py command (and this code is in versioneer.py, so
    # IN_LONG_VERSION_PY=False, thus the containing directory is the root of
    # the source tree), or someone ran a project-specific entry point (and
    # this code is in _version.py, so IN_LONG_VERSION_PY=True, thus the
    # containing directory is somewhere deeper in the source tree). This only
    # gets called if the git-archive 'subst' variables were *not* expanded,
    # and _version.py hasn't already been rewritten with a short version
    # string, meaning we're inside a checked out source tree.
    try:
        here = os.path.abspath(__file__)
    except NameError:
        # some py2exe/bbfreeze/non-CPython implementations don't do __file__
        return {} # not always correct
    # versionfile_source is the relative path from the top of the source tree
    # (where the .git directory might live) to this file. Invert this to find
    # the root from __file__.
    root = here
    if IN_LONG_VERSION_PY:
        # NOTE(review): dead branch in versioneer.py itself (the module sets
        # IN_LONG_VERSION_PY = False); live only in the template copy.
        for i in range(len(versionfile_source.split("/"))):
            root = os.path.dirname(root)
    else:
        root = os.path.dirname(here)
    if not os.path.exists(os.path.join(root, ".git")):
        if verbose:
            print("no .git in %s" % root)
        return {}
    # --dirty appends "-dirty" on local modifications; --always falls back
    # to a bare commit id when no tag matches
    stdout = run_command([GIT, "describe", "--tags", "--dirty", "--always"],
                         cwd=root)
    if stdout is None:
        return {}
    if not stdout.startswith(tag_prefix):
        if verbose:
            print("tag '%s' doesn't start with prefix '%s'" % (stdout, tag_prefix))
        return {}
    tag = stdout[len(tag_prefix):]
    stdout = run_command([GIT, "rev-parse", "HEAD"], cwd=root)
    if stdout is None:
        return {}
    full = stdout.strip()
    # propagate the dirty marker onto the full revision id as well
    if tag.endswith("-dirty"):
        full += "-dirty"
    return {"version": tag, "full": full}
def versions_from_parentdir(parentdir_prefix, versionfile_source, verbose=False):
    """Derive a version from the name of the directory containing the source.

    Expects a directory named '<parentdir_prefix><version>', as created by
    unpacking a sdist or git-archive tarball.
    """
    if IN_LONG_VERSION_PY:
        # We're running from _version.py. If it's from a source tree
        # (execute-in-place), we can work upwards to find the root of the
        # tree, and then check the parent directory for a version string. If
        # it's in an installed application, there's no hope.
        try:
            here = os.path.abspath(__file__)
        except NameError:
            # py2exe/bbfreeze/non-CPython don't have __file__
            return {} # without __file__, we have no hope
        # versionfile_source is the relative path from the top of the source
        # tree to _version.py. Invert this to find the root from __file__.
        root = here
        for i in range(len(versionfile_source.split("/"))):
            root = os.path.dirname(root)
    else:
        # we're running from versioneer.py, which means we're running from
        # the setup.py in a source tree. sys.argv[0] is setup.py in the root.
        here = os.path.abspath(sys.argv[0])
        root = os.path.dirname(here)
    # Source tarballs conventionally unpack into a directory that includes
    # both the project name and a version string.
    dirname = os.path.basename(root)
    if not dirname.startswith(parentdir_prefix):
        if verbose:
            print("guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" %
                  (root, dirname, parentdir_prefix))
        # NOTE(review): returns None on failure while the sibling
        # versions_from_* helpers return {}; callers only test truthiness,
        # so the inconsistency is harmless in practice.
        return None
    return {"version": dirname[len(parentdir_prefix):], "full": ""}
def do_vcs_install(versionfile_source, ipy):
    """git-add the versioneer machinery, and make sure .gitattributes marks
    the version file 'export-subst' so git-archive expands its keywords."""
    for path in ("versioneer.py", versionfile_source, ipy):
        run_command([GIT, "add", path])
    present = False
    try:
        with open(".gitattributes", "r") as attrs:
            for line in attrs:
                stripped = line.strip()
                if stripped.startswith(versionfile_source):
                    if "export-subst" in stripped.split()[1:]:
                        present = True
    except EnvironmentError:
        pass
    if not present:
        # append the attribute exactly once, then track the file
        with open(".gitattributes", "a+") as attrs:
            attrs.write("%s export-subst\n" % versionfile_source)
        run_command([GIT, "add", ".gitattributes"])
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.7+) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
version_version = '%(version)s'
version_full = '%(full)s'
def get_versions(default={}, verbose=False):
return {'version': version_version, 'full': version_full}
"""
DEFAULT = {"version": "unknown", "full": "unknown"}
def versions_from_file(filename):
    """Parse a previously generated _version.py (SHORT_VERSION_PY form).

    Returns a dict with 'version' and/or 'full' for each assignment found;
    returns {} when the file is missing or holds neither assignment.
    """
    versions = {}
    try:
        # bug fix: the original leaked the file handle (open() without
        # close()); a context manager guarantees it is released
        with open(filename) as f:
            lines = f.readlines()
    except EnvironmentError:
        return versions
    for line in lines:
        mo = re.match("version_version = '([^']+)'", line)
        if mo:
            versions["version"] = mo.group(1)
        mo = re.match("version_full = '([^']+)'", line)
        if mo:
            versions["full"] = mo.group(1)
    return versions
def write_to_version_file(filename, versions):
    """Overwrite *filename* with a static SHORT_VERSION_PY rendering of
    *versions* (a dict with 'version' and 'full' keys)."""
    # bug fix: use a context manager so the handle is closed even when the
    # write raises (the original leaked it on failure)
    with open(filename, "w") as f:
        f.write(SHORT_VERSION_PY % versions)
    print("set %s to '%s'" % (filename, versions["version"]))
def get_best_versions(versionfile, tag_prefix, parentdir_prefix,
                      default=DEFAULT, verbose=False):
    """Return a {'version': ..., 'full': ...} dict from the best source.

    Tries, in order: expanded git-archive keywords, a generated
    _version.py, 'git describe', and the parent directory name, finally
    falling back to *default*.
    """
    # extract version from first of _version.py, 'git describe', parentdir.
    # This is meant to work for developers using a source checkout, for users
    # of a tarball created by 'setup.py sdist', and for users of a
    # tarball/zipball created by 'git archive' or github's download-from-tag
    # feature.
    # NOTE(review): this reads the module-global versionfile_source rather
    # than the 'versionfile' parameter for the keyword-expansion path; the
    # only caller (get_versions) passes the same value for both, so behavior
    # is unchanged in practice.
    variables = get_expanded_variables(versionfile_source)
    if variables:
        ver = versions_from_expanded_variables(variables, tag_prefix)
        if ver:
            if verbose:
                print("got version from expanded variable %s" % ver)
            return ver
    ver = versions_from_file(versionfile)
    if ver:
        if verbose:
            print("got version from file %s %s" % (versionfile, ver))
        return ver
    ver = versions_from_vcs(tag_prefix, versionfile_source, verbose)
    if ver:
        if verbose:
            print("got version from git %s" % ver)
        return ver
    ver = versions_from_parentdir(parentdir_prefix, versionfile_source, verbose)
    if ver:
        if verbose:
            print("got version from parentdir %s" % ver)
        return ver
    # bug fix: previously printed the falsy 'ver' left over from the failed
    # parentdir lookup instead of the default that is actually returned
    if verbose:
        print("got version from default %s" % default)
    return default
def get_versions(default=DEFAULT, verbose=False):
    """Return the full {'version', 'full'} dict for the configured project.

    The module-level configuration (versionfile_source, tag_prefix,
    parentdir_prefix) must have been assigned by setup.py first.
    """
    assert versionfile_source is not None, "please set versioneer.versionfile_source"
    assert tag_prefix is not None, "please set versioneer.tag_prefix"
    assert parentdir_prefix is not None, "please set versioneer.parentdir_prefix"
    return get_best_versions(versionfile_source, tag_prefix, parentdir_prefix,
                             default=default, verbose=verbose)
def get_version(verbose=False):
    """Return just the version string (see get_versions for the full dict)."""
    return get_versions(verbose=verbose)["version"]
class cmd_version(Command):
    """'setup.py version': print the currently computed version string."""
    description = "report generated version string"
    user_options = []
    boolean_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        print("Version is currently: %s" % get_version(verbose=True))
class cmd_build(_build):
    """'setup.py build': run a normal build, then rewrite the copied
    _version.py in build/ with a frozen version string."""
    def run(self):
        versions = get_versions(verbose=True)
        _build.run(self)
        # now locate _version.py in the new build/ directory and replace it
        # with an updated value
        target_versionfile = os.path.join(self.build_lib, versionfile_build)
        print("UPDATING %s" % target_versionfile)
        # unlink first so a hardlinked build copy doesn't clobber the source
        os.unlink(target_versionfile)
        f = open(target_versionfile, "w")
        f.write(SHORT_VERSION_PY % versions)
        f.close()
class cmd_sdist(_sdist):
    """'setup.py sdist': build the tarball with the computed version baked
    into both the distribution metadata and its _version.py."""
    def run(self):
        versions = get_versions(verbose=True)
        # stashed for make_release_tree, which runs later in the same command
        self._versioneer_generated_versions = versions
        # unless we update this, the command will keep using the old version
        self.distribution.metadata.version = versions["version"]
        return _sdist.run(self)

    def make_release_tree(self, base_dir, files):
        _sdist.make_release_tree(self, base_dir, files)
        # now locate _version.py in the new base_dir directory (remembering
        # that it may be a hardlink) and replace it with an updated value
        target_versionfile = os.path.join(base_dir, versionfile_source)
        print("UPDATING %s" % target_versionfile)
        # unlink before rewriting so the hardlinked original isn't modified
        os.unlink(target_versionfile)
        f = open(target_versionfile, "w")
        f.write(SHORT_VERSION_PY % self._versioneer_generated_versions)
        f.close()
# Appended (exactly once) to the project's __init__.py so `__version__` is
# exposed from the generated _version.py at import time.
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
class cmd_update_files(Command):
    """'setup.py update_files': generate _version.py from LONG_VERSION_PY,
    patch __init__.py, and git-add everything via do_vcs_install."""
    description = "modify __init__.py and create _version.py"
    user_options = []
    boolean_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        # __init__.py lives in the same package directory as the version file
        ipy = os.path.join(os.path.dirname(versionfile_source), "__init__.py")
        print(" creating %s" % versionfile_source)
        f = open(versionfile_source, "w")
        # fill in the project-specific configuration; DOLLAR keeps literal
        # '$Format$' keywords intact for later git-archive expansion
        f.write(LONG_VERSION_PY % {"DOLLAR": "$",
                                   "TAG_PREFIX": tag_prefix,
                                   "PARENTDIR_PREFIX": parentdir_prefix,
                                   "VERSIONFILE_SOURCE": versionfile_source,
                                   })
        f.close()
        try:
            old = open(ipy, "r").read()
        except EnvironmentError:
            old = ""
        # append the snippet only once, so re-running the command is a no-op
        if INIT_PY_SNIPPET not in old:
            print(" appending to %s" % ipy)
            f = open(ipy, "a")
            f.write(INIT_PY_SNIPPET)
            f.close()
        else:
            print(" %s unmodified" % ipy)
        do_vcs_install(versionfile_source, ipy)
def get_cmdclass():
    """Map distutils command names to the versioneer-aware overrides, for
    passing as setup(cmdclass=...)."""
    commands = {}
    commands['version'] = cmd_version
    commands['update_files'] = cmd_update_files
    commands['build'] = cmd_build
    commands['sdist'] = cmd_sdist
    return commands
| shiquanwang/numba | versioneer.py | Python | bsd-2-clause | 25,596 | [
"Brian"
] | 615c15f47b1a6d1d90d8f070f6a7db0fa39504a511f0938050d236b1cc7c4ed4 |
import mixtape.featurizer, mixtape.tica, mixtape.cluster, mixtape.markovstatemodel, mixtape.ghmm
import numpy as np
import mdtraj as md
from parameters import load_trajectories, build_full_featurizer
import sklearn.pipeline, sklearn.externals.joblib
import mixtape.utils
# NOTE(review): this script assumes an interactive pylab namespace -- plot,
# xlabel, ylabel, title, legend, ylim and savefig are never imported here.
# Confirm it is run under `ipython --pylab` or similar.

# sweep hyper-parameters
n_choose = 100  # matches the feature count baked into the pickled featurizer filename
stride = 1
lag_time = 1

# load trajectories and split them into interleaved train/test halves
trj0, trajectories, filenames = load_trajectories(stride=stride)
train = trajectories[0::2]
test = trajectories[1::2]

# previously fitted featurizer, pickled by an earlier run
featurizer = sklearn.externals.joblib.load("./featurizer-%d.job" % n_choose)

# grid over tICA dimensionality and KMeans state counts
n_components_list = [8]
n_states_list = range(5, 60)
train_scores = np.zeros((len(n_components_list), len(n_states_list)))
test_scores = np.zeros((len(n_components_list), len(n_states_list)))

# fit a features -> tICA -> subsample -> KMeans -> MSM pipeline for every
# grid point and record its likelihood score on both splits
for i, n_components in enumerate(n_components_list):
    for j, n_states in enumerate(n_states_list):
        print(n_components, n_states)
        tica = mixtape.tica.tICA(n_components=n_components, lag_time=lag_time)
        subsampler = mixtape.utils.Subsampler(lag_time=lag_time)
        msm = mixtape.markovstatemodel.MarkovStateModel(n_timescales=n_components)
        cluster = mixtape.cluster.KMeans(n_states)
        pipeline = sklearn.pipeline.Pipeline([("features", featurizer), ('tica', tica), ("subsampler", subsampler), ("cluster", cluster), ("msm", msm)])
        pipeline.fit(train)
        train_scores[i, j] = pipeline.score(train)
        test_scores[i, j] = pipeline.score(test)

# plot train/test score curves against state count and save the figure
plot(n_states_list, train_scores.T, 'o', label="train")
plot(n_states_list, test_scores.T, 'o', label="test")
xlabel("n_states")
ylabel("Score")
title("tICA KMeans SETD2")
legend(loc=0)
ylim(4, 10)
savefig("/home/kyleb/src/kyleabeauchamp/MixtapeTalk/figures/SETD2_tICA_KMeans.png")
| kyleabeauchamp/PMTStuff | code/test_cv_cluster.py | Python | gpl-2.0 | 1,688 | [
"MDTraj"
] | 5c14cef8e9c75a8427e6518cda875c3aa84a35847ce3fbd5159ea409cabd7455 |
# Written by Advi
# v1.2, NewAge: Removed 'Might Mortal' from daggers, no longer available in C3, added rest of weapons, replaced 'Bigsword' with 'Fist'
# v1.3 DrLecter: added adena support; NewAge: added correct prices/crystals
import sys
from net.sf.l2j.gameserver.model.quest import State
from net.sf.l2j.gameserver.model.quest import QuestState
from net.sf.l2j.gameserver.model.quest.jython import QuestJython as JQuest
from net.sf.l2j.gameserver import ItemTable
# NPC ids of the blacksmiths that offer this weapon-enhancement service.
SMITHS = [7283,7298,7300,7317,7458,7471,7526,7527,7536,7621,7678,7688,7846,7898,8002,8044,8271,8274,8316,8539,8583,8626,8668]
############################## Feel Free to add more Weapons ##########################################################################################################3
# Weapon enhancement definition WeaponID:[Icon, [Enhancement, newWeaponID, CrystalID, MaterialID, MaterialQuant, Adena], ...]
# Each inner entry: [SA name, enhanced-weapon item id, soul-crystal item id,
# gemstone item id, gemstone count, adena fee].
EnhanceList={
#Bows
281:["weapon_crystallized_ice_bow_i01", [["Guidance", 4810, 4634, 2131, 97, 291000], ["Evasion", 4811, 4645, 2131, 97, 291000], ["Quick Recovery", 4812, 4656, 2131, 97, 291000]]],
285:["weapon_noble_elven_bow_i01", [["Evasion", 4816, 4635, 2131, 238, 714000], ["Miser", 4817, 4646, 2131, 238, 714000], ["Cheap Shot", 4818, 4657, 2131, 238, 714000]]],
283:["weapon_akat_long_bow_i01", [["Guidance", 4819, 4636, 2131, 306, 918000], ["Evasion", 4820, 4647, 2131, 306, 918000], ["Miser", 4821, 4658, 2131, 306, 918000]]],
286:["weapon_eminence_bow_i01", [["Guidance", 4822, 4637, 2131, 555, 1665000], ["Miser", 4823, 4648, 2131, 555, 1665000], ["Cheap Shot", 4824, 4659, 2131, 555, 1665000]]],
284:["weapon_dark_elven_long_bow_i01", [["Evasion", 4825, 4638, 2132, 222, 2220000], ["Critical Bleed", 4826, 4649, 2132, 222, 2220000], ["Miser", 4827, 4660, 2132, 222, 2220000]]],
287:["weapon_hazard_bow_i01", [["Guidance", 4828, 4639, 2132, 339, 3390000], ["Quick Recovery", 4829, 4650, 2132, 339, 3390000], ["Cheap Shot", 4830, 4661, 2132, 339, 3390000]]],
282:["weapon_elemental_bow_i01", [["Guidance", 4813, 4635, 2131, 238, 714000], ["Miser", 4814, 4646, 2131, 238, 714000], ["Quick Recovery", 4815, 4657, 2131, 238, 714000]]],
# Swords
72: ["weapon_stormbringer_i01", [["Critical Anger", 4681, 4634, 2131, 97, 291000], ["Focus", 4682, 4645, 2131, 97, 291000], ["Light", 4683, 4656, 2131, 97, 291000]]],
73: ["weapon_shamshir_i01", [["Guidance", 4684, 4635, 2131, 238, 714000], ["Back Blow", 4685, 4646, 2131, 238, 714000], ["Rsk. Evasion", 4686, 4657, 2131, 238, 714000]]],
74: ["weapon_katana_i01", [["Focus", 4687, 4635, 2131, 238, 714000], ["Critical Damage", 4688, 4646, 2131, 238, 714000], ["Haste", 4689, 4657, 2131, 238, 714000]]],
131:["weapon_spirits_sword_i01", [["Critical Damage", 4690, 4635, 2131, 238, 714000], ["Critical Poison", 4691, 4646, 2131, 238, 714000], ["Haste", 4692, 4657, 2131, 238, 714000]]],
133:["weapon_raid_sword_i01", [["Focus", 4693, 4635, 2131, 238, 714000], ["Critical Drain", 4694, 4646, 2131, 238, 714000], ["Critical Poison", 4695, 4657, 2131, 238, 714000]]],
76: ["weapon_sword_of_delusion_i01", [["Focus", 4699, 4636, 2131, 306, 918000], ["Health", 4700, 4647, 2131, 306, 918000], ["Rsk. Haste", 4701, 4658, 2131, 306, 918000]]],
77: ["weapon_tsurugi_i01", [["Focus", 4702, 4636, 2131, 306, 918000], ["Critical Damage", 4703, 4647, 2131, 306, 918000], ["Haste", 4704, 4658, 2131, 306, 918000]]],
134:["weapon_sword_of_nightmare_i01", [["Health", 4705, 4636, 2131, 306, 918000], ["Focus", 4706, 4647, 2131, 306, 918000], ["Light", 4707, 4658, 2131, 306, 918000]]],
142:["weapon_keshanberk_i01", [["Guidance", 4714, 4638, 2132, 222, 2220000], ["Focus", 4715, 4649, 2132, 222, 2220000], ["Back Blow", 4716, 4660, 2132, 222, 2220000]]],
148:["weapon_sword_of_valhalla_i01", [["Acumen", 7722, 4638, 2132, 222, 2220000], ["Magic Weakness", 7723, 4649, 2132, 222, 2220000], ["Magic Regeneration", 7724, 4660, 2132, 222, 2220000]]],
79: ["weapon_sword_of_damascus_i01", [["Focus", 4717, 4639, 2132, 339, 3390000], ["Critical Damage", 4718, 4650, 2132, 339, 3390000], ["Haste", 4719, 4661, 2132, 339, 3390000]]],
78: ["weapon_great_sword_i01", [["Health", 4723, 4638, 2132, 222, 2220000], ["Critical Damage", 4724, 4649, 2132, 222, 2220000], ["Focus", 4725, 4660, 2132, 222, 2220000]]],
132:["weapon_sword_of_limit_i01", [["Guidance", 6307, 4636, 2131, 306, 918000], ["Critical Drain", 6308, 4647, 2131, 306, 918000], ["Health", 6309, 4658, 2131, 306, 918000]]],
145:["weapon_deathbreath_sword_i01", [["Empower", 6310, 4636, 2131, 306, 918000], ["Magic Power", 6311, 4647, 2131, 306, 918000], ["Magic Silence", 6312, 4658, 2131, 306, 918000]]],
84:["weapon_homunkuluss_sword_i01", [["Acumen", 6313, 4636, 2131, 306, 918000], ["Conversion", 6314, 4647, 2131, 306, 918000], ["Magic Paralyze", 6315, 4658, 2131, 306, 918000]]],
75:["weapon_caliburs_i01", [["Guidance", 4696, 4636, 2131, 306, 918000], ["Focus", 4697, 4647, 2131, 306, 918000], ["Critical Damage", 4698, 4658, 2131, 306, 918000]]],
135:["weapon_samurai_longsword_i01", [["Focus", 4708, 4637, 2131, 555, 1665000], ["Critical Damage", 4709, 4648, 2131, 555, 1665000], ["Haste", 4710, 4659, 2131, 555, 1665000]]],
71:["weapon_flamberge_i01", [["Critical Damage", 4711, 4634, 2131, 97, 291000], ["Focus", 4712, 4645, 2131, 97, 291000], ["Light", 4713, 4656, 2131, 97, 291000]]],
5286:["weapon_berserker_blade_i01", [["Focus", 6347, 4637, 2131, 555, 1665000], ["Critical Damage", 6348, 4648, 2131, 555, 1665000], ["Haste", 6349, 4659, 2131, 555, 1665000]]],
# Blunts
89:["weapon_big_hammer_i01", [["Health", 4726, 4634, 2131, 97, 291000], ["Rsk. Focus", 4727, 4645, 2131, 97, 291000], ["Haste", 4728, 4656, 2131, 97, 291000]]],
160:["weapon_battle_axe_i01", [["Anger", 4729, 4634, 2131, 97, 291000], ["Rsk. Focus", 4730, 4645, 2131, 97, 291000], ["Haste", 4731, 4656, 2131, 97, 291000]]],
161:["weapon_war_pick_i01", [["Anger", 4732, 4634, 2131, 97, 291000], ["Rsk. Focus", 4733, 4645, 2131, 97, 291000], ["Haste", 4734, 4656, 2131, 97, 291000]]],
193:["weapon_stick_of_faith_i01", [["Mana Up", 7701, 4634, 2131, 97, 291000], ["Magic Hold", 7702, 4645, 2131, 97, 291000], ["Magic Shield", 7703, 4656, 2131, 97, 291000]]],
162:["weapon_war_axe_i01", [["Anger", 4741, 4636, 2131, 306, 918000], ["Health", 4742, 4647, 2131, 306, 918000], ["Haste", 4743, 4658, 2131, 306, 918000]]],
173:["weapon_skull_graver_i01", [["Anger", 4735, 4634, 2131, 97, 291000], ["Health", 4736, 4645, 2131, 97, 291000], ["Rsk. Focus", 4737, 4656, 2131, 97, 291000]]],
2502:["weapon_dwarven_warhammer_i01", [["Anger", 4738, 4635, 2131, 238, 714000], ["Health", 4739, 4646, 2131, 238, 714000], ["Haste", 4740, 4657, 2131, 238, 714000]]],
2503:["weapon_yaksa_mace_i01", [["Anger", 4744, 4637, 2131, 555, 1665000], ["Health", 4745, 4648, 2131, 555, 1665000], ["Rsk. Focus", 4746, 4659, 2131, 555, 1665000]]],
91: ["weapon_heavy_war_axe_i01", [["Anger", 4747, 4638, 2132, 222, 2220000], ["Health", 4748, 4649, 2132, 222, 2220000], ["Rsk. Focus", 4749, 4660, 2132, 222, 2220000]]],
171:["weapon_deadmans_glory_i01", [["Anger", 4750, 4639, 2132, 339, 3390000], ["Health", 4751, 4650, 2132, 339, 3390000], ["Haste", 4752, 4661, 2132, 339, 3390000]]],
175:["weapon_art_of_battle_axe_i01", [["Health", 4753, 4639, 2132, 339, 3390000], ["Rsk. Focus", 4754, 4650, 2132, 339, 3390000], ["Haste", 4755, 4661, 2132, 339, 3390000]]],
192:["weapon_crystal_staff_i01", [["Rsk. Evasion", 4867, 4634, 2131, 97, 291000], ["Mana Up", 4868, 4645, 2131, 97, 291000], ["Bodily Blessing", 4869, 4656, 2131, 97, 291000]]],
195:["weapon_cursed_staff_i01", [["Magic Hold", 4873, 4635, 2131, 238, 714000], ["Magic Poison", 4874, 4646, 2131, 238, 714000], ["Magic Weakness", 4875, 4657, 2131, 238, 714000]]],
174:["weapon_nirvana_axe_i01", [["Magic Power", 7707, 4636, 2131, 306, 918000], ["Magic Poison", 7708, 4647, 2131, 306, 918000], ["Magic Weakness", 7709, 4658, 2131, 306, 918000]]],
196:["weapon_stick_of_eternity_i01", [["Magic Empower", 7704, 4636, 2131, 306, 918000], ["Rsk. Evasion", 7705, 4647, 2131, 306, 918000], ["Bless The Body", 7706, 4658, 2131, 306, 918000]]],
197:["weapon_paradia_staff_i01", [["Magic Regeneration", 4876, 4636, 2131, 306, 918000], ["Mental Shield", 4877, 4647, 2131, 306, 918000], ["Magic Hold", 4878, 4658, 2131, 306, 918000]]],
198:["weapon_inferno_staff_i01", [["Acumen", 7716, 4636, 2131, 306, 918000], ["Magic Silence", 7717, 4647, 2131, 306, 918000], ["Magic Paralyze", 7718, 4658, 2131, 306, 918000]]],
200:["weapon_sages_staff_i01", [["Magic Hold", 4882, 4636, 2131, 306, 918000], ["Magic Poison", 4883, 4647, 2131, 306, 918000], ["Magic Weakness", 4884, 4658, 2131, 306, 918000]]],
201:["weapon_club_of_nature_i01", [["Acumen", 7710, 4636, 2131, 306, 918000], ["Mental Shield", 7711, 4647, 2131, 306, 918000], ["Magic Hold", 7712, 4658, 2131, 306, 918000]]],
202:["weapon_mace_of_underworld_i01", [["Mana Up", 7713, 4636, 2131, 306, 918000], ["Magic Silence", 7714, 4647, 2131, 306, 918000], ["Conversion", 7715, 4658, 2131, 306, 918000]]],
203:["weapon_paagrio_axe_i01", [["Mana Up", 4885, 4636, 2131, 225, 675000], ["Magic Weakness", 4886, 4647, 2131, 225, 675000], ["Magic Chaos", 4887, 4658, 2131, 225, 675000]]],
204:["weapon_deadmans_staff_i01", [["Magic Regeneration", 4888, 4637, 2131, 555, 1665000], ["Mental Shield", 4889, 4648, 2131, 555, 1665000], ["Magic Hold", 4890, 4659, 2131, 555, 1665000]]],
205:["weapon_ghouls_staff_i01", [["Rsk. Evasion", 4891, 4637, 2131, 555, 1665000], ["Mana Up", 4892, 4648, 2131, 555, 1665000], ["Bodily Blessing", 4893, 4659, 2131, 555, 1665000]]],
206:["weapon_demons_staff_i01", [["Magic Poison", 4894, 4637, 2131, 555, 1665000], ["Magic Weakness", 4895, 4648, 2131, 555, 1665000], ["Magic Chaos", 4896, 4659, 2131, 555, 1665000]]],
92: ["weapon_sprites_staff_i01", [["Magic Regeneration", 4897, 4638, 2132, 222, 2220000], ["Mental Shield", 4898, 4649, 2132, 222, 2220000], ["Magic Hold", 4899, 4660, 2132, 222, 2220000]]],
210:["weapon_staff_of_evil_spirit_magic_i01", [["Magic Focus", 4900, 4639, 2132, 339, 3390000], ["Bodily Blessing", 4901, 4650, 2132, 339, 3390000], ["Magic Poison", 4902, 4661, 2132, 339, 3390000]]],
191:["weapon_heavy_doom_hammer_i01", [["Magic Regeneration", 4864, 4634, 2131, 97, 291000], ["Mental Shield", 4865, 4645, 2131, 97, 291000], ["Magic Hold", 4866, 4656, 2131, 97, 291000]]],
194:["weapon_heavy_doom_axe_i01", [["Magic Poison", 4870, 4634, 2131, 97, 291000], ["Magic Weakness", 4871, 4645, 2131, 97, 291000], ["Magic Chaos", 4872, 4656, 2131, 97, 291000]]],
199:["weapon_paagrio_hammer_i01", [["Rsk. Evasion", 4879, 4636, 2131, 306, 918000], ["Magic Poison", 4880, 4647, 2131, 306, 918000], ["Magic Weakness", 4881, 4658, 2131, 306, 918000]]],
# Dagger'
231:["weapon_grace_dagger_i01", [["Evasion", 4768, 4636, 2131, 306, 918000], ["Focus", 4769, 4647, 2131, 306, 918000], ["Back Blow", 4770, 4658, 2131, 306, 918000]]],
233:["weapon_dark_screamer_i01", [["Evasion", 4771, 4636, 2131, 306, 918000], ["Focus", 4772, 4647, 2131, 306, 918000], ["Critical Bleed", 4773, 4658, 2131, 306, 918000]]],
228:["weapon_crystal_dagger_i01", [["Critical Bleed", 4774, 4637, 2131, 555, 1665000], ["Critical Poison", 4775, 4648, 2131, 555, 1665000], ["Critical Damage", 6358, 4659, 2131, 555, 1665000]]],
229:["weapon_kris_i01", [["Evasion", 4777, 4638, 2132, 222, 2220000], ["Focus", 4778, 4649, 2132, 222, 2220000], ["Back Blow", 4779, 4660, 2132, 222, 2220000]]],
243:["weapon_hell_knife_i01", [["Magic Regeneration", 7813, 4638, 2132, 222, 2220000], ["Mental Shield", 7814, 4649, 2132, 222, 2220000], ["Magic Weakness", 7815, 4660, 2132, 222, 2220000]]],
234:["weapon_demons_sword_i01", [["Critical Bleed", 4780, 4639, 2132, 339, 3390000], ["Critical Poison", 4781, 4650, 2132, 339, 3390000], ["Critical Damage", 6359, 4661, 2132, 339, 3390000]]],
226:["weapon_cursed_dagger_i01", [["Critical Bleed", 4759, 4634, 2131, 97, 291000], ["Critical Poison", 4760, 4645, 2131, 97, 291000], ["Rsk. Haste", 4761, 4656, 2131, 97, 291000]]],
232:["weapon_darkelven_dagger_i01", [["Focus", 4762, 4634, 2131, 97, 291000], ["Back Blow", 4763, 4645, 2131, 97, 291000], ["Rsk. Haste", 6356, 4656, 2131, 97, 291000]]],
227:["weapon_stiletto_i01", [["Critical Bleed", 4765, 4635, 2131, 238, 714000], ["Critical Poison", 4766, 4646, 2131, 238, 714000], ["Rsk. Haste", 6357, 4657, 2131, 238, 714000]]],
242:["weapon_dagger_of_magicflame_i01", [["Mana Up", 7810, 4635, 2131, 238, 714000], ["Magic Hold", 7811, 4646, 2131, 238, 714000], ["Magic Silence", 7812, 4657, 2131, 238, 714000]]],
# Poleaxe'
301:["weapon_scorpion_i01", [["Anger", 4846, 4636, 2131, 225, 675000], ["Critical Stun", 4847, 4647, 2131, 225, 675000], ["Long Blow", 4848, 4659, 2131, 225, 675000]]],
303:["weapon_widow_maker_i01", [["Critical Stun", 4849, 4636, 2131, 225, 675000], ["Long Blow", 4850, 4647, 2131, 225, 675000], ["Wide Blow", 4851, 4658, 2131, 225, 675000]]],
299:["weapon_orcish_poleaxe_i01", [["Critical Stun", 4852, 4637, 2131, 555, 1665000], ["Long Blow", 4853, 4648, 2131, 555, 1665000], ["Wide Blow", 4854, 4659, 2131, 555, 1665000]]],
300:["weapon_great_axe_i01", [["Anger", 4855, 4638, 2132, 222, 2220000], ["Critical Stun", 4856, 4649, 2132, 222, 2220000], ["Light", 4857, 4660, 2132, 222, 2220000]]],
97: ["weapon_lance_i01", [["Anger", 4858, 4639, 2132, 339, 3390000], ["Critical Stun", 4859, 4650, 2132, 339, 3390000], ["Long Blow", 4860, 4661, 2132, 339, 3390000]]],
96:["weapon_scythe_i01", [["Anger", 4834, 4634, 2131, 97, 291000], ["Critical Stun", 4835, 4645, 2131, 97, 291000], ["Light", 4836, 4656, 2131, 97, 291000]]],
298:["weapon_orcish_glaive_i01", [["Anger", 4837, 4634, 2131, 97, 291000], ["Critical Stun", 4838, 4645, 2131, 97, 291000], ["Long Blow", 4839, 4656, 2131, 97, 291000]]],
302:["weapon_body_slasher_i01", [["Critical Stun", 4840, 4634, 2131, 97, 291000], ["Long Blow", 4841, 4645, 2131, 97, 291000], ["Wide Blow", 4842, 4656, 2131, 97, 291000]]],
94:["weapon_bech_de_corbin_i01", [["Critical Stun", 4843, 4635, 2131, 238, 714000], ["Long Blow", 4844, 4646, 2131, 238, 714000], ["Light", 4845, 4657, 2131, 238, 714000]]],
95:["weapon_poleaxe_i01",[["Critical Stun", 7719, 4636, 2131, 306, 918000], ["Long Blow", 7720, 4647, 2131, 306, 918000], ["Wide Blow", 7721, 4658, 2131, 306, 918000]]],
# Fist'
263:["weapon_chakram_i01", [["Critical Drain", 4789, 4634, 2131, 97, 291000], ["Critical Poison", 4790, 4645, 2131, 97, 291000], ["Rsk. Haste", 4791, 4656, 2131, 97, 291000]]],
265:["weapon_fist_blade_i01", [["Rsk. Evasion", 4792, 4636, 2131, 306, 918000], ["Rsk. Haste", 4793, 4647, 2131, 306, 918000], ["Haste", 4794, 4658, 2131, 306, 918000]]],
266:["weapon_great_pata_i01", [["Critical Drain", 4795, 4637, 2131, 555, 1665000], ["Critical Poison", 4796, 4648, 2131, 555, 1665000], ["Rsk. Haste", 4797, 4659, 2131, 555, 1665000]]],
267:["weapon_arthro_nail_i01", [["Critical Poison", 4801, 4638, 2132, 222, 2220000], ["Rsk. Evasion", 4802, 4649, 2132, 222, 2220000], ["Rsk. Haste", 4803, 4660, 2132, 222, 2220000]]],
268:["weapon_bellion_cestus_i01", [["Critical Drain", 4804, 4639, 2132, 339, 3390000], ["Critical Poison", 4805, 4650, 2132, 339, 3390000], ["Rsk. Haste", 4806, 4661, 2132, 339, 3390000]]],
4233:["weapon_knuckle_dust_i01", [["Rsk. Evasion", 4798, 4635, 2131, 238, 714000], ["Rsk. Haste", 4799, 4646, 2131, 238, 714000], ["Haste", 4800, 4657, 2131, 238, 714000]]],
}
############################################################## DO NOT MODIFY BELOW THIS LINE #####################################################################################
def getItemName(Item):
    """Return an item's display name, prefixed with "+<level>" when enchanted."""
    base_name = Item.getItem().getName()
    enchant = Item.getEnchantLevel()
    if enchant > 0:
        return "+" + str(enchant) + " " + base_name
    return base_name
def getMaterialName(MaterialID):
    """Map a material/crystal item id to its human-readable name."""
    if 2131 <= MaterialID <= 2133:
        # Gemstone C (2131), B (2132), A (2133)
        return "Gemstone " + chr(ord('A') + 2133 - MaterialID)
    for (lo, hi, color) in ((4629, 4639, "Red"), (4640, 4650, "Green"), (4651, 4661, "Blue")):
        if lo <= MaterialID <= hi:
            return color + " Soul Crystal - Stage " + str(MaterialID - lo)
    return "Unknown material"
def getMaterialIcon(MaterialID):
    """Map a material/crystal item id to its client icon name."""
    if 2131 <= MaterialID <= 2133:
        return "etc_crystal_ball_green_i00"
    if 4629 <= MaterialID <= 4639:
        return "etc_crystal_red_i00"
    if 4640 <= MaterialID <= 4650:
        return "etc_crystal_green_i00"
    if 4651 <= MaterialID <= 4661:
        return "etc_crystal_blue_i00"
    return "etc_unknown_material_i00"
# Main Code
class Quest (JQuest) :
    """Blacksmith weapon-enhancement dialog (quest id 1007).

    Drives an HTML bypass-event flow:
      "1"                   - list enhanceable, unequipped weapons in inventory
      "2_<objId>.<enhIdx>"  - show materials required for one enhancement
      "3_<objId>.<enhIdx>"  - perform the exchange, keeping the enchant level
    Any other event cancels the trade and exits the quest state.
    """
    def __init__(self,id,name,descr): JQuest.__init__(self,id,name,descr)
    def onEvent (self,event,st) :
        # Default reply: echo the raw event (overridden by every branch below).
        htmltext = event
        # Creates a list of the player's enhanceable weapons to choose from.
        if event == "1":
            htmltext = ""
            for Item in st.getPlayer().getInventory().getItems():
                # Only unequipped weapons with a known enhancement recipe.
                if Item.getItemId() in EnhanceList and not Item.isEquipped():
                    Icon, Enhancements = EnhanceList[Item.getItemId()]
                    # EnhancID is the index of the enhancement within the
                    # weapon's recipe list; it is encoded into the bypass link.
                    EnhancID = 0
                    for Name, WeaponID, CrystalID, MaterialID, MaterialQuant,Adena in Enhancements:
                        htmltext += "<tr>\n<td width=35><img src=\"icon." + Icon + "\" width=32 height=32 align=\"left\"></td>\n" \
                                    "<td width=835><table border=0 width=\"835\">\n<tr><td><a action=\"bypass -h Quest 1007_enhance 2_" + str(Item.getObjectId()) + "." + str(EnhancID) + "\">" + getItemName(Item) + ": " + Name + "</a></td></tr>\n" \
                                    "<tr><td><font color=\"B09878\">Enhance</font></td></tr></table></td>\n</tr>"
                        EnhancID += 1
            if htmltext == "":
                htmltext = "<tr><td>You have no enhancable weapon in your inventory</td></tr>"
            htmltext = "<html><body>\nList:\n<left>\n<table width=870 border=0>\n" + htmltext + "</table>\n<br></left></body></html>"
            return htmltext
        # Shows how many materials are needed; the OK button carries the
        # matching "3_..." event to actually perform the enhancement.
        elif event.startswith("2_"):
            # Event payload is "<objectId>.<enhancementIndex>".
            reqEnh = event.replace("2_", "").split(".")
            ObjectID = int(reqEnh[0])
            EnhancID = int(reqEnh[1])
            Item = st.getPlayer().getInventory().getItemByObjectId(ObjectID)
            if Item.getItemId() in EnhanceList:
                Icon, Enhancements = EnhanceList[Item.getItemId()]
                Name, WeaponID, CrystalID, MaterialID, MaterialQuant, Adena = Enhancements[EnhancID]
                # Fill the 2.htm template's placeholders with this recipe.
                htmltext = st.showHtmlFile("2.htm")
                return htmltext.replace("<WeaponName>", getItemName(Item) + ": " + Name)\
                               .replace("<WeaponIcon>", Icon)\
                               .replace("<CrystalName>", getMaterialName(CrystalID))\
                               .replace("<CrystalIcon>", getMaterialIcon(CrystalID))\
                               .replace("<MaterialName>", getMaterialName(MaterialID))\
                               .replace("<MaterialIcon>", getMaterialIcon(MaterialID))\
                               .replace("<MaterialQuantity>", str(MaterialQuant))\
                               .replace("<Adena>", str(Adena))\
                               .replace("<EventOK>", "3_" + str(Item.getObjectId()) + "." + str(EnhancID))
        # Performs the enhancement using object ids (not item ids); no html
        # template is shown, just a plain text result.
        elif event.startswith("3_"):
            reqEnh = event.replace("3_", "").split(".")
            ObjectID = int(reqEnh[0])
            EnhancID = int(reqEnh[1])
            Item = st.getPlayer().getInventory().getItemByObjectId(ObjectID)
            if Item.getItemId() in EnhanceList:
                Icon, Enhancements = EnhanceList[Item.getItemId()]
                Name, WeaponID, CrystalID, MaterialID, MaterialQuant, Adena = Enhancements[EnhancID]
                # Require 1 soul crystal, the gemstones, and the adena fee (id 57).
                if st.getQuestItemsCount(CrystalID) >= 1 and st.getQuestItemsCount(MaterialID) >= MaterialQuant and st.getQuestItemsCount(57) >= Adena :
                    # Preserve the enchant level across the weapon swap.
                    EnchantLevel = Item.getEnchantLevel()
                    st.getPlayer().destroyItem("enhance_1007", ObjectID, 1, st.getPlayer(), 0)
                    NewItem = ItemTable.getInstance().createItem("enhance", WeaponID, 1, st.getPlayer())
                    NewItem.setEnchantLevel(EnchantLevel)
                    st.getPlayer().addItem("enhance", NewItem, st.getPlayer(), 0)
                    st.takeItems(CrystalID, 1)
                    st.takeItems(MaterialID, MaterialQuant)
                    st.takeItems(57,Adena)
                    htmltext = "Item has been succesfully enhanced!"
                else :
                    htmltext = "You do not have enough materials."
        # Any other event (0 or malformed): cancel the trade and exit.
        else :
            htmltext = "Trade has been cancelled."
            st.setState(COMPLETED)
            st.exitQuest(1)
        return htmltext
    # Entry point when the player talks to a blacksmith: start the quest
    # state and show the initial menu page.
    def onTalk (self,npc,st):
        # NOTE(review): npcId and the "nothing to say" htmltext are computed
        # but never used -- "1.htm" is always returned; confirm intended.
        npcId = npc.getNpcId()
        htmltext = "<html><head><body>I have nothing to say to you.</body></html>"
        st.set("cond","0")
        st.setState(STARTED)
        return "1.htm"
# Instantiate the quest and its three states, then register every
# blacksmith NPC as both a start NPC and a talk target.
QUEST = Quest(1007,"1007_enhance","Blacksmith")
CREATED = State('Start', QUEST)
STARTED = State('Started', QUEST)
COMPLETED = State('Completed', QUEST)
QUEST.setInitialState(CREATED)
# Hook every blacksmith NPC id into the quest engine.
for npcId in SMITHS:
    QUEST.addStartNpc(npcId)
    STARTED.addTalkId(npcId)
# Printed last: it only appears if the whole script parsed without a
# Jython error, serving as a load-success marker in the server log.
print "importing blacksmith data: 1007_enhance"
| Barrog/C4-Datapack | data/jscript/blacksmith/1007_enhance/__init__.py | Python | gpl-2.0 | 21,508 | [
"CRYSTAL"
] | 51bbd395be47c780fa86a70665902c5f62c437973d8a0a8d63acb8597fe87130 |
# -*- mode: C++; indent-tabs-mode: nil; c-basic-offset: 4; tab-width: 4; -*-
# vim: set shiftwidth=4 softtabstop=4 expandtab:
"""Support for reading meta-data and data NetCDF files, primarily
time-series data.
2014 Copyright University Corporation for Atmospheric Research
This file is part of the "django-ncharts" package.
The license and distribution terms for this file may be found in the
file LICENSE in this package.
"""
import os, sys, time
import netCDF4
from datetime import datetime
import pytz
import numpy as np
import logging
import threading
import operator
import hashlib
from functools import reduce as reduce_
from ncharts import exceptions as nc_exc
from ncharts import fileset as nc_fileset
# __name__ is ncharts.netcdf
_logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def get_file_modtime(path):
    """Return the modification time of path as a timezone-aware UTC datetime.

    Logs and re-raises FileNotFoundError or PermissionError from os.stat().
    """
    try:
        stat_result = os.stat(path)
    except (FileNotFoundError, PermissionError) as exc:
        _logger.error(exc)
        raise
    mod_time = datetime.fromtimestamp(stat_result.st_mtime, tz=pytz.utc)
    return mod_time
class NetCDFDataset(object):
"""A dataset consisting of NetCDF files, within a period of time.
This is similar to netCDF4.MFDataset, but gets around some of its
limitations.
Supports reading a list of time-series variables from a
collection of files concatenating the results over the
time-dimension of the variables. If a variable is missing in
a file, the values for those times will be NaN filled.
Also handles other situations that may arise, such as if the
non-time dimensions for a variable change from file to file, in
which case the result will have the largest non-time dimensions,
with the extra values filled in.
Also attempts to handle the situation when the type of a variable
is not consistant over the collection of files.
Attributes:
path: directory path and file name format
fileset: The nc_fileset.FileSet encapsulating a set of files.
start_time: start time of the dataset
end_time: end time of the dataset
cache_hash: a hash string, created from the path, start_time
and end_time. The cache of NetCDF attributes is
stored in the class under this hash code.
These attributes of NetCDFDataset are cached:
variables: Dict of dicts for all time-series variables found in
dataset by their "exported" variable name:
{ 'shape': tuple of integer dimensions of the variable,
'dimnames': tuple of str dimension names for variable,
'units': str value of "units" attribute if found,
'long_name': str value of "long_name" attribute if found,
'dtype': numpy.dtype of the variable
}.
base_time: str name of base_time variable if found in the dataset.
time_dim: str name of the time dimension in this dataset.
time_name: str name of time variable.
nstations: int length of NetCDF "station" dimension in this dataset,
if found.
station_dim: str name of NetCDF "station" dimension, currently always
"station".
station_names: If a NetCDF character variable called "station"
is found, a list of str values of the variable.
"""
# pylint thinks this class is too big.
# pylint: disable=too-many-instance-attributes
MAX_NUM_FILES_TO_PRESCAN = 50
__cache_lock = threading.Lock()
# dictionary of attributes of a NetCDFDataset.
__cached_dataset_info = {}
def __init__(self, path, start_time, end_time):
"""Constructs NetCDFDataset with a path to a filesetFileset.
Raises:
none
"""
self.path = path
self.fileset = nc_fileset.Fileset.get(path)
self.start_time = start_time
self.end_time = end_time
hasher = hashlib.md5()
hasher.update(bytes(path, 'utf-8'))
hasher.update(bytes(str(start_time), 'utf-8'))
hasher.update(bytes(str(end_time), 'utf-8'))
self.cache_hash = hasher.digest()
def get_dataset_info(self):
"""Fetch a copy of the cache of info for this dataset.
"""
with NetCDFDataset.__cache_lock:
if self.cache_hash in NetCDFDataset.__cached_dataset_info:
return NetCDFDataset.__cached_dataset_info[self.cache_hash].copy()
dsinfo = {
'file_mod_times': {},
'base_time': None,
'time_dim_name': None,
'time_name': None,
'nstations': None,
'station_dim': None,
'station_names': None,
'variables': {},
}
return dsinfo
def save_dataset_info(self, dsinfo):
"""Save a copy of info for this dataset.
"""
with NetCDFDataset.__cache_lock:
NetCDFDataset.__cached_dataset_info[self.cache_hash] = dsinfo
def __str__(self):
return "NetCDFDataset, path=" + str(self.path)
def get_files(
self,
start_time=pytz.utc.localize(datetime.min),
end_time=pytz.utc.localize(datetime.max)):
"""Return the fileset.File objects matching a time period.
Args:
start_time: datetime.datetime of start of fileset scan.
end_time: end of fileset scan.
Returns:
List of file path names matching the time period.
Raises:
FileNotFoundError, PermissionError
"""
return self.fileset.scan(start_time, end_time)
def get_filepaths(
self,
start_time=pytz.utc.localize(datetime.min),
end_time=pytz.utc.localize(datetime.max)):
"""Return the file path names matching the time period.
Args:
start_time: datetime.datetime of start of fileset scan.
end_time: end of fileset scan.
Returns:
List of file path names matching the time period.
Raises:
FileNotFoundError, PermissionError
"""
return [f.path for f in self.get_files(start_time, end_time)]
def get_variables(
self,
time_names=('time', 'Time', 'time_offset')):
""" Scan the set of files for time series variables, returning a dict
for information about the variables.
The names of the variables in the dataset are converted to an exported
form. If a variable has a 'short_name' attribute, it is used for the
variable name, otherwise the exported name is set to the NetCDF variable
name.
Note, we don't read every file. May want to have
MAX_NUM_FILES_TO_PRESCAN be an attribute of the dataset.
Even better, would be nice to know that one only
needs to read a reduced set of files, perhaps just one!
Args:
time_names: List of allowed names for time variable.
Returns:
A dict of variables, keyed by the exported variable name.
Each value is a dict, containing the following keys:
shape: tuple containing the shape of the variable
dimnames: list of dimension names
dtype: NetCDF data type
time_index: index of the time dimension
units: units attribute of the NetCDF variable
long_name: long_name attribute of the NetCDF variable
Raises:
nc_exc.NoDataFoundException
"""
dsinfo = self.get_dataset_info()
# Note: dsinfo_vars is a reference. Modificatons to it
# are also modifications to dsinfo.
dsinfo_vars = dsinfo['variables']
files = self.get_files(
start_time=self.start_time,
end_time=self.end_time)
# typically get_files() also returns the file before start_time
# We may want that in reading a period of data, but not
# in assembling the variables for the dataset
filepaths = [f.path for f in files if f.time >= self.start_time and f.time < self.end_time]
skip = 1
if len(filepaths) > NetCDFDataset.MAX_NUM_FILES_TO_PRESCAN:
skip = len(filepaths) / NetCDFDataset.MAX_NUM_FILES_TO_PRESCAN
# Read at most MAX_NUM_FILES_TO_PRESCAN, including latest file.
# Files are scanned in a backwards sequence
pindex = len(filepaths) - 1
n_files_read = 0
while pindex >= 0:
ncpath = filepaths[int(pindex)]
pindex -= skip
# The files might be in the process of being moved, deleted, etc,
# so if we get an exception in this open, try a few more times.
# Testing indicates that with a truncated file (artificially
# truncated with dd), the underlying C code will cause a crash
# of python from an assert() rather than raising an exception
# that could be caught.
# If the netcdf library is compiled with -DNDEBUG, then the
# the open and parse of the truncated header succeeds, but
# still no exception.
# If the file is artificially corrupted by removing an
# initial portion of the file:
# dd if=test.nc of=bad.nc bs=1014 count=100 skip=1
# then an exception is raised (this was with -DNDEBUG):
# RuntimeError bad.nc: NetCDF: Unknown file format
# To make this robust, it would be good to run a king's
# taster process on each file first to reduce the possibility
# of a server death. The king's taster would not use NDEBUG,
# but perhaps the python server would. Complicated.
fileok = False
skip_file = False
exc = None
for itry in range(0, 3):
try:
curr_mod_time = get_file_modtime(ncpath)
if ncpath in dsinfo['file_mod_times']:
prev_mod_time = dsinfo['file_mod_times'][ncpath]
if curr_mod_time <= prev_mod_time:
skip_file = True
fileok = True
break
dsinfo['file_mod_times'][ncpath] = curr_mod_time
# _logger.debug("ncpath=%s",ncpath)
ncfile = netCDF4.Dataset(ncpath)
fileok = True
break
except (OSError, RuntimeError) as exc:
time.sleep(itry)
if not fileok:
_logger.error("%s: %s", ncpath, exc)
continue
n_files_read += 1
if skip_file:
continue
try:
if not dsinfo['base_time'] and 'base_time' in ncfile.variables:
dsinfo['base_time'] = 'base_time'
tdim = None
# look for a time dimension
for tname in ['time', 'Time']:
if tname in ncfile.dimensions:
tdim = ncfile.dimensions[tname]
break
if not tdim:
continue
# check for tdim.is_unlimited?
if not dsinfo['time_dim_name']:
dsinfo['time_dim_name'] = tdim.name
if 'station' in ncfile.dimensions:
if not dsinfo['nstations']:
dsinfo['nstations'] = len(ncfile.dimensions["station"])
dsinfo['station_dim'] = "station"
elif not dsinfo['nstations'] == \
len(ncfile.dimensions["station"]):
_logger.warning(
"%s: station dimension (%d) is "
"different than that of other files (%d)",
ncpath,
len(ncfile.dimensions["station"]),
dsinfo['nstations'])
if not dsinfo['station_names'] and 'station' in ncfile.variables:
var = ncfile.variables["station"]
if var.datatype == np.dtype('S1'):
dsinfo['station_names'] = \
[str(netCDF4.chartostring(v)) for v in var]
# look for a time variable
if not dsinfo['time_name']:
for tname in time_names:
if tname in ncfile.variables:
if tdim.name in ncfile.variables[tname].dimensions:
dsinfo['time_name'] = tname
break
if not dsinfo['time_name'] or \
not dsinfo['time_name'] in ncfile.variables:
# time variable not yet found or not in this file
continue
if not tdim.name in ncfile.variables[dsinfo['time_name']].dimensions:
# time variable in this file doesn't have a time dimension
continue
# pylint: disable=no-member
for (nc_vname, var) in ncfile.variables.items():
# looking for time series variables
if not dsinfo['time_dim_name'] in var.dimensions:
continue
# time variable
if nc_vname == dsinfo['time_name']:
continue
# exported variable name
if hasattr(var, 'short_name'):
exp_vname = getattr(var, 'short_name')
else:
exp_vname = nc_vname
# var.dimensions is a tuple of dimension names
time_index = var.dimensions.index(dsinfo['time_dim_name'])
# Check if we have found this variable in a earlier file
if not exp_vname in dsinfo_vars:
dsinfo_vars[exp_vname] = {}
dsinfo_vars[exp_vname]['netcdf_name'] = nc_vname
dsinfo_vars[exp_vname]['shape'] = var.shape
dsinfo_vars[
exp_vname]['dimnames'] = var.dimensions
dsinfo_vars[exp_vname]['dtype'] = var.dtype
dsinfo_vars[exp_vname]['time_index'] = time_index
# Grab certain attributes
for att in ['units', 'long_name']:
if hasattr(var, att):
dsinfo_vars[exp_vname][att] = getattr(var, att)
# Set default units to ''
if not 'units' in dsinfo_vars[exp_vname]:
dsinfo_vars[exp_vname]['units'] = ''
continue
# variable has been found in an earlier ncfile
# check for consistancy across files
if dsinfo_vars[exp_vname]['shape'][1:] != var.shape[1:]:
# the above check works even if either shape
# has length 1
if len(dsinfo_vars[exp_vname]['shape']) != \
len(var.shape):
# changing number of dimensions, punt
_logger.error(
"%s: %s: number of "
"dimensions: %d and %d changes. "
"Skipping this variable.",
ncpath, nc_vname, len(var.shape),
len(dsinfo_vars[exp_vname]['shape']))
del dsinfo_vars[exp_vname]
continue
# here we know that shapes have same length and
# they must have len > 1. Allow final dimension
# to change.
ndim = len(var.shape)
if (dsinfo_vars[exp_vname]['shape'][1:(ndim-1)] !=
var.shape[1:(ndim-1)]):
_logger.error(
"%s: %s: incompatible shapes: "
"%s and %s. Skipping this variable.",
ncpath, nc_vname, repr(var.shape),
repr(dsinfo_vars[exp_vname]['shape']))
del dsinfo_vars[exp_vname]
continue
# set shape to max shape (leaving the problem
# for later...)
dsinfo_vars[exp_vname]['shape'] = tuple(
[max(i, j) for (i, j) in zip(
dsinfo_vars[exp_vname]['shape'], var.shape)])
if dsinfo_vars[exp_vname]['dtype'] != var.dtype:
_logger.error(
"%s: %s: type=%s is different than "
"in other files",
ncpath, nc_vname, repr(var.dtype))
if dsinfo_vars[exp_vname]['time_index'] != time_index:
_logger.error(
"%s: %s: time_index=%d is different than "
"in other files. Skipping this variable.",
ncpath, nc_vname, time_index)
del dsinfo_vars[exp_vname]
for att in ['units', 'long_name']:
if hasattr(var, att) and att in dsinfo_vars[exp_vname]:
if getattr(var, att) != dsinfo_vars[exp_vname][att]:
_logger.info(
"%s: %s: %s=%s is different than previous value=%s",
ncpath, nc_vname, att, getattr(var, att),
dsinfo_vars[exp_vname][att])
dsinfo_vars[exp_vname][att] = getattr(var, att)
finally:
ncfile.close()
if not n_files_read:
msg = "No variables found"
raise nc_exc.NoDataFoundException(msg)
# cache dsinfo
dsvars = dsinfo_vars.copy()
self.save_dataset_info(dsinfo)
return dsvars
    def resolve_variable_shapes(self, variables, selectdim):
        """Determine the shape of variables in this dataset.
        Args:
            variables: List of exported variable names.
            selectdim: A dict containing by dimension name,
                the indices of the dimension to be read.
                For example: {"station":[3,4,5]} to read indices 3,4 and 5
                (indexed from 0) of the station dimension for variables
                which have that dimension. An index of -1 indicates that
                variables which don't have the dimension are still to be read.
        Returns:
            Dict by variable name of resultant variable shapes, which may be
            different than the non-time dimensions of the variable in a file
            if the user has specified selectdim to sub-select over a dimension.
            Variables not known to the dataset are silently omitted.
        """
        dsinfo = self.get_dataset_info()
        # lazily populate the variable info cache on first use
        if len(dsinfo['variables']) == 0:
            self.get_variables()
            dsinfo = self.get_dataset_info()
        dsinfo_vars = dsinfo['variables']
        vshapes = {}
        for exp_vname in variables:
            if exp_vname in dsinfo_vars:
                # maximum shape of this variable in all files
                vshape = list(dsinfo_vars[exp_vname]["shape"])
                time_index = dsinfo_vars[exp_vname]["time_index"]
                vdims = dsinfo_vars[exp_vname]["dimnames"]
                dmatch = True
                for dim in selectdim:
                    # some dimensions selected
                    if not dim in vdims:
                        # This variable does not have the selected dimension
                        # If all selected indices for the dimension are >= 0
                        # then don't return any values for this variable.
                        # -1 for a selected dimension means return values
                        # for the variable even if it doesn't have the dimension
                        try:
                            if all(i >= 0 for i in selectdim[dim]):
                                dmatch = False
                        except TypeError:  # selectdim[dim] is a scalar, not iterable
                            if selectdim[dim] >= 0:
                                dmatch = False
                if not dmatch:
                    continue
                # determine selected shape for variable
                for idim, dim in enumerate(vdims):
                    if dim == dsinfo['time_dim_name']:
                        # time dimension size is left at the file maximum
                        pass
                    elif dim == "sample":
                        # high rate files with a sample dimension
                        # Add support for this eventually. For now
                        # just grab first value
                        vshape[idim] = 1
                    elif dim in selectdim:
                        # variable has a selected dimension; shape becomes
                        # the count of non-negative selected indices
                        try:
                            if not all(i < 0 for i in selectdim[dim]):
                                idx = [i for i in selectdim[dim] if i >= 0]
                                vshape[idim] = len(idx)
                        except TypeError:  # selectdim[dim] is a scalar, not iterable
                            if selectdim[dim] >= 0:
                                vshape[idim] = 1
                # remove non-time shape values of 1
                # NOTE(review): the condition below actually drops the entry
                # at time_index when its size is <= 1, and keeps every
                # non-time entry regardless of size — the opposite of what
                # the comment above says. Confirm which behavior is intended.
                vshape = [dim for (idim, dim) in enumerate(vshape) \
                          if idim != time_index or dim > 1]
                vshapes[exp_vname] = vshape
        return vshapes
    def read_times(self, ncfile, ncpath, start_time, end_time, times,
                   size_limit):
        """Read values of the time variable from a NetCDF dataset.
        Args:
            ncfile: An opened netCFD4.Dataset.
            ncpath: Path to the dataset. netCDF4.Dataset.filepath() is only
                supported in netcdf version >= 4.1.2.
            start_time: A datetime.datetime. Times greater than or equal
                to start_time are read.
            end_time: A datetime.datetime. Times less than end_time are read.
            times: A list of UTC timestamps; the times read are
                appended to this list (in-place side effect).
            size_limit: Raise nc_exc.TooMuchDataException if the byte size
                of the times read exceeds this limit.
        Returns:
            A built-in slice object, giving the start and stop indices of the
            requested time period in the file. slice(0) — whose .start is
            None — is returned when nothing is to be read.
        Raises:
            nc_exc.TooMuchDataException
        """
        debug = False
        dsinfo = self.get_dataset_info()
        base_time = None
        # a scalar (0-dimension) base_time variable, if present, supplies the
        # epoch offset when the time variable has no parseable units
        if dsinfo['base_time'] and \
                dsinfo['base_time'] in ncfile.variables and \
                len(ncfile.variables[dsinfo['base_time']].dimensions) == 0:
            base_time = ncfile.variables[dsinfo['base_time']].getValue()
            # _logger.debug("base_time=%d",base_time)
        # NOTE(review): if dsinfo['time_name'] is not in ncfile.variables,
        # tvals is never bound and the len(tvals) check below raises
        # NameError — presumably callers guarantee the variable exists;
        # confirm.
        if dsinfo['time_name'] in ncfile.variables:
            var = ncfile.variables[dsinfo['time_name']]
            if len(var) == 0:
                return slice(0)
            if hasattr(var, "units") and 'since' in var.units:
                try:
                    # times from netCDF4.num2date are timezone naive.
                    # Use replace(tzinfo=pytz.UTC) to assign a timezone.
                    tvals = [
                        d.replace(tzinfo=pytz.UTC).timestamp() for d in
                        netCDF4.num2date(var[:], var.units, 'standard')]
                except IndexError as exc:
                    # most likely has a dimension of 0
                    _logger.error(
                        "%s: %s: cannot index variable %s",
                        os.path.split(ncpath)[1],
                        exc, dsinfo['time_name'])
                    return slice(0)
                except TypeError:
                    # units string present but not parseable by num2date
                    if base_time:
                        _logger.warning(
                            "%s: %s: cannot parse units: %s. "
                            "Using base_time instead",
                            os.path.split(ncpath)[1],
                            dsinfo['time_name'], var.units)
                        tvals = [base_time + val for val in var[:]]
                    else:
                        _logger.error(
                            "%s: %s: cannot parse units: %s",
                            os.path.split(ncpath)[1],
                            dsinfo['time_name'], var.units)
                        tvals = [val for val in var[:]]
            else:
                # no units: time values are offsets from base_time
                try:
                    tvals = [base_time + val for val in var[:]]
                except IndexError as exc:
                    # most likely has a dimension of 0
                    _logger.error(
                        "%s: %s: cannot index variable %s",
                        os.path.split(ncpath)[1],
                        exc, dsinfo['time_name'])
                    return slice(0)
        # pylint: disable=pointless-string-statement
        """
        tvals = [
            d.timestamp() for d in
            netCDF4.num2date(var[:], var.units, 'standard')]
        """
        if len(tvals) == 0:
            return slice(0)
        try:
            # first index at or after start_time
            istart = next(idx for idx, tval in enumerate(tvals) \
                if tval >= start_time.timestamp())
            # _logger.debug("start_time=%s, file=%s,istart=%d",
            #     start_time,ncpath,istart)
            # last index before end_time, found by scanning from the end
            iend = next(idx for idx, tval in enumerate(reversed(tvals)) \
                if tval < end_time.timestamp())
            iend = len(tvals) - iend
            # _logger.debug("end_time=%s, file=%s,iend=%d",
            #     end_time,ncpath,iend)
        except StopIteration:
            return slice(0)
        if iend - istart == 0:
            return slice(0)
        elif iend - istart < 0:
            # can happen if the times in the file are unsorted
            _logger.warning(
                "%s: times in file are not ordered, start_time=%s,"
                "end_time=%s, file times=%s - %s, istart=%d, iend=%d",
                os.path.split(ncpath)[1],
                start_time.isoformat(), end_time.isoformat(),
                datetime.fromtimestamp(tvals[0], tz=pytz.utc).isoformat(),
                datetime.fromtimestamp(tvals[-1], tz=pytz.utc).isoformat(),
                istart, iend)
            return slice(0)
        elif debug:
            _logger.debug(
                "%s: tvals[%d]=%s, tvals[%d]=%s, "
                "start_time=%s, end_time=%s",
                os.path.split(ncpath)[1],
                istart,
                datetime.fromtimestamp(
                    tvals[istart], tz=pytz.utc).isoformat(),
                iend,
                datetime.fromtimestamp(
                    tvals[iend-1], tz=pytz.utc).isoformat(),
                start_time.isoformat(),
                end_time.isoformat())
        time_slice = slice(istart, iend, 1)
        tvals = tvals[time_slice]
        tsize = sys.getsizeof(tvals)
        if tsize > size_limit:
            raise nc_exc.TooMuchDataException(
                "too many time values requested, size={0} MB".\
                format(tsize/(1000 * 1000)))
        times.extend(tvals)
        return time_slice
def read_time_series_data(
self, ncfile, ncpath, exp_vname, time_slice, vshape,
selectdim, dim2):
""" Read values of a time-series variable from a netCDF4 dataset.
Args:
ncfile: An opened netCFD4.Dataset.
ncpath: Path to the dataset. netCDF4.Dataset.filepath() is only
supported in netcdf version >= 4.1.2.
exp_vname: Exported name of variable to read.
time_slice: The slice() of time indices to read.
vshape: Shape of the variable in case it isn't in the file
an a filled array should be returned.
selectdim: A dict containing for each dimension name of type
string, the indices of the dimension to read.
For example: {"station":[3,4,5]} to read indices 3,4 and 5
(indexed from 0) of the station dimension for variables
which have that dimension.
dim2: Values for second dimension of the variable, such as height.
Returns:
A numpy.ma.array containing the data read.
"""
dsinfo = self.get_dataset_info()
dsinfo_vars = dsinfo['variables']
debug = False
# which dimension is time?
time_index = dsinfo_vars[exp_vname]["time_index"]
vdtype = dsinfo_vars[exp_vname]["dtype"]
nc_vname = dsinfo_vars[exp_vname]['netcdf_name']
if nc_vname in ncfile.variables:
var = ncfile.variables[nc_vname]
# indices of variable to be read
idx = ()
for idim, dim in enumerate(var.dimensions):
if dim == dsinfo['time_dim_name']:
idx += (time_slice,)
elif dim == "sample":
# high rate files with a sample dimension
# Add support for this eventually. For now
# just grab first value
idx += (0,)
elif dim in selectdim:
# variable has a selected dimension
try:
if all(i < 0 for i in selectdim[dim]):
sized = len(ncfile.dimensions[dim])
idx += (slice(0, sized), )
else:
idx += \
(tuple([i for i in selectdim[dim] if i >= 0]),)
except TypeError: # not iterable
if selectdim[dim] >= 0:
idx = (selectdim[dim],)
else:
sized = len(ncfile.dimensions[dim])
idx += (slice(0, sized), )
else:
sized = len(ncfile.dimensions[dim])
idx += (slice(0, sized), )
if not dim2:
# dsinfo_vars[exp_vname]['shape'][idim] will
# be the largest value for this dimension
# in the set of files.
sized = dsinfo_vars[exp_vname]['shape'][idim]
dim2['data'] = [i for i in range(sized)]
dim2['name'] = dim
dim2['units'] = ''
if debug and time_slice.stop - time_slice.start > 0:
_logger.debug(
"%s: %s: time_slice.start,"
"time_slice.stop=%d,%d, idx[1:]=%s",
os.path.split(ncpath)[1], nc_vname,
time_slice.start, time_slice.stop,
repr(idx[1:]))
# extract the data from var
vdata = var[idx]
fill_val = (
0 if vdata.dtype.kind == 'i' or
vdata.dtype.kind == 'u' else float('nan'))
if isinstance(vdata, np.ma.core.MaskedArray):
vdata = vdata.filled(fill_value=fill_val)
if vdata.dtype != vdtype:
vdata = np.ndarray.astype(vdtype)
if len(vshape) > 0 and tuple(vshape[1:]) != vdata.shape[1:]:
# _logger.debug("vshape[1:]=%d, vdata.shape[1:]=%d",
# vshape[1:], vdata.shape[1:])
# changing shape. Add support for final dimension
# increasing. vshape should be the largest expected shape
shape = list(vdata.shape)
# how much to grow it by
shape[-1] = vshape[-1] - vdata.shape[-1]
vdata = np.append(
vdata, np.ma.array(
data=np.empty(
shape=shape, dtype=vdata.dtype),
mask=True, fill_value=fill_val).filled(),
axis=-1)
else:
# variable is not in file, create NaN filled array
# Determine shape of variable. Change the first, time dimension
# to match the selected period. The remaininng dimension
# in dsinfo_vars[exp_vname]['shape'] is the largest of those
# seen in the selected files.
shape = vshape
shape[time_index] = time_slice.stop - time_slice.start
shape = tuple(shape)
vdtype = dsinfo_vars[exp_vname]["dtype"]
fill_val = (
0 if vdtype.kind == 'i' or
vdtype.kind == 'u' else float('nan'))
vdata = np.ma.array(
data=np.empty(
shape=shape, dtype=vdtype),
mask=True, fill_value=fill_val).filled()
return vdata
def read_time_series(
self,
variables=(),
start_time=pytz.utc.localize(datetime.min),
end_time=pytz.utc.localize(datetime.max),
selectdim=None,
size_limit=1000 * 1000 * 1000,
series=None,
series_name_fmt=None):
""" Read a list of time-series variables from this fileset.
Args:
variables: A list of strs containing time series variable
names to be read.
start_time: A datetime, which is timezone aware, of the start
time of the series to read.
end_time: A datetime, timezone aware, end time of series to read.
selectdim: A dict containing for each dimension name of type
string, the indices of the dimension to read.
For example: {"station":[3,4,5]} to read indices 3,4 and 5
(indexed from 0) of the station dimension for variables
which have that dimension.
size_limit: Limit on the total size in bytes to read, used to
screen huge requests.
series: A list of series to be read by name.
series_fmt: a datetime.strftime format to create a
series name for the data found in each file, based
on the time associated with the file.
If series_name_fmt is None, all data is put in a dictionary
element named ''.
Returns:
A dict containing, by series name:
'time' : list of UTC timestamps,
'data': list of numpy.ndarray containing the data for
each variable,
'vmap': dict by variable name,
containing the index into the series data for the variable,
'dim2': dict by variable name, of values for second dimension
of the data, such as height,
}
Raises:
nc_exc.NoDataFoundException
nc_exc.NoDataException
The 'data' element in the returned dict is a list of numpy arrays,
and not a dict by variable name. The 'vmap' element provides the
mapping from a variable name to an index into 'data'. The data object
is typically JSON-ified and sent to a browser. If it were a dict,
the variable names may contain characters which cause headaches with
JSON and javascript in django templates. For example, the JSON-ified
string is typically passed to javascript in a django template by
surrounding it with single quotes:
var data = jQuery.parseJSON('{{ data }}');
A single quote within the data JSON string causes grief, and we want
to support single quotes in variable names. The only work around I
know of is to convert the single quotes within the string to '\u0027'.
This is, of course, a time-consuming step we want to avoid when
JSON-ifying a large chunk of data. It is less time-consuming to
replace the quotes in the smaller vmap.
The series names will not contain single quotes.
"""
debug = False
dsinfo = self.get_dataset_info()
if not dsinfo['time_name']:
self.get_variables()
dsinfo = self.get_dataset_info()
dsinfo_vars = dsinfo['variables']
if not selectdim:
selectdim = {}
vshapes = self.resolve_variable_shapes(variables, selectdim)
res_data = {}
total_size = 0
ntimes = 0
files = self.get_files(start_time, end_time)
if debug:
_logger.debug(
"len(files)=%d, series_name_fmt=%s",
len(files), series_name_fmt)
if series_name_fmt:
file_tuples = [(f.time.strftime(series_name_fmt), f.path) \
for f in files]
else:
file_tuples = [("", f.path) for f in files]
for (series_name, ncpath) in file_tuples:
if series and not series_name in series:
continue
if debug:
_logger.debug("series=%s", str(series))
_logger.debug("series_name=%s ,ncpath=%s", series_name, ncpath)
# the files might be in the process of being moved, deleted, etc
fileok = False
exc = None
for itry in range(0, 3):
try:
ncfile = netCDF4.Dataset(ncpath)
fileok = True
break
except (OSError, RuntimeError) as exc:
time.sleep(itry)
if not fileok:
_logger.error("%s: %s", ncpath, exc)
continue
if not series_name in res_data:
res_data[series_name] = {
'time': [],
'data': [],
'vmap': {},
'dim2': {},
}
otime = res_data[series_name]['time']
odata = res_data[series_name]['data']
ovmap = res_data[series_name]['vmap']
odim2 = res_data[series_name]['dim2']
try:
size1 = sys.getsizeof(otime)
# times are apended to otime
time_slice = self.read_times(
ncfile, ncpath, start_time, end_time, otime,
size_limit - total_size)
# time_slice.start is None if nothing to read
if time_slice.start is None or \
time_slice.stop <= time_slice.start:
continue
total_size += sys.getsizeof(otime) - size1
for exp_vname in variables:
# skip if variable is not a time series or
# doesn't have a selected dimension
if not exp_vname in dsinfo_vars or not exp_vname in vshapes:
continue
# selected shape of this variable
vshape = vshapes[exp_vname]
vsize = reduce_(
operator.mul, vshape, 1) * \
dsinfo_vars[exp_vname]["dtype"].itemsize
if total_size + vsize > size_limit:
raise nc_exc.TooMuchDataException(
"too much data requested, will exceed {} mbytes".
format(size_limit/(1000 * 1000)))
dim2 = {}
vdata = self.read_time_series_data(
ncfile, ncpath, exp_vname, time_slice, vshape,
selectdim, dim2)
if not exp_vname in odim2:
odim2[exp_vname] = dim2
if not exp_vname in ovmap:
size1 = 0
vindex = len(odata)
odata.append(vdata)
ovmap[exp_vname] = vindex
else:
if debug:
_logger.debug(
"odata[%s].shape=%s, vdata.shape=%s",
exp_vname, odata[exp_vname].shape, vdata.shape)
vindex = ovmap[exp_vname]
size1 = sys.getsizeof(odata[vindex])
time_index = dsinfo_vars[exp_vname]["time_index"]
odata[vindex] = np.append(
odata[vindex], vdata, axis=time_index)
total_size += sys.getsizeof(odata[vindex]) - size1
finally:
ncfile.close()
ntimes += len(otime)
if ntimes == 0:
exc = nc_exc.NoDataException(
"No data found between {} and {}".
format(
start_time.isoformat(),
end_time.isoformat()))
# _logger.warning("%s: %s", str(self), repr(exc))
raise exc
ncol_read = sum([len(cdata) for (i, cdata) in res_data.items()])
if ncol_read == 0:
exc = nc_exc.NoDataException(
"No variables named {} found between {} and {}".
format(
repr(variables),
start_time.isoformat(),
end_time.isoformat()))
# _logger.warning("%s: %s", str(self), repr(exc))
raise exc
if debug:
for series_name in res_data.keys():
for exp_vname in res_data[series_name]['vmap']:
var_index = res_data[series_name]['vmap'][exp_vname]
_logger.debug(
"res_data[%s][%d].shape=%s, exp_vname=%s",
series_name, var_index,
repr(res_data[series_name][var_index].shape),
exp_vname)
_logger.debug(
"total_size=%d", total_size)
return res_data
| nguyenduchien1994/django-ncharts | ncharts/netcdf.py | Python | bsd-2-clause | 42,876 | [
"NetCDF"
] | fa13dff93890734bf1a1b06f386c61497ccfde99ed034a6547cbdce5779236ad |
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from pymatgen.analysis.prototypes import AflowPrototypeMatcher
from pymatgen.util.testing import PymatgenTest
class AflowPrototypeMatcherTest(PymatgenTest):
    def test_prototype_matching(self):
        """Check that known structures match their AFLOW prototype tags."""
        # (structure name, expected tags of the best prototype match)
        expected = [
            (
                "Sn",
                {
                    "aflow": "A_cF8_227_a",
                    "mineral": "diamond",
                    "pearson": "cF8",
                    "strukturbericht": "A4",
                },
            ),
            (
                "CsCl",
                {
                    "aflow": "AB_cP2_221_b_a",
                    "mineral": "",
                    "pearson": "cP2",
                    "strukturbericht": "B2",
                },
            ),
            (
                "Li2O",
                {
                    "aflow": "AB2_cF12_225_a_c",
                    "mineral": "Fluorite",
                    "pearson": "cF12",
                    "strukturbericht": "C1",
                },
            ),
        ]
        matcher = AflowPrototypeMatcher()
        for structure_name, tags in expected:
            structure = self.get_structure(structure_name)
            best_match = matcher.get_prototypes(structure)[0]
            self.assertDictEqual(best_match["tags"], tags)
| vorwerkc/pymatgen | pymatgen/analysis/tests/test_prototypes.py | Python | mit | 1,375 | [
"pymatgen"
] | 76671448918a98b67b31d1b92bc98736c192dc86f7296007e6c7942b5bff3737 |
"""
DIRAC Wrapper to execute python and system commands with a wrapper, that might
set a timeout.
3 FUNCTIONS are provided:
- shellCall( iTimeOut, cmdSeq, callbackFunction = None, env = None ):
it uses subprocess.Popen class with "shell = True".
If cmdSeq is a string, it specifies the command string to execute through
the shell. If cmdSeq is a sequence, the first item specifies the command
string, and any additional items will be treated as additional shell arguments.
- systemCall( iTimeOut, cmdSeq, callbackFunction = None, env = None ):
it uses subprocess.Popen class with "shell = False".
cmdSeq should be a string, or a sequence of program arguments.
stderr and stdout are piped. callbackFunction( pipeId, line ) can be
defined to process the stdout (pipeId = 0) and stderr (pipeId = 1) as
they are produced
They return a DIRAC.ReturnValue dictionary with a tuple in Value
( returncode, stdout, stderr ) the tuple will also be available upon
timeout error or buffer overflow error.
- pythonCall( iTimeOut, function, \*stArgs, \*\*stKeyArgs )
calls function with given arguments within a timeout Wrapper
should be used to wrap third party python functions
"""
from multiprocessing import Process, Manager
import threading
import time
import select
import os
import sys
import types
import subprocess
import signal
# Very Important:
# Here we can not import directly from DIRAC, since this file it is imported
# at initialization time therefore the full path is necessary
# from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Utilities.ReturnValues import S_OK, S_ERROR
# from DIRAC import gLogger
from DIRAC.FrameworkSystem.Client.Logger import gLogger
__RCSID__ = "$Id$"
USE_WATCHDOG = False
class Watchdog(object):
  """
  .. class Watchdog
  timeout watchdog decorator: runs the wrapped callable in a separate
  multiprocessing.Process and kills it (SIGTERM, then SIGKILL) when a
  deadline passes. Results are passed back through a Manager dict.
  """
  def __init__(self, func, args=None, kwargs=None):
    """ c'tor
    :param callable func: callable to execute in the child process
    :param tuple args: positional arguments for func
    :param dict kwargs: keyword arguments for func
    """
    self.func = func if callable(func) else None
    self.args = args if args else tuple()
    self.kwargs = kwargs if kwargs else {}
    self.start = self.end = self.pid = None
    # NOTE(review): rwEvent is a threading.Event, but it is also set/cleared
    # inside the forked executor process (run_func). A threading.Event is
    # not shared across processes after fork, so the parent's watchdog
    # thread presumably never sees the child's set() — confirm intent.
    self.rwEvent = threading.Event()
    self.rwEvent.clear()
    self.__watchdogThread = None
    # Manager dict is shared between parent and child for the result
    self.manager = Manager()
    self.s_ok_error = self.manager.dict()
    self.__executor = Process(target=self.run_func, args=(self.s_ok_error, ))
  def run_func(self, s_ok_error):
    """ subprocess target: run func and copy its S_OK/S_ERROR dict into
    the shared Manager dict
    :param s_ok_error: shared Manager dict receiving the result
    """
    try:
      ret = self.func(*self.args, **self.kwargs)
      # set rw event: signal that the result is being written
      self.rwEvent.set()
      for k in ret:
        s_ok_error[k] = ret[k]
    except Exception as error:
      # report any failure of func as an S_ERROR-shaped dict
      s_ok_error["OK"] = False
      s_ok_error["Message"] = str(error)
    finally:
      # clear rw event
      self.rwEvent.clear()
  def watchdog(self):
    """ watchdog thread target: sleep until the deadline (self.end),
    then terminate the executor process if it is still alive """
    while True:
      if self.rwEvent.is_set() or time.time() < self.end:
        time.sleep(5)
      else:
        break
    if not self.__executor.is_alive():
      # executor already finished, nothing to kill
      return
    else:
      # wait until r/w operation finishes
      while self.rwEvent.is_set():
        time.sleep(5)
        continue
      # SIGTERM first, to give the child a chance to exit cleanly
      os.kill(self.pid, signal.SIGTERM)
      time.sleep(5)
      # SIGKILL if it is still alive after the grace period
      if self.__executor.is_alive():
        os.kill(self.pid, signal.SIGKILL)
  def __call__(self, timeout=0):
    """ decorator execution: start the executor process, optionally start
    the watchdog thread, and collect the result from the shared dict.
    :param int timeout: seconds to wait; 0 means wait forever
    """
    timeout = int(timeout)
    ret = {"OK": True, "Value": ""}
    if timeout:
      # deadline with a 2 second slack for the watchdog thread
      self.start = int(time.time())
      self.end = self.start + timeout + 2
      self.__watchdogThread = threading.Thread(target=self.watchdog)
      self.__watchdogThread.daemon = True
      self.__watchdogThread.start()
      # default result if the executor never delivers one in time
      ret = {"OK": False, "Message": "Timeout after %s seconds" % timeout,
             "Value": (1, '', '')}
    try:
      self.__executor.start()
      time.sleep(0.5)
      self.pid = self.__executor.pid
      if timeout:
        self.__executor.join(timeout)
      else:
        self.__executor.join()
      # get results if any, block watchdog by setting rwEvent
      if not self.__executor.is_alive():
        self.rwEvent.set()
        for k in self.s_ok_error.keys():
          ret[k] = self.s_ok_error[k]
        self.rwEvent.clear()
    except Exception as error:
      return {"OK": False, "Message": str(error),
              "Value": (2, '', '')}
    return ret
class Subprocess:
  """
  .. class:: Subprocess
  Executes system commands (via subprocess.Popen) or python functions
  (via fork + pipe) with an optional timeout and an output buffer size
  limit. Results are returned as DIRAC S_OK/S_ERROR dictionaries.
  NOTE(review): this module predates Python 3 — it mixes str buffers with
  os.read() results and uses '/' for what must be integer division; it is
  documented here as-is for its Python 2 environment.
  """
  def __init__(self, timeout=False, bufferLimit=52428800):
    """ c'tor
    :param int timeout: timeout in seconds (0/False disables the timeout)
    :param int bufferLimit: buffer size, default 5MB
    """
    self.log = gLogger.getSubLogger('Subprocess')
    self.timeout = False
    try:
      self.changeTimeout(timeout)
      self.bufferLimit = int(bufferLimit)  # 5MB limit for data
    except Exception as x:
      self.log.exception('Failed initialisation of Subprocess object')
      raise x
    self.child = None
    self.childPID = 0
    self.childKilled = False
    self.callback = None
    # bufferList[0] is stdout, bufferList[1] is stderr:
    # each entry is [accumulated text, offset of first unprocessed char]
    self.bufferList = []
    self.cmdSeq = []
  def changeTimeout(self, timeout):
    """ set the time out limit to :timeout: seconds
    :param int timeout: time out in seconds; 0 is normalized to False
    """
    self.timeout = int(timeout)
    if self.timeout == 0:
      self.timeout = False
    #self.log.debug( 'Timeout set to', timeout )
  def __readFromFD(self, fd, baseLength=0):
    """ drain file descriptor :fd: into a string, enforcing bufferLimit
    :param fd: file descriptor
    :param int baseLength: bytes already accumulated elsewhere, counted
        against the buffer limit
    """
    dataString = ''
    redBuf = " "
    while len(redBuf) > 0:
      redBuf = os.read(fd, 8192)
      dataString += redBuf
      if len(dataString) + baseLength > self.bufferLimit:
        self.log.error('Maximum output buffer length reached',
                       "First and last data in buffer: \n%s \n....\n %s " % (dataString[:100], dataString[-100:]))
        retDict = S_ERROR('Reached maximum allowed length (%d bytes) '
                          'for called function return value' % self.bufferLimit)
        retDict['Value'] = dataString
        return retDict
    return S_OK(dataString)
  def __executePythonFunction(self, function, writePipe, *stArgs, **stKeyArgs):
    """ child-process side of pythonCall: run :function: with :stArgs: and
    :stKeyArgs:, DEncode the S_OK/S_ERROR result into :writePipe:, then
    terminate the child with os._exit(0)
    """
    from DIRAC.Core.Utilities import DEncode
    try:
      os.write(writePipe, DEncode.encode(S_OK(function(*stArgs, **stKeyArgs))))
    except OSError as x:
      if str(x) == '[Errno 32] Broken pipe':
        # the parent has died
        pass
    except Exception as x:
      self.log.exception('Exception while executing', function.__name__)
      os.write(writePipe, DEncode.encode(S_ERROR(str(x))))
    # HACK: Allow some time to flush logs
    time.sleep(1)
    try:
      os.close(writePipe)
    finally:
      # _exit avoids running parent cleanup handlers in the forked child
      os._exit(0)
  def __selectFD(self, readSeq, timeout=False):
    """ select readable file descriptors from the :readSeq: list,
    silently dropping descriptors that are no longer valid.
    Returns False if no descriptor in readSeq is valid. """
    validList = []
    for fd in readSeq:
      try:
        os.fstat(fd)
        validList.append(fd)
      except OSError:
        pass
    if not validList:
      return False
    if self.timeout and not timeout:
      timeout = self.timeout
    if not timeout:
      return select.select(validList, [], [])[0]
    return select.select(validList, [], [], timeout)[0]
  def __killPid(self, pid, sig=9):
    """ send signal :sig: to process :pid:, ignoring already-dead processes
    :param int pid: process id
    :param int sig: signal to send, default 9 (SIGKILL)
    """
    try:
      os.kill(pid, sig)
    except Exception as x:
      if str(x) != '[Errno 3] No such process':
        self.log.exception('Exception while killing timed out process')
        raise x
  def __poll(self, pid):
    """ non-blocking waitpid for :pid:; returns the (pid, status) tuple,
    False if the child was killed by us, or None if it cannot be polled """
    try:
      return os.waitpid(pid, os.WNOHANG)
    except os.error:
      if self.childKilled:
        return False
      return None
  def killChild(self, recursive=True):
    """ kill child process and return its exit status
    :param boolean recursive: flag to kill all descendants
    """
    if self.childPID < 1:
      self.log.error("Could not kill child", "Child PID is %s" % self.childPID)
      return - 1
    # SIGSTOP first so the child cannot spawn new processes while its
    # descendants are being killed
    os.kill(self.childPID, signal.SIGSTOP)
    if recursive:
      for gcpid in getChildrenPIDs(self.childPID, lambda cpid: os.kill(cpid, signal.SIGSTOP)):
        try:
          os.kill(gcpid, signal.SIGKILL)
          self.__poll(gcpid)
        except Exception:
          pass
    self.__killPid(self.childPID)
    # HACK to avoid python bug
    # self.child.wait()
    # busy-wait briefly for the exit status to become available
    exitStatus = self.__poll(self.childPID)
    i = 0
    while exitStatus is None and i < 1000:
      i += 1
      time.sleep(0.000001)
      exitStatus = self.__poll(self.childPID)
    try:
      exitStatus = os.waitpid(self.childPID, 0)
    except os.error:
      pass
    self.childKilled = True
    if exitStatus is None:
      return exitStatus
    return exitStatus[1]
  def pythonCall(self, function, *stArgs, **stKeyArgs):
    """ call python function :function: with :stArgs: and :stKeyArgs:
    in a forked child; the DEncode'd result comes back through a pipe """
    from DIRAC.Core.Utilities import DEncode
    self.log.verbose('pythonCall:', function.__name__)
    readFD, writeFD = os.pipe()
    pid = os.fork()
    self.childPID = pid
    if pid == 0:
      # child: execute the function and write the result to the pipe
      os.close(readFD)
      self.__executePythonFunction(function, writeFD, *stArgs, **stKeyArgs)
      # FIXME: the close it is done at __executePythonFunction, do we need it here?
      os.close(writeFD)
    else:
      # parent: wait (with optional timeout) for the result on the pipe
      os.close(writeFD)
      readSeq = self.__selectFD([readFD])
      if not readSeq:
        return S_ERROR("Can't read from call %s" % (function.__name__))
      try:
        if len(readSeq) == 0:
          # select timed out: kill the child and report the timeout
          self.log.debug('Timeout limit reached for pythonCall', function.__name__)
          self.__killPid(pid)
          # HACK to avoid python bug
          # self.wait()
          retries = 10000
          while os.waitpid(pid, 0) == -1 and retries > 0:
            time.sleep(0.001)
            retries -= 1
          return S_ERROR('%d seconds timeout for "%s" call' % (self.timeout, function.__name__))
        elif readSeq[0] == readFD:
          retDict = self.__readFromFD(readFD)
          os.waitpid(pid, 0)
          if retDict['OK']:
            dataStub = retDict['Value']
            if not dataStub:
              return S_ERROR("Error decoding data coming from call")
            retObj, stubLen = DEncode.decode(dataStub)
            # the whole stub must have been consumed by the decoder
            if stubLen == len(dataStub):
              return retObj
            return S_ERROR("Error decoding data coming from call")
          return retDict
      finally:
        os.close(readFD)
  def __generateSystemCommandError(self, exitStatus, message):
    """ create system command error
    :param int exitStatus: exit status
    :param str message: error message
    :return: S_ERROR with additional 'Value' tuple ( exitStatus, stdoutBuf, stderrBuf )
    """
    retDict = S_ERROR(message)
    retDict['Value'] = (exitStatus,
                        self.bufferList[0][0],
                        self.bufferList[1][0])
    return retDict
  def __readFromFile(self, fd, baseLength):
    """ read what is currently available from file object :fd:, enforcing
    bufferLimit against :baseLength: bytes already accumulated
    """
    try:
      dataString = ""
      fn = fd.fileno()
      while fd in select.select([fd], [], [], 1)[0]:
        if isinstance(fn, int):
          nB = os.read(fn, self.bufferLimit)
        else:
          nB = fd.read(1)
        if nB == "":
          break
        dataString += nB
        # break out of potential infinite loop, indicated by dataString growing beyond reason
        if len(dataString) + baseLength > self.bufferLimit:
          self.log.error("DataString is getting too long (%s): %s " % (len(dataString), dataString[-10000:]))
          break
    except Exception as x:
      self.log.exception("SUBPROCESS: readFromFile exception")
      try:
        self.log.error('Error reading', 'type(nB) =%s' % type(nB))
        self.log.error('Error reading', 'nB =%s' % str(nB))
      except Exception:
        pass
      return S_ERROR('Can not read from output: %s' % str(x))
    if len(dataString) + baseLength > self.bufferLimit:
      self.log.error('Maximum output buffer length reached')
      retDict = S_ERROR('Reached maximum allowed length (%d bytes) for called '
                        'function return value' % self.bufferLimit)
      retDict['Value'] = dataString
      return retDict
    return S_OK(dataString)
  def __readFromSystemCommandOutput(self, fd, bufferIndex):
    """ read from file descriptor :fd: into bufferList[bufferIndex]
    (0 = stdout, 1 = stderr), invoking the line callback if configured """
    retDict = self.__readFromFile(fd,
                                  len(self.bufferList[bufferIndex][0]))
    if retDict['OK']:
      self.bufferList[bufferIndex][0] += retDict['Value']
      if self.callback is not None:
        while self.__callLineCallback(bufferIndex):
          pass
      return S_OK()
    else:  # buffer size limit reached killing process (see comment on __readFromFile)
      exitStatus = self.killChild()
      return self.__generateSystemCommandError(exitStatus,
                                               "%s for '%s' call" % (retDict['Message'], self.cmdSeq))
  def systemCall(self, cmdSeq, callbackFunction=None, shell=False, env=None):
    """ execute :cmdSeq: via subprocess.Popen, polling its output and
    enforcing self.timeout; returns S_OK((status, stdout, stderr)) """
    if shell:
      self.log.verbose('shellCall:', cmdSeq)
    else:
      self.log.verbose('systemCall:', cmdSeq)
    self.cmdSeq = cmdSeq
    self.callback = callbackFunction
    # close_fds is not supported together with piped std handles on Windows
    if sys.platform.find("win") == 0:
      closefd = False
    else:
      closefd = True
    try:
      self.child = subprocess.Popen(self.cmdSeq,
                                    shell=shell,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE,
                                    close_fds=closefd,
                                    env=env)
      self.childPID = self.child.pid
    except OSError as v:
      retDict = S_ERROR(v)
      retDict['Value'] = (-1, '', str(v))
      return retDict
    except Exception as x:
      try:
        self.child.stdout.close()
        self.child.stderr.close()
      except Exception:
        pass
      retDict = S_ERROR(x)
      retDict['Value'] = (-1, '', str(x))
      return retDict
    try:
      self.bufferList = [["", 0], ["", 0]]
      initialTime = time.time()
      exitStatus = self.__poll(self.child.pid)
      # (0, 0) from waitpid(WNOHANG) means the child is still running
      while (0, 0) == exitStatus or exitStatus is None:
        retDict = self.__readFromCommand()
        if not retDict['OK']:
          return retDict
        if self.timeout and time.time() - initialTime > self.timeout:
          exitStatus = self.killChild()
          self.__readFromCommand()
          return self.__generateSystemCommandError(exitStatus,
                                                   "Timeout (%d seconds) for '%s' call" %
                                                   (self.timeout, cmdSeq))
        time.sleep(0.01)
        exitStatus = self.__poll(self.child.pid)
      # final drain of whatever is left in the pipes
      self.__readFromCommand()
      if exitStatus:
        exitStatus = exitStatus[1]
      # extract the exit code from the 16-bit waitpid status word.
      # NOTE(review): '/=' yields a float under Python 3; this assumes
      # Python 2 integer division — confirm before porting.
      if exitStatus >= 256:
        exitStatus /= 256
      return S_OK((exitStatus, self.bufferList[0][0], self.bufferList[1][0]))
    finally:
      try:
        self.child.stdout.close()
        self.child.stderr.close()
      except Exception:
        pass
  def getChildPID(self):
    """ child pid getter """
    return self.childPID
  def __readFromCommand(self):
    """ read whatever is available on the child's stdout and stderr """
    fdList = []
    for i in (self.child.stdout, self.child.stderr):
      try:
        if not i.closed:
          fdList.append(i.fileno())
      except Exception:
        self.log.exception("SUBPROCESS: readFromCommand exception")
    readSeq = self.__selectFD(fdList, True)
    if readSeq is False:
      return S_OK()
    if self.child.stdout.fileno() in readSeq:
      retDict = self.__readFromSystemCommandOutput(self.child.stdout, 0)
      if not retDict['OK']:
        return retDict
    if self.child.stderr.fileno() in readSeq:
      retDict = self.__readFromSystemCommandOutput(self.child.stderr, 1)
      if not retDict['OK']:
        return retDict
    return S_OK()
  def __callLineCallback(self, bufferIndex):
    """ invoke the line callback on the next complete line in
    bufferList[bufferIndex]; returns True if a line was processed """
    nextLineIndex = self.bufferList[bufferIndex][0][self.bufferList[bufferIndex][1]:].find("\n")
    if nextLineIndex > -1:
      try:
        self.callback(bufferIndex,
                      self.bufferList[bufferIndex][0][self.bufferList[bufferIndex][1]:
                                                      self.bufferList[bufferIndex][1] + nextLineIndex])
        # Each line processed is taken out of the buffer to prevent the limit from killing us
        nL = self.bufferList[bufferIndex][1] + nextLineIndex + 1
        self.bufferList[bufferIndex][0] = self.bufferList[bufferIndex][0][nL:]
        self.bufferList[bufferIndex][1] = 0
      except Exception:
        self.log.exception('Exception while calling callback function',
                           '%s' % self.callback.__name__)
        self.log.showStack()
        return False
      return True
    return False
def systemCall(timeout, cmdSeq, callbackFunction=None, env=None, bufferLimit=52428800):
  """
  Execute cmdSeq (a string or a sequence) directly, without invoking a
  shell, inside a Subprocess wrapper that may enforce a timeout.
  """
  spObject = Subprocess(timeout=timeout, bufferLimit=bufferLimit)
  if timeout > 0 and USE_WATCHDOG:
    # delegate timeout enforcement to a Watchdog-wrapped call
    watchdogCall = Watchdog(spObject.systemCall,
                            args=(cmdSeq, ),
                            kwargs={"callbackFunction": callbackFunction,
                                    "env": env,
                                    "shell": False})
    spObject.log.verbose('Subprocess Watchdog timeout set to %d' % timeout)
    return watchdogCall(timeout + 1)
  return spObject.systemCall(cmdSeq,
                             callbackFunction=callbackFunction,
                             env=env,
                             shell=False)
def shellCall(timeout, cmdSeq, callbackFunction=None, env=None, bufferLimit=52428800):
  """
  Execute cmdSeq (a string or a sequence) through /bin/sh, wrapped in a timeout.

  :param timeout: seconds after which the command is killed (<= 0 disables the watchdog)
  :param cmdSeq: command to run, as a string or an argument sequence
  :param callbackFunction: optional per-line output callback
  :param env: optional environment mapping for the child process
  :param bufferLimit: maximum bytes kept per output stream (default 50 MiB)
  :return: S_OK/S_ERROR structure produced by Subprocess.systemCall
  """
  if timeout > 0 and USE_WATCHDOG:
    # Run the call under a Watchdog so a hung child can be reaped.
    executor = Subprocess(timeout=timeout, bufferLimit=bufferLimit)
    watchedCall = Watchdog(executor.systemCall,
                           args=(cmdSeq,),
                           kwargs={"callbackFunction": callbackFunction,
                                   "env": env,
                                   "shell": True})
    executor.log.verbose('Subprocess Watchdog timeout set to %d' % timeout)
    return watchedCall(timeout + 1)
  executor = Subprocess(timeout, bufferLimit=bufferLimit)
  return executor.systemCall(cmdSeq,
                             callbackFunction=callbackFunction,
                             env=env,
                             shell=True)
def pythonCall(timeout, function, *stArgs, **stKeyArgs):
  """
  Execute *function* with the provided positional and keyword arguments,
  wrapped in a timeout, using the SubprocessExecutor machinery.

  :param timeout: seconds after which the call is aborted (<= 0 disables the watchdog)
  :param function: callable to execute in the child process
  :return: S_OK/S_ERROR structure produced by Subprocess.pythonCall
  """
  if timeout > 0 and USE_WATCHDOG:
    # Run the call under a Watchdog so a hung child can be reaped.
    executor = Subprocess(timeout=timeout)
    watchedCall = Watchdog(executor.pythonCall, args=(function,) + stArgs, kwargs=stKeyArgs)
    executor.log.verbose('Subprocess Watchdog timeout set to %d' % timeout)
    return watchedCall(timeout + 1)
  executor = Subprocess(timeout)
  return executor.pythonCall(function, *stArgs, **stKeyArgs)
def __getChildrenForPID(ppid):
  """
  Get a list of children pids for ppid.

  Tries psutil first; on any failure (psutil missing, access errors, ...)
  falls back to parsing the output of ``ps``.

  :param ppid: parent process id (int)
  :return: list of child pids (ints); may be empty
  """
  try:
    import psutil
  except ImportError:
    psutil = None
  if psutil is not None:
    try:
      childrenList = []
      for proc in psutil.process_iter():
        # BUGFIX: on psutil >= 2.0 ``Process.ppid`` is a method, so the old
        # ``proc.ppid == ppid`` compared a bound method to an int and was
        # always False, silently returning an empty list.
        procPPID = proc.ppid() if callable(proc.ppid) else proc.ppid
        if procPPID == ppid:
          childrenList.append(proc.pid)
      return childrenList
    except Exception:
      pass  # fall through to the ``ps``-based lookup below
  # Fallback: ask ps for the direct children of ppid.
  magicCmd = "ps --no-headers --ppid %d -o pid" % ppid
  exc = subprocess.Popen(magicCmd,
                         stdout=subprocess.PIPE,
                         shell=True,
                         close_fds=True)
  exc.wait()
  return [int(pid.strip()) for pid in exc.stdout.readlines() if pid.strip()]
def getChildrenPIDs(ppid, foreachFunc=None):
  """
  Recursively collect every descendant pid of *ppid* (depth-first, pre-order).

  :param ppid: parent process id
  :param foreachFunc: optional callable invoked with each pid as it is found
  :return: list of all descendant pids
  """
  allPIDs = []
  for childPID in __getChildrenForPID(ppid):
    allPIDs.append(childPID)
    if foreachFunc:
      foreachFunc(childPID)
    # Recurse into each child to pick up grandchildren and deeper.
    allPIDs.extend(getChildrenPIDs(childPID, foreachFunc))
  return allPIDs
| arrabito/DIRAC | Core/Utilities/Subprocess.py | Python | gpl-3.0 | 20,685 | [
"DIRAC"
] | 43658ac3d6267cafedfca689060108b458edcda0039fd7f9b5e999f9d35f0f74 |
# -*- coding: utf-8 -*-
"""
Output Plugin for WavPack
Copyright (c) 2006-2008 by Nyaochi
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA, or visit
http://www.gnu.org/copyleft/gpl.html .
"""
from celib import *
class WavPackOutput(OutputModule):
    """Output plugin that pipes decoded audio into the ``wavpack`` encoder."""

    # (tag label, track metadata key), in the exact order the -w options are emitted.
    _TAG_FIELDS = (
        ('Title=', 'TITLE'),
        ('Artist=', 'ARTIST'),
        ('Album=', 'ALBUM'),
        ('Albumartist=', 'ALBUMARTIST'),
        ('Track=', 'TRACKNUMBER'),
        ('Totaltracks=', 'TOTALTRACKS'),
        ('Disc=', 'DISCNUMBER'),
        ('Totaldiscs=', 'TOTALDISCS'),
        ('Genre=', 'GENRE'),
        ('Date=', 'DATE'),
        ('Comment=', 'COMMENT'),
    )

    def __init__(self):
        self.name = 'wavpack'
        self.is_utf8 = False
        self.ext = '.wv'
        self.cmd = 'wavpack'
        self.doc = OutputModuleDocument()
        self.doc.tools = (
            'WavPack Encoder',
            )
        self.doc.commands = (self.cmd,)
        self.doc.limitations = None
        self.doc.tags = (
            'TITLE','ARTIST','ALBUM','ALBUMARTIST','GENRE','DATE',
            'TRACKNUMBER','TOTALTRACKS','DISCNUMBER','TOTALDISCS','COMMENT'
            )

    def handle_track(self, track, options):
        """Build and execute the decode-and-encode pipeline for one track."""
        # Source decoder command, piped into the wavpack encoder.
        parts = [track['input_cmdline'], '|', qstr(self.cmd)]
        # APEv2 tag options, in fixed order.
        for label, key in self._TAG_FIELDS:
            parts.append(optstr3('-w', label, track.get(key)))
        parts.append(track.get('output_option'))
        parts.append(track.get('output_option_tag'))
        # '-' reads audio from stdin; last argument is the output file.
        parts.append('-')
        parts.append(qstr(track['output']))
        return self.console.execute(args_to_string(parts))
| rinrinne/cueproc-alternative | src/ce_wavpack.py | Python | gpl-2.0 | 2,754 | [
"VisIt"
] | b92df44bb56881cb42c8263e219e5f88a90b54b0073eaf958f31f8a82a0df46d |
# This file is part of the myhdl library, a Python package for using
# Python as a Hardware Description Language.
#
# Copyright (C) 2003-2008 Jan Decaluwe
#
# The myhdl library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of the
# License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
""" myhdl _extractHierarchy module.
"""
import sys
import inspect
from inspect import currentframe, getframeinfo, getouterframes
import re
import string
from types import GeneratorType
import linecache
from myhdl import ExtractHierarchyError, ToVerilogError, ToVHDLError
from myhdl._Signal import _Signal, _isListOfSigs
from myhdl._util import _isGenFunc, _flatten, _genfunc
from myhdl._misc import _isGenSeq
from myhdl._resolverefs import _resolveRefs
_profileFunc = None
class _error:
    """Namespace for the hierarchy-extraction error message strings."""
    NoInstances = "No instances found"
    InconsistentHierarchy = "Inconsistent hierarchy - are all instances returned ?"
    InconsistentToplevel = "Inconsistent top level %s for %s - should be 1"
class _Instance(object):
    """Record for one instance discovered while walking the design hierarchy."""

    __slots__ = ['level', 'obj', 'subs', 'sigdict', 'memdict', 'name', 'func', 'argdict', 'objdict']

    def __init__(self, level, obj, subs, sigdict, memdict, func, argdict, objdict=None):
        for attr, value in (('level', level), ('obj', obj), ('subs', subs),
                            ('sigdict', sigdict), ('memdict', memdict),
                            ('func', func), ('argdict', argdict)):
            setattr(self, attr, value)
        # With __slots__, 'objdict' stays unset (AttributeError on access)
        # unless a truthy value is supplied -- preserved deliberately.
        if objdict:
            self.objdict = objdict
_memInfoMap = {}
class _MemInfo(object):
    """Bookkeeping for a list of signals (a "memory") found in the design."""

    __slots__ = ['mem', 'name', 'elObj', 'depth', '_used', '_driven', '_read']

    def __init__(self, mem):
        self.mem = mem
        self.depth = len(mem)
        # The first element is taken as representative of all entries.
        self.elObj = mem[0]
        # Filled in later during name resolution / usage analysis.
        self.name = None
        self._used = False
        self._driven = None
        self._read = None
def _getMemInfo(mem):
    """Return the _MemInfo registered for *mem* (KeyError if never registered)."""
    return _memInfoMap[id(mem)]
def _makeMemInfo(mem):
    """Return the _MemInfo for *mem*, creating and registering it on first use."""
    key = id(mem)
    info = _memInfoMap.get(key)
    if info is None:
        info = _MemInfo(mem)
        _memInfoMap[key] = info
    return info
def _isMem(mem):
    """Tell whether *mem* has been registered as a memory."""
    return id(mem) in _memInfoMap
# Per-HDL registries mapping id(instance) -> user-defined code object
# (filled by _addUserCode, cleared by _HierExtr.__init__).
_userCodeMap = {'verilog' : {},
'vhdl' : {}
}
class _UserCode(object):
    """Base class for user-defined HDL code attached to a MyHDL instance.

    Subclasses provide raiseError() (HDL-specific exception) and may override
    _interpolate() (how the code template is expanded with the namespace).
    """

    __slots__ = ['code', 'namespace', 'funcname', 'func', 'sourcefile', 'sourceline']

    def __init__(self, code, namespace, funcname, func, sourcefile, sourceline):
        self.code = code
        self.namespace = namespace
        self.sourcefile = sourcefile
        self.func = func
        self.funcname = funcname
        self.sourceline = sourceline

    def __str__(self):
        """Return the interpolated user code, framed by newlines."""
        try:
            code = self._interpolate()
        # BUGFIX: was a bare ``except:`` which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed to Exception.
        except Exception:
            # Renamed from ``type``/``value`` to avoid shadowing the builtin.
            excType, excValue = sys.exc_info()[:2]
            info = "in file %s, function %s starting on line %s:\n " % \
                (self.sourcefile, self.funcname, self.sourceline)
            msg = "%s: %s" % (excType, excValue)
            self.raiseError(msg, info)
        code = "\n%s\n" % code
        return code

    def _interpolate(self):
        # string.Template: $name placeholders substituted from the namespace.
        return string.Template(self.code).substitute(self.namespace)
class _UserCodeDepr(_UserCode):
    """Deprecated user-code flavour interpolated with the ``%`` operator."""

    def _interpolate(self):
        # Old-style %(name)s substitution from the captured namespace.
        return self.code % self.namespace
class _UserVerilogCode(_UserCode):
    """User-defined Verilog code; errors surface as ToVerilogError."""

    def raiseError(self, msg, info):
        raise ToVerilogError("Error in user defined Verilog code", msg, info)
class _UserVhdlCode(_UserCode):
    """User-defined VHDL code; errors surface as ToVHDLError."""

    def raiseError(self, msg, info):
        raise ToVHDLError("Error in user defined VHDL code", msg, info)
class _UserVerilogCodeDepr(_UserVerilogCode, _UserCodeDepr):
    """Deprecated ``__verilog__`` user code (``%``-style interpolation)."""
class _UserVhdlCodeDepr(_UserVhdlCode, _UserCodeDepr):
    """Deprecated ``__vhdl__`` user code (``%``-style interpolation)."""
class _UserVerilogInstance(_UserVerilogCode):
    """User-defined Verilog module instantiation rendered from the function signature."""

    def __str__(self):
        # One port connection per signal-valued argument of the wrapped function.
        argnames = inspect.getargspec(self.func)[0]
        ports = []
        for argname in argnames:
            obj = self.namespace.get(argname)
            if isinstance(obj, _Signal):
                ports.append("\n .%s(%s)" % (argname, obj._name))
        return "%s %s(" % (self.funcname, self.code) + ",".join(ports) + "\n);\n\n"
class _UserVhdlInstance(_UserVhdlCode):
    """User-defined VHDL entity instantiation rendered from the function signature."""

    def __str__(self):
        # One port-map association per signal-valued argument of the wrapped function.
        argnames = inspect.getargspec(self.func)[0]
        ports = []
        for argname in argnames:
            obj = self.namespace.get(argname)
            if isinstance(obj, _Signal):
                ports.append("\n %s=>%s" % (argname, obj._name))
        header = "%s: entity work.%s(MyHDL)\n" % (self.code, self.funcname)
        return header + " port map (" + ",".join(ports) + "\n );\n\n"
def _addUserCode(specs, arg, funcname, func, frame):
    """Register the user-defined HDL code found in *specs* for instance *arg*.

    *specs* maps attribute names (verilog_code, vhdl_instance, __verilog__, ...)
    to the user-provided code; the frame supplies the interpolation namespace.
    """
    classMap = {
        '__verilog__': _UserVerilogCodeDepr,
        '__vhdl__': _UserVhdlCodeDepr,
        'verilog_code': _UserVerilogCode,
        'vhdl_code': _UserVhdlCode,
        'verilog_instance': _UserVerilogInstance,
        'vhdl_instance': _UserVhdlInstance,
    }
    # Interpolation namespace: globals overridden by locals of the defining frame.
    namespace = frame.f_globals.copy()
    namespace.update(frame.f_locals)
    sourcefile = inspect.getsourcefile(frame)
    sourceline = inspect.getsourcelines(frame)[1]
    for hdl in _userCodeMap:
        # Precedence: <hdl>_instance > <hdl>_code > __<hdl>__ (deprecated).
        # XXX add warning logic
        spec = None
        for candidate in ("%s_instance" % hdl, "%s_code" % hdl, "__%s__" % hdl):
            if candidate in specs:
                spec = candidate
                break
        if spec:
            assert id(arg) not in _userCodeMap[hdl]
            _userCodeMap[hdl][id(arg)] = classMap[spec](specs[spec], namespace, funcname,
                                                        func, sourcefile, sourceline)
class _CallFuncVisitor(object):
"""Visitor mapping call-expression line numbers to assignment line numbers.

NOTE(review): looks like legacy/unused code -- it references ``ast.CallFunc``
(a node name from the removed ``compiler`` package) but ``ast`` is not
imported in this file, so visitAssign would raise NameError if ever called;
confirm before relying on it.
"""
def __init__(self):
# linemap: lineno of the called name -> lineno of the enclosing assignment
self.linemap = {}
def visitAssign(self, node):
if isinstance(node.expr, ast.CallFunc):
self.lineno = None
self.visit(node.expr)
self.linemap[self.lineno] = node.lineno
def visitName(self, node):
self.lineno = node.lineno
class _HierExtr(object):
"""Extract the instance hierarchy of a MyHDL design.

Runs the top-level function *dut* under a sys.setprofile hook; every
function return that yields generators is recorded as an _Instance.
Results: self.top (the elaborated design), self.hierarchy (list of
_Instance, top first) and self.absnames (id(obj) -> absolute name).
"""
def __init__(self, name, dut, *args, **kwargs):
global _profileFunc
# Reset module-level registries from any previous extraction.
_memInfoMap.clear()
for hdl in _userCodeMap:
_userCodeMap[hdl].clear()
# Decorator/helper frames that must not count as hierarchy levels.
self.skipNames = ('always_comb', 'instance', \
'always_seq', '_always_seq_decorator', \
'always', '_always_decorator', \
'instances', \
'processes', 'posedge', 'negedge')
self.skip = 0
self.hierarchy = hierarchy = []
self.absnames = absnames = {}
self.level = 0
# Elaborate the design with the profiler hook installed; the hook
# appends one _Instance per relevant function return.
_profileFunc = self.extractor
sys.setprofile(_profileFunc)
_top = dut(*args, **kwargs)
sys.setprofile(None)
if not hierarchy:
raise ExtractHierarchyError(_error.NoInstances)
self.top = _top
# streamline hierarchy
hierarchy.reverse()
# walk the hierarchy to define relative and absolute names
names = {}
top_inst = hierarchy[0]
obj, subs = top_inst.obj, top_inst.subs
names[id(obj)] = name
absnames[id(obj)] = name
if not top_inst.level == 1:
raise ExtractHierarchyError(_error.InconsistentToplevel % (top_inst.level, name))
for inst in hierarchy:
obj, subs = inst.obj, inst.subs
if id(obj) not in names:
raise ExtractHierarchyError(_error.InconsistentHierarchy)
inst.name = names[id(obj)]
tn = absnames[id(obj)]
# Propagate names to sub-instances; list elements get an index suffix.
for sn, so in subs:
names[id(so)] = sn
absnames[id(so)] = "%s_%s" % (tn, sn)
if isinstance(so, (tuple, list)):
for i, soi in enumerate(so):
sni = "%s_%s" % (sn, i)
names[id(soi)] = sni
absnames[id(soi)] = "%s_%s_%s" % (tn, sn, i)
def extractor(self, frame, event, arg):
"""sys.setprofile hook: track call depth and record generator-producing returns."""
if event == "call":
funcname = frame.f_code.co_name
# skip certain functions
if funcname in self.skipNames:
self.skip +=1
if not self.skip:
self.level += 1
elif event == "return":
funcname = frame.f_code.co_name
func = frame.f_globals.get(funcname)
if func is None:
# Didn't find a func in the global space, try the local "self"
# argument and see if it has a method called *funcname*
obj = frame.f_locals.get('self')
if hasattr(obj, funcname):
func = getattr(obj, funcname)
if not self.skip:
isGenSeq = _isGenSeq(arg)
if isGenSeq:
# Collect any user-defined HDL code attached via frame locals
# or function attributes (code / instance / deprecated forms).
specs = {}
for hdl in _userCodeMap:
spec = "__%s__" % hdl
if spec in frame.f_locals and frame.f_locals[spec]:
specs[spec] = frame.f_locals[spec]
spec = "%s_code" % hdl
if func and hasattr(func, spec) and getattr(func, spec):
specs[spec] = getattr(func, spec)
spec = "%s_instance" % hdl
if func and hasattr(func, spec) and getattr(func, spec):
specs[spec] = getattr(func, spec)
if specs:
_addUserCode(specs, arg, funcname, func, frame)
# building hierarchy only makes sense if there are generators
if isGenSeq and arg:
sigdict = {}
memdict = {}
argdict = {}
if func:
arglist = inspect.getargspec(func).args
else:
arglist = []
# Symbol table of the returning frame: globals overridden by locals.
symdict = frame.f_globals.copy()
symdict.update(frame.f_locals)
cellvars = []
cellvars.extend(frame.f_code.co_cellvars)
#All nested functions will be in co_consts
if func:
local_gens = []
consts = func.func_code.co_consts
for item in _flatten(arg):
genfunc = _genfunc(item)
if genfunc.func_code in consts:
local_gens.append(item)
if local_gens:
objlist = _resolveRefs(symdict, local_gens)
cellvars.extend(objlist)
#for dict in (frame.f_globals, frame.f_locals):
for n, v in symdict.items():
# extract signals and memories
# also keep track of whether they are used in generators
# only include objects that are used in generators
## if not n in cellvars:
## continue
if isinstance(v, _Signal):
sigdict[n] = v
if n in cellvars:
v._markUsed()
if _isListOfSigs(v):
m = _makeMemInfo(v)
memdict[n] = m
if n in cellvars:
m._used = True
# save any other variable in argdict
if (n in arglist) and (n not in sigdict) and (n not in memdict):
argdict[n] = v
# Sub-instances: frame locals that are identical (by identity)
# to the returned object or one of its direct elements.
subs = []
for n, sub in frame.f_locals.items():
for elt in _inferArgs(arg):
if elt is sub:
subs.append((n, sub))
inst = _Instance(self.level, arg, subs, sigdict, memdict, func, argdict)
self.hierarchy.append(inst)
self.level -= 1
if funcname in self.skipNames:
self.skip -= 1
def _inferArgs(arg):
    """Return *arg* itself plus, when it is a tuple or list, its direct elements."""
    candidates = [arg]
    if isinstance(arg, (tuple, list)):
        candidates.extend(arg)
    return candidates
| palashahuja/myhdl | myhdl/_extractHierarchy.py | Python | lgpl-2.1 | 13,013 | [
"VisIt"
] | d62b23340b6d89326ade2e77a3812eb6cb0872d2337f355625ccd849fa2533aa |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# This is the class you derive to create a plugin
from airflow.plugins_manager import AirflowPlugin
from flask import Blueprint
from flask_admin import BaseView, expose
from flask_admin.base import MenuLink
# Importing base classes that we need to derive
from airflow.hooks.base_hook import BaseHook
from airflow.models import BaseOperator
from airflow.sensors.base_sensor_operator import BaseSensorOperator
from airflow.executors.base_executor import BaseExecutor
# Will show up under airflow.hooks.test_plugin.PluginHook
class PluginHook(BaseHook):
    """Stub hook; registered as airflow.hooks.test_plugin.PluginHook."""
# Will show up under airflow.operators.test_plugin.PluginOperator
class PluginOperator(BaseOperator):
    """Stub operator; registered as airflow.operators.test_plugin.PluginOperator."""
# Will show up under airflow.sensors.test_plugin.PluginSensorOperator
class PluginSensorOperator(BaseSensorOperator):
    """Stub sensor; registered as airflow.sensors.test_plugin.PluginSensorOperator."""
# Will show up under airflow.executors.test_plugin.PluginExecutor
class PluginExecutor(BaseExecutor):
    """Stub executor; registered as airflow.executors.test_plugin.PluginExecutor."""
# Will show up under airflow.macros.test_plugin.plugin_macro
def plugin_macro():
    """Placeholder macro; exposed as airflow.macros.test_plugin.plugin_macro."""
    return None
# Creating a flask admin BaseView
class TestView(BaseView):
    """Flask-Admin view used to exercise plugin admin-view registration."""

    @expose('/')
    def test(self):
        # Template resolved through the plugin blueprint's template folder:
        # airflow/plugins/templates/test_plugin/test.html
        greeting = "Hello galaxy!"
        return self.render("test_plugin/test.html", content=greeting)
# View instance registered below via AirflowTestPlugin.admin_views.
v = TestView(category="Test Plugin", name="Test View")
# Creating a flask blueprint to integrate the templates and static folder
bp = Blueprint(
"test_plugin", __name__,
template_folder='templates', # registers airflow/plugins/templates as a Jinja template folder
static_folder='static',
static_url_path='/static/test_plugin')
# Menu link shown under the "Test Plugin" category in the web UI.
ml = MenuLink(
category='Test Plugin',
name='Test Menu Link',
url='https://airflow.incubator.apache.org/')
# Defining the plugin class
# Defining the plugin class
class AirflowTestPlugin(AirflowPlugin):
"""Plugin definition tying every stub component above into one plugin.

Each class attribute below is read by airflow.plugins_manager to expose
the listed objects under the airflow.<kind>.test_plugin namespaces.
"""
name = "test_plugin"
operators = [PluginOperator]
sensors = [PluginSensorOperator]
hooks = [PluginHook]
executors = [PluginExecutor]
macros = [plugin_macro]
admin_views = [v]
flask_blueprints = [bp]
menu_links = [ml]
| akosel/incubator-airflow | tests/plugins/test_plugin.py | Python | apache-2.0 | 2,901 | [
"Galaxy"
] | 85edb103b7d4766f8fe57a3d5b622a54a7c76b5e32f38a24e5af67a4016d81c7 |
#!/usr/bin/env python
""" update local cfg
"""
from __future__ import print_function
import os
from DIRAC.Core.Base import Script
Script.setUsageMessage('\n'.join([__doc__.split('\n')[1],
                                  'Usage:',
                                  ' %s [option|cfgFile] ... DB ...' % Script.scriptName]))
Script.parseCommandLine()
args = Script.getPositionalArgs()
setupName = args[0]

# Where to store outputs (sandbox files served by the SandboxStore)
if not os.path.isdir('%s/sandboxes' % setupName):
  os.makedirs('%s/sandboxes' % setupName)

# now updating the CS
from DIRAC.ConfigurationSystem.Client.CSAPI import CSAPI
csAPI = CSAPI()


def createSectionOrExit(sectionPath):
  """Create CS section *sectionPath*; print the error and abort the script on failure."""
  res = csAPI.createSection(sectionPath)
  if not res['OK']:
    print(res['Message'])
    exit(1)


def setOptions(optionPairs):
  """Set every (optionPath, value) pair of *optionPairs* in the CS."""
  for optionPath, value in optionPairs:
    csAPI.setOption(optionPath, value)


setOptions([
    ('Systems/WorkloadManagement/Production/Services/SandboxStore/BasePath', '%s/sandboxes' % setupName),
    ('Systems/WorkloadManagement/Production/Services/SandboxStore/LogLevel', 'DEBUG'),
])

# Now setting a SandboxSE as the following:
#     ProductionSandboxSE
#     {
#       BackendType = DISET
#       AccessProtocol = dips
#       DIP
#       {
#         Host = localhost
#         Port = 9196
#         ProtocolName = DIP
#         Protocol = dips
#         Path = <setupName>/sandboxes
#         Access = remote
#       }
#     }
createSectionOrExit('Resources/StorageElements/')
createSectionOrExit('Resources/StorageElements/ProductionSandboxSE')
setOptions([
    ('Resources/StorageElements/ProductionSandboxSE/BackendType', 'DISET'),
    ('Resources/StorageElements/ProductionSandboxSE/AccessProtocol', 'dips'),
])
createSectionOrExit('Resources/StorageElements/ProductionSandboxSE/DIP')
setOptions([
    ('Resources/StorageElements/ProductionSandboxSE/DIP/Host', 'localhost'),
    ('Resources/StorageElements/ProductionSandboxSE/DIP/Port', '9196'),
    ('Resources/StorageElements/ProductionSandboxSE/DIP/ProtocolName', 'DIP'),
    ('Resources/StorageElements/ProductionSandboxSE/DIP/Protocol', 'dips'),
    ('Resources/StorageElements/ProductionSandboxSE/DIP/Access', 'remote'),
    ('Resources/StorageElements/ProductionSandboxSE/DIP/Path', '%s/sandboxes' % setupName),
])

# Now setting a FileCatalogs section as the following:
#     FileCatalogs
#     {
#       FileCatalog   { AccessType = Read-Write; Status = Active; Master = True }
#       TSCatalog     { CatalogType = TSCatalog; AccessType = Write; Status = Active;
#                       CatalogURL = Transformation/TransformationManager }
#     }
createSectionOrExit('Resources/FileCatalogs/')
createSectionOrExit('Resources/FileCatalogs/FileCatalog')
setOptions([
    ('Resources/FileCatalogs/FileCatalog/AccessType', 'Read-Write'),
    ('Resources/FileCatalogs/FileCatalog/Status', 'Active'),
    ('Resources/FileCatalogs/FileCatalog/Master', 'True'),
])
createSectionOrExit('Resources/FileCatalogs/TSCatalog')
setOptions([
    ('Resources/FileCatalogs/TSCatalog/CatalogType', 'TSCatalog'),
    ('Resources/FileCatalogs/TSCatalog/AccessType', 'Write'),
    ('Resources/FileCatalogs/TSCatalog/Status', 'Active'),
    ('Resources/FileCatalogs/TSCatalog/CatalogURL', 'Transformation/TransformationManager'),
])

# Now setting up the DIRAC.Jenkins.ch site with a Test CE and one queue.
for sectionPath in ['Resources/Sites/DIRAC/',
                    'Resources/Sites/DIRAC/DIRAC.Jenkins.ch',
                    'Resources/Sites/DIRAC/DIRAC.Jenkins.ch/jenkins.cern.ch',
                    # BUGFIX: a missing comma previously fused the next two string
                    # literals into one bogus path, so neither section was created.
                    'Resources/Sites/DIRAC/DIRAC.Jenkins.ch/jenkins.cern.ch/Queues',
                    'Resources/Sites/DIRAC/DIRAC.Jenkins.ch/jenkins.cern.ch/Queues/jenkins-queue_not_important']:
  createSectionOrExit(sectionPath)
# NOTE(review): the section paths above omit the 'CEs/' level used by the
# options below -- presumably setOption creates missing parent sections;
# confirm against CSAPI behaviour.
setOptions([
    ('Resources/Sites/DIRAC/DIRAC.Jenkins.ch/CEs/jenkins.cern.ch/CEType', 'Test'),
    ('Resources/Sites/DIRAC/DIRAC.Jenkins.ch/CEs/jenkins.cern.ch/Queues/jenkins-queue_not_important/maxCPUTime',
     '200000'),
    ('Resources/Sites/DIRAC/DIRAC.Jenkins.ch/CEs/jenkins.cern.ch/Queues/jenkins-queue_not_important/SI00',
     '2400'),
])

# Now setting up the FTS endpoints:
#     FTSEndpoints { FTS3 { JENKINS-FTS3 = https://jenkins-fts3.cern.ch:8446 } }
for sectionPath in ['Resources/FTSEndpoints/',
                    'Resources/FTSEndpoints/FTS3/']:
  createSectionOrExit(sectionPath)
csAPI.setOption('Resources/FTSEndpoints/FTS3/JENKINS-FTS3', 'https://jenkins-fts3.cern.ch:8446')

# Now setting the ResourceStatus (RSS) configuration and policies inside
# /Operations/Defaults (Config options, StatusTypes, and three policies:
# AlwaysActiveForResource, AlwaysBannedForSE1SE2, AlwaysBannedForSite).
for sectionPath in ['Operations/',
                    'Operations/Defaults',
                    'Operations/Defaults/ResourceStatus',
                    'Operations/Defaults/ResourceStatus/Config']:
  createSectionOrExit(sectionPath)
setOptions([
    ('Operations/Defaults/ResourceStatus/Config/Cache', '600'),
    ('Operations/Defaults/ResourceStatus/Config/State', 'Active'),
    ('Operations/Defaults/ResourceStatus/Config/FromAddress', 'fstagni@cern.ch'),
    ('Operations/Defaults/ResourceStatus/Config/notificationGroups', 'ShiftersGroup'),
])
createSectionOrExit('Operations/Defaults/ResourceStatus/Config/StatusTypes')
setOptions([
    ('Operations/Defaults/ResourceStatus/Config/StatusTypes/default', 'all'),
    ('Operations/Defaults/ResourceStatus/Config/StatusTypes/StorageElement',
     'ReadAccess,WriteAccess,CheckAccess,RemoveAccess'),
])
createSectionOrExit('Operations/Defaults/ResourceStatus/Policies')
createSectionOrExit('Operations/Defaults/ResourceStatus/Policies/AlwaysActiveForResource')
csAPI.setOption('Operations/Defaults/ResourceStatus/Policies/AlwaysActiveForResource/policyType', 'AlwaysActive')
createSectionOrExit('Operations/Defaults/ResourceStatus/Policies/AlwaysActiveForResource/matchParams')
csAPI.setOption('Operations/Defaults/ResourceStatus/Policies/AlwaysActiveForResource/matchParams/element', 'Resource')
createSectionOrExit('Operations/Defaults/ResourceStatus/Policies/AlwaysBannedForSE1SE2')
csAPI.setOption('Operations/Defaults/ResourceStatus/Policies/AlwaysBannedForSE1SE2/policyType', 'AlwaysBanned')
createSectionOrExit('Operations/Defaults/ResourceStatus/Policies/AlwaysBannedForSE1SE2/matchParams')
csAPI.setOption('Operations/Defaults/ResourceStatus/Policies/AlwaysBannedForSE1SE2/matchParams/name', 'SE1,SE2')
createSectionOrExit('Operations/Defaults/ResourceStatus/Policies/AlwaysBannedForSite')
# BUGFIX: this createSection result was previously not checked for success.
createSectionOrExit('Operations/Defaults/ResourceStatus/Policies/AlwaysBannedForSite/matchParams')
setOptions([
    ('Operations/Defaults/ResourceStatus/Policies/AlwaysBannedForSite/policyType', 'AlwaysBanned'),
    ('Operations/Defaults/ResourceStatus/Policies/AlwaysBannedForSite/matchParams/element', 'Site'),
])

# Now setting the catalog list in Operations/Defaults/Services/Catalogs/CatalogList
for sectionPath in ['Operations/Defaults/Services',
                    'Operations/Defaults/Services/Catalogs',
                    'Operations/Defaults/Services/Catalogs/CatalogList']:
  createSectionOrExit(sectionPath)
csAPI.setOption('Operations/Defaults/Services/Catalogs/CatalogList', 'FileCatalog, TSCatalog')

# Now setting the Registry section:
#     Registry { VO { Jenkins { VOMSName = myVOMS } } }
for sectionPath in ['Registry',
                    'Registry/VO/',
                    'Registry/VO/Jenkins',
                    'Registry/VO/Jenkins/VOMSName']:
  createSectionOrExit(sectionPath)
csAPI.setOption('Registry/VO/Jenkins/VOMSName', 'myVOMS')

# Final action: commit in CS
csAPI.commit()
| fstagni/DIRAC | tests/Jenkins/dirac-cfg-update-server.py | Python | gpl-3.0 | 10,655 | [
"DIRAC"
] | 8e201cec82caf7bd072363e1b3ceb0b4854ba22cdeeb208c6dd5b7a79741e955 |
from distutils.core import setup
GNACS_VERSION_NUMBER = "1.0.1"
if '__main__' == __name__:
setup(
name='gnacs',
version=GNACS_VERSION_NUMBER,
author='Scott Hendrickson, Josh Montague, Jinsub Hong, Jeff Kolb, Brian Lehman, Fiona Pigott',
author_email='drskippy@twitter.com',
packages=['acscsv'],
scripts=['gnacs.py'],
url='https://github.com/DrSkippy/Gnacs',
download_url='https://github.com/DrSkippy/Gnacs/tags/%s'%(GNACS_VERSION_NUMBER),
license='LICENSE.txt',
description='Gnip normalized activity JSON to csv parser (Twitter, Disqus Comments, and Wordpress Posts and Comments)',
install_requires=[
"ujson >= 1.2",
]
)
| DrSkippy/Gnacs | setup.py | Python | bsd-2-clause | 767 | [
"Brian"
] | e763bcd3f48ab0bf5746f7177be7143d9e2f53f0056561980293f1fe01d9fae4 |
from __future__ import unicode_literals
import json
import requests
from bs4 import BeautifulSoup
from errors import AuthenticationError
HOME_URL = 'https://mol.medicover.pl/'
LOGIN_URL = 'https://mol.medicover.pl/Users/Account/LogOn'
FORM_URL = 'https://mol.medicover.pl/api/MyVisits/SearchFreeSlotsToBook/FormModel?'
AVAILABLE_VISITS_URL = 'https://mol.medicover.pl/api/MyVisits/SearchFreeSlotsToBook?language=pl-PL'
BOOK_VISIT_URL = 'https://mol.medicover.pl/MyVisits/BookingAppointmentProcess/Confirm'
class API(object):
def __init__(self, user, password):
self.session = requests.Session()
self.session.headers = {'User-Agent': 'Mozilla/5.0', 'X-Requested-With': 'XMLHttpRequest'}
self.log_in(user, password)
def _get_verification_token(self):
response = self.session.get(HOME_URL)
response.raise_for_status()
parsed_html = BeautifulSoup(response.content, 'html.parser')
verification_token = parsed_html.select('input[name="__RequestVerificationToken"]')[0]['value']
return verification_token
def log_in(self, user, password):
payload = {
'userNameOrEmail': user,
'password': password,
'__RequestVerificationToken': self._get_verification_token()
}
response = self.session.post(LOGIN_URL, data=payload, allow_redirects=False)
if response.status_code != 302: # medicover backend redirects on successful login
raise AuthenticationError
def get_form_data(self, request_params=None):
if request_params is None:
request_params = {}
response = self.session.get(FORM_URL, params=request_params)
return json.loads(response.content)
def get_available_visits(self, request_data=None):
if request_data is None:
request_data = {}
response = self.session.post(AVAILABLE_VISITS_URL, data=request_data)
available_visits = response.json()['items']
return available_visits
def book_visit(self, visit_id):
# Parsing HTML is required to get the verification token and visit data from the form tag
confirmation_page = self.session.get(BOOK_VISIT_URL, params={'id': visit_id})
parsed_html = BeautifulSoup(confirmation_page.content, 'html.parser')
form = parsed_html.find('form', {'action': '/MyVisits/BookingAppointmentProcess/Confirm'})
input_tags = form.find_all('input')
post_data = {input_tag['name']: input_tag['value'] for input_tag in input_tags}
response = self.session.post(BOOK_VISIT_URL, data=post_data)
return response.ok
| jakubkbak/medicover-cli | medicover/api.py | Python | mit | 2,641 | [
"VisIt"
] | 0234e798c750c87ade8ed24897cc6869f6b074bb5c330eab6a602fca3d9d5106 |
#!/usr/bin/python -u
# -*- coding: utf-8 -*-
##### nohup ./infiniCharges.py > output.dat &
################################################################################
#
# InfiniCharges: A program to generate partial charges for periodic systems
# Copyright (C) 2015 Andrea Gabrieli and Marco Sant
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
#
# See also http://www.fsf.org/licensing/licenses/gpl.html
#
# InfiniCharges, including its sources and pointers to the authors
# can be found at http://www.physchem.uniss.it/cpc/
#
# Contact Address:
# agabrieli@uniss.it
# msant@uniss.it
#
################################################################################
################################################################################
#
# InfiniCharges is a computer program for generating reliable
# partial charges for molecular simulations in periodic systems.
# It relies on the DM-REPEAT method where the stability of the
# resulting charges, over a large set of fitting regions, is obtained
# through the simultaneous fit of multiple electrostatic potential (ESP)
# configurations together with the total dipole fluctuations (TDF).
#
# This program performs the following kinds of fits:
# M-REPEAT (also standard REPEAT)
# DM-REPEAT (also D-REPEAT)
# PARABOLIC RESTRAINED M-REPEAT
# "RAPPE-GODDARD LIKE" RESTRAINED M-REPEAT
#
################################################################################
import numpy as np
from numpy.linalg import lstsq
import ctypes
from os import getcwd, getpid, path, makedirs
from time import time
from datetime import datetime, timedelta
from collections import Counter
from dir_atautils.atautils import atautils
from settings import settings, Tiny, ata_prefix
# Start-up banner (ASCII art) and program version, printed as a side effect
# of running this module (Python 2 print statements).
# NOTE(review): a few non-ASCII glyphs in the art below ("⚡", "՛") look like
# mojibake from an encoding round-trip -- confirm against the upstream
# sources before "fixing" them; they are runtime output, not comments.
print "________ ___________ __________________ "
print "____ _/________ __/_(_)________(_)_⚡ ____/_/ /________ ______________ _____________"
print " __ /__ __ \_ /_ _ /_ __ \_ /_/ / __/ __ \ ՛__ `/_/ ___/_ __ `/ _ \_ ___/"
print "__/ / _ / / / __/_ /_ / / / / / /___ _/ / / // /_/ /_/ / ___ /_/ // __/(__ ) "
print "/___/ /_/ /_//_/ /_/ /_/ /_//_/ \____/ /_/ /_/ \__,_/ /_/ __\__, / \___//____/ "
print " /____/ "
print ""
print "Version 1.0"
print ""
def read_xyz_tr_data(file_unit, starting_line, end_line, names_col, x_col, y_col, z_col):
    """
    Return data taken from a snapshot of an xyz like file.

    Parameters
    ----------
    file_unit : file object
        pointer to an already opened file
    starting_line : int
        first line in the file to be read (1 based)
    end_line : int
        last line in the file to be read (1 based)
    names_col : int
        column containing the atom types (1 based)
    x_col, y_col, z_col : int
        columns containing the x, y and z coordinates (1 based)

    Returns
    -------
    names : list of size N = (end_line - starting_line + 1)
        the atom types
    x, y, z : lists of size N = (end_line - starting_line + 1)
        the x, y and z coordinates as floats

    Examples
    --------
    Read from inf (already opened) from line 3 through 100 (included), taking
    from column 1 the atom types and from columns 2 through 4 the x, y and z
    coordinates, respectively:

        names, x, y, z = read_xyz_tr_data(inf, 3, 100, 1, 2, 3, 4)
    """
    names = []
    x = []
    y = []
    z = []
    # Skip the header lines preceding the data section.  The original
    # implementation read every line and filtered with a redundant
    # "i < end_line" test (always true inside range(end_line)); splitting
    # the loop makes the skip/parse phases explicit.
    for _ in range(starting_line - 1):
        file_unit.readline()
    # Parse the requested columns from each data line.
    for _ in range(end_line - starting_line + 1):
        s_line = file_unit.readline().split()
        names.append(s_line[names_col - 1])
        x.append(float(s_line[x_col - 1]))
        y.append(float(s_line[y_col - 1]))
        z.append(float(s_line[z_col - 1]))
    return names, x, y, z
def read_full_traj_tdf_auto(data):
    """
    Return the ATAs for the TDF needed to solve the linear system.

    This function requires only the names of the files to be processed,
    which are contained in "data".

    Parameters
    ----------
    data : instance of class Settings
        object containing the filename of a trajectory (data.primitive_pos),
        the corresponding total dipole moments (data.primitive_tdf),
        and a logical variable telling if the latter
        comes from CP2K (data.tdf_from_cp2k).

    Returns
    -------
    atb : (N,) float ndarray
        dot product between the transposed model data matrix (of size N,M)
        and the reference data column matrix (of size M)
    ata : (N,N) float ndarray
        dot product between the transposed model data matrix (of size N,M)
        and itself (of size M,N)
    btb : float
        dot product between the transposed reference data column matrix and
        itself, this is the magnitude of the reference data
    n_atoms : int
        number of system atoms
    nstep : int
        number of system configurations

    Notes
    -----
    This is a wrapper to the read_full_traj_tdf function which
    actually processes the files.
    """
    # Context managers close the handles: the original opened three file
    # objects (two of them anonymously) and never closed any of them.
    with open(data.primitive_pos, "r") as f:
        num_lines_p = sum(1 for line in f)
    with open(data.primitive_tdf, "r") as f:
        num_lines_b = sum(1 for line in f)
    with open(data.primitive_pos, "r") as f:
        # first line of an xyz file is the atom count
        n_atoms = int(f.readline())
    # explicit floor division: each frame is (n_atoms + 2) lines
    nstep = num_lines_p // (n_atoms + 2)
    # check consistency between trajectory and dipoles
    if data.tdf_from_cp2k:  # in CP2K there are 10 lines for each dipole
        assert (num_lines_b // 10) == nstep
    else:  # in .tdf format there is an header of 4 lines and then the dipoles
        assert (num_lines_b - 4) == nstep
    atb, ata, btb = read_full_traj_tdf(n_atoms, nstep, data)
    return atb, ata, btb, n_atoms, nstep
def read_full_traj_tdf(n_atoms, nstep, data):
    """
    Return the ATAs for the TDF needed to solve the linear system.

    Parameters
    ----------
    n_atoms : int
        number of system atoms
    nstep : int
        number of system configurations
    data : instance of class Settings
        contains input and check variables

    Returns
    -------
    atb : (N,) float ndarray
        dot product between the transposed model data matrix (of size N,M)
        and the reference data column matrix (of size M)
    ata : (N,N) float ndarray
        dot product between the transposed model data matrix (of size N,M)
        and itself (of size M,N)
    btb : float
        dot product between the transposed reference data column matrix and
        itself, this is the magnitude of the reference data
    """
    in_f_pos = open(data.primitive_pos,"r")
    traj_p = np.zeros(nstep*3*n_atoms)
    traj_f = np.zeros(nstep*3)
    atoms_per_par = data.atoms_per_par
    #A_ij elements for TDF (model data): one row per charge type,
    #three columns (x,y,z) per frame
    current_tdf = np.zeros([len(atoms_per_par),nstep*3])
    #loop over all frames
    for i in xrange(nstep):
        # each xyz frame: 2 header lines followed by n_atoms coordinate lines
        names_tmp,x_t,y_t,z_t = read_xyz_tr_data(in_f_pos,3,n_atoms+2,1,2,3,4)
        if i == 0:
            #check consistency between dipoles trajectory and atoms_per_par
            if data.check_types(names_tmp):
                names_f = names_tmp
            else:
                message = "WARNING: atom types in " + data.primitive_pos + \
                          " do not correspond to those in " + data.coor + \
                          " This is not a problem if the atoms are sorted " + \
                          "in the same way. Continue at your own risk."
                print message
                # Replacing the types read from tdf with the types of data.coor
                names_f = data.names
        #loop over all atoms positions in given frame
        #and accumulate them together according to their charge type
        for k, at in enumerate(names_f):
            # NOTE: Python 2 dict.keys() returns a list, so .index() is valid;
            # the key order of atoms_per_par fixes the row order of current_tdf
            n_idx = atoms_per_par.keys().index(at)
            #n_idx = name_idxs[at]
            offset=i*3
            current_tdf[n_idx][offset+0] += x_t[k]
            current_tdf[n_idx][offset+1] += y_t[k]
            current_tdf[n_idx][offset+2] += z_t[k]
    #average atoms positions, three independent values (x,y,z) for each parameter
    #(only done when more than one frame is present, i.e. > 3 columns)
    if len(current_tdf[0]) > 3:
        avg_traj = np.zeros([len(current_tdf),3])
        for i,col in enumerate(current_tdf):
            avg_traj[i][0] = np.average(col[0::3])
            avg_traj[i][1] = np.average(col[1::3])
            avg_traj[i][2] = np.average(col[2::3])
            # subtract the per-component average from every frame
            current_tdf[i][0::3] = np.subtract(current_tdf[i][0::3], avg_traj[i][0])
            current_tdf[i][1::3] = np.subtract(current_tdf[i][1::3], avg_traj[i][1])
            current_tdf[i][2::3] = np.subtract(current_tdf[i][2::3], avg_traj[i][2])
    # The entire trajectory is on one single frame xyz with n_atom_tdf entries
    n_atom_tdf = nstep
    # B_j elements for TDF (reference data)
    x_f,y_f,z_f = atautils.tdf_smooth(data.primitive_tdf,
                                      data.tdf_from_cp2k, nstep, print_tdf=False)
    # The average is computed only if there is more than one dipole
    if len(current_tdf[0]) > 3:
        x_avg, y_avg, z_avg = np.average(x_f), np.average(y_f), np.average(z_f)
        x_f = np.subtract(x_f, x_avg)
        y_f = np.subtract(y_f, y_avg)
        z_f = np.subtract(z_f, z_avg)
    # interleave the dipole components as (x0,y0,z0,x1,y1,z1,...)
    traj_f = np.array(zip(x_f,y_f,z_f)).flatten()
    in_f_pos.close()
    # current tdf is already the transpose (at) of the A matrix
    at = current_tdf
    b = traj_f
    ata = np.dot(at, np.transpose(at))
    atb = np.dot(at,b)
    btb = np.dot(np.transpose(b),b)
    return atb, ata, btb
def iterative_round_charges(charges, sorted_n_atoms_per_charge, rnd=4):
    """
    Round all numbers in the working array to the wanted decimal place,
    trying to keep the total sum equal to zero.

    The function converges when the total sum of the product of
    the working array times the corresponding multiplicity of each element
    becomes smaller than 1.0e-10. After 1000 iterations the function moves
    the rounding to the next decimal place on the right. If there is no
    convergence before reaching the 15th decimal place the function gives up.

    Parameters
    ----------
    charges : (N,) float ndarray
        working array of numbers to be rounded
    sorted_n_atoms_per_charge : (N,) int ndarray
        multiplicity of each element in the working array
        (e.g., number of atoms sharing same charge value), the order follows
        the one of "charges"
    rnd : int
        decimal place position to be rounded

    Returns
    -------
    new_uc_charge : float
        sum value of the product between the rounded working array times
        the corresponding multiplicity of each element
    new_ch : (N,) float ndarray
        rounded working array
    rnd : int
        decimal place position that has been actually rounded
    """
    rounded_charges = np.around(charges,rnd)
    rounded_charges_2 = np.zeros(len(charges))
    #take the values of working array and round them to the rnd decimal
    #place, generate as well a vector with the opposite rounding
    #(e.g., 0.04337 becomes 0.0434 in one array and 0.0433 in the other)
    for i,ch in enumerate(charges):
        offset = np.power(10.0,(-rnd))
        if ch - rounded_charges[i] < 0: # ch = 0.12337 rounded = 0.1234
            offset = -offset
        rounded_charges_2[i] = rounded_charges[i] + offset
    # NOTE: zip() returns a list in Python 2, so fused[i][...] indexing works
    fused = zip(rounded_charges, rounded_charges_2)
    # "select" holds twice as many entries as needed so that shuffling
    # yields a roughly balanced random mix of 0s and 1s
    select = np.zeros(2*len(charges),dtype=int)
    select[:len(charges)] = 1
    assert np.sum(select) == len(charges)
    counter = 0
    while True:
        #randomly choose the up or down rounding for each value
        np.random.shuffle(select)
        new_ch = np.zeros(len(charges))
        for i in xrange(len(charges)):
            new_ch[i] = fused[i][select[i]]
        new_uc_charge = np.sum(np.multiply(sorted_n_atoms_per_charge,new_ch))
        counter += 1
        #break if no neutrality found
        if rnd > 14:
            print "Iterative rounding uc charge not converged."
            break
        #neutrality found
        if abs(new_uc_charge) < 1.0e-10:
            break
        #move rounding one decimal place to the right
        #(recursive call retries the whole procedure at rnd+1)
        if counter >= 1000:
            rnd +=1
            new_uc_charge, new_ch, rnd = iterative_round_charges(charges, sorted_n_atoms_per_charge, rnd)
            break
    return new_uc_charge, new_ch, rnd
def add_lambda_elements(atb, ata, data):
    """
    Build the bordered system for total-charge-constrained least squares.

    Extends ATA/ATB with one extra row/column implementing a Lagrange
    multiplier that constrains the total charge to data.q_tot.

    Parameters
    ----------
    atb : (N,) float ndarray
        A^T b vector of the unconstrained normal equations
    ata : (N,N) float ndarray
        A^T A matrix of the unconstrained normal equations
    data : instance of class Settings
        provides natoms_per_par (atoms per parameter) and q_tot
        (desired total charge)

    Returns
    -------
    B : (N+1,) float ndarray
        atb followed by the desired total charge
    A : (N+1,N+1) float ndarray
        ata bordered by the per-parameter atom counts; the corner element
        A[N][N] (the Lagrange-multiplier slot) is zero
    """
    counts = np.asarray(data.natoms_per_par, dtype=float)
    n = counts.size
    # bordered matrix: copy ata, then fill last row and column with counts
    A = np.zeros((n + 1, n + 1), dtype=float)
    A[:n, :n] = ata
    A[n, :n] = counts
    A[:n, n] = counts
    # A[n, n] stays 0.0: diagonal entry for the Lagrange multiplier
    B = np.append(atb, float(data.q_tot))
    return B, A
def compute_chi(btb, atb, ata, q, nvals):
    """
    Return the root mean squared (RMS) error between reference and
    model data for a given set of parameters q.

    Uses the expansion X = ||B - A q||**2 = BTB - 2 q^T ATB + q^T ATA q,
    so only the precomputed normal-equation products are needed.

    Parameters
    ----------
    btb : float
        magnitude of the reference data (B^T B)
    atb : (N,) float ndarray
        A^T B vector
    ata : (N,N) float ndarray
        A^T A matrix
    q : (N,) float ndarray
        array of parameters
    nvals : int
        number of equations contributing to the RMS

    Returns
    -------
    float
        the root mean squared error
    """
    cross_term = 2.0 * np.dot(q, atb)
    quadratic_term = np.dot(q, np.dot(ata, q))
    residual = btb - cross_term + quadratic_term
    return np.sqrt(residual / nvals)
def precompute_working_matrices_vdW(data, cube_fn, miss_g):
    """
    Return the ATA and ATB matrices together with BTB and the average model
    data per parameter for a set of different vdW scaling radius gamma.

    Data useful to restart the calculations (number of valid grid points and
    the sum of the squared ESP values of the full grid) are also returned.

    Parameters
    ----------
    data : instance of class Settings
        contains input and check variables
    cube_fn : string
        the name of the cube file containing the ESP data
    miss_g : float list
        the values of gamma for which the matrices are to be computed

    Returns
    -------
    results : list of dict (one for each value of miss_g)
        each dict contains:
        atb : (N,) float ndarray
            dot product between the transposed model data matrix (of size N,M)
            and the reference data column matrix (of size M)
        ata : (N,N) float ndarray
            dot product between the transposed model data matrix (of size N,M)
            and itself (of size M,N)
        btb : float
            dot product between the transposed reference data column matrix and
            itself, this is the magnitude of the reference data
        avg_prm_traj_f : (N,) float ndarray
            contains the average value per column of matrix A (M,N)
        n_pts : int
            number of valid grid points
        e2_tot : float
            sum of the squared ESP values of the full grid
    """
    # load the cube file into the Fortran helper module (module-level state)
    atautils.init(cube_fn)
    results = []
    # compute the ESP model data only for the values of gamma contained in miss_g
    for gamma in miss_g:
        # remove points lying inside a sphere of radius vdW * gamma
        ngood_grid_pts, e2_tot = atautils.filter(gamma, False)
        # compute model data
        # NOTE(review): the meaning of the literal arguments 15 and -1.0 is
        # defined by atautils.get_ata_matrix -- confirm against its interface
        e_atb, e_ata, e_btb, avg_prm_traj_f = atautils.get_ata_matrix(15, data.locut, -1.0, ngood_grid_pts, data.natoms_per_par, data.kind_idx, False, data.esp_sign)
        # store one result dict per gamma
        matrices = dict(zip(['tail','atb', 'ata', 'btb', 'avg_prm_traj_f','n_pts', 'e2_tot'],[gamma,e_atb, e_ata, e_btb, avg_prm_traj_f,ngood_grid_pts,e2_tot]))
        results.append(matrices)
    # get atomic numbers for this cube
    tmp_z = atautils.get_zatom(data.natoms)
    assert save_z_and_check(data,tmp_z), "ERROR: atomic numbers are not consistent across cubes"
    print cube_fn
    # cleanup of the stored data in the fortran module
    atautils.release()
    return results
def precompute_working_matrices_frame(data):
    """
    Return the ATA and ATB matrices together with BTB accumulated over a set
    of different system configurations for a set of different vdW scaling
    radius gamma.

    Data useful to restart the calculations (sum of the number of valid
    grid points and sum of the sum of the squared ESP values of the full
    grid) are also returned.

    Parameters
    ----------
    data : instance of class Settings
        contains input and check variables

    Returns
    -------
    results : list of dict (one for each value of gamma)
        each dict contains:
        atb : (N,) float ndarray
            sum over system configurations of
            dot product between the transposed model data matrix (of size N,M)
            and the reference data column matrix (of size M)
        ata : (N,N) float ndarray
            sum over system configurations of
            dot product between the transposed model data matrix (of size N,M)
            and itself (of size M,N)
        btb : float
            sum over system configurations of
            dot product between the transposed reference data column matrix
            and itself, this is the magnitude of the reference data
        avg_prm_traj_f : None
            this quantity is meaningless for multiple configurations
        n_pts : int
            sum over system configurations of the number of valid grid points
        e2_tot : float
            sum over system configurations of the sum of the squared ESP
            values of the full grid
    """
    # computing the set of gamma over which the computations will be performed
    # and store them inside data
    g_stop = data.g_start + data.g_step * (data.n_gammas - 1)
    data.gammas = np.linspace(data.g_start,g_stop,data.n_gammas)
    # check which gammas are already computed and read them,
    # also store which ones are missing (miss_g)
    miss_g = []
    matrices = []
    for gamma in data.gammas:
        directory = ata_prefix + str(gamma) + "/"
        if path.exists(directory):
            mat = read_ata(directory)
            matrices.append(mat)
            if "atomic_data" in mat:
                # get and check atomic number
                tmp_z = mat["atomic_data"]
                assert save_z_and_check(data,tmp_z), "ERROR: atomic numbers are not consistent across ata directories"
            print "Reading ESP data from: ", directory
        else:
            # placeholder keeps matrices aligned with data.gammas order
            miss_g.append(gamma)
            matrices.append("missing")
    # compute the (possibly) missing values
    if miss_g:
        print "Computing ESP data for gammas: "
        for ga in miss_g:
            print ga
        assert data.cube_files, "ERROR: cube files not found while generating missing data for gamma " + str(ga)
        cube_files = data.cube_files
        nframes = len(cube_files)
        tmp_results = []
        # actually compute missing atas (one result list per cube)
        for cube_fn in cube_files:
            tmp_results.append(precompute_working_matrices_vdW(data,cube_fn,miss_g))
        print "Number of cubes is: ", nframes
        # store results for first frame
        miss_res = tmp_results[0]
        # loop over remaining frames and accumulate over the first
        for tres in tmp_results[1:]:
            # loop over gammas
            for i,gres in enumerate(miss_res):
                gres['atb'] += tres[i]['atb']
                gres['ata'] += tres[i]['ata']
                gres['btb'] += tres[i]['btb']
                gres['n_pts'] += tres[i]['n_pts']
                # avg_prm_traj_f now are meaningless so they are nullified
                gres['avg_prm_traj_f'] = None
                gres['e2_tot'] += tres[i]['e2_tot']
        # set e2_tot reference for check
        # NOTE(review): bare except -- it is meant to catch the case where
        # matrices[0] is the string "missing" (TypeError); a narrower
        # except clause would be safer
        try:
            cmp_e2 = matrices[0]['e2_tot']
        except:
            cmp_e2 = miss_res[0]['e2_tot']
        # fuse the missing matrices with the read ones
        cnt = 0
        for i,mat in enumerate(matrices):
            if mat == "missing":
                matrices[i] = miss_res[cnt]
                cnt += 1
            # check that all gammas have data taken from same cubes
            assert abs(cmp_e2 - matrices[i]['e2_tot']) < 10e-4,\
                "ERROR: it seems that cubes used to generate ESP data for gamma " + str(matrices[0]['tail']) + " and " +\
                str(matrices[i]['tail']) + " do not coincide. "+\
                "Total sum of squared ESP data is: %s" % str(cmp_e2) + " vs " + str(matrices[i]['e2_tot'])
        # if everything is ok save the missing matrices
        for mat in miss_res:
            save_ata(mat, data.zatoms)
    else:
        # check that all gammas have data taken from same cubes (only for the read ata case)
        cmp_e2 = matrices[0]['e2_tot']
        for i,mat in enumerate(matrices):
            assert abs(cmp_e2 - matrices[i]['e2_tot']) < 10e-4,\
                "ERROR: it seems that cubes used to generate ESP data for gamma " + str(matrices[0]['tail']) + " and " +\
                str(matrices[i]['tail']) + " do not coincide. "+\
                "Total sum of squared ESP data is: %s" % str(cmp_e2) + " vs " + str(matrices[i]['e2_tot'])
    return matrices
def solve(atbs, atas, btbs, neqs, weights, data):
    """
    Solve the weighted multi-contribution least-squares problem.

    The overall set of equations is made up of C independent contributions
    (e.g., ESP and TDF); each contribution is normalized by its own
    magnitude (btb) and scaled by its squared weight before the bordered
    charge-neutral system is solved.

    Parameters
    ----------
    atbs : (C,(N,)) float ndarray of (N,) float ndarray
        A^T B vectors, one per contribution
    atas : (C,(N,N)) float ndarray of (N,N) float ndarray
        A^T A matrices, one per contribution
    btbs : (C,) float ndarray
        reference-data magnitudes, one per contribution
    neqs : (C,) int ndarray
        number of equations for each contribution
    weights : (C,) float ndarray
        weight of each contribution in the merit function
    data : instance of class Settings
        contains input and check variables

    Returns
    -------
    res : (N,) float ndarray
        optimized parameters
    res_full : (N+1,) float ndarray
        optimized parameters plus the Lagrange multiplier for unit cell
        charge neutralization
    hogs : (C,) float ndarray
        relative root mean squared error (RRMS) per contribution
    hchis : list of float
        root mean squared error (RMS) per contribution
    rchis : (C,) float ndarray
        square root of the mean squared magnitude of the reference data,
        per contribution
    """
    # per-contribution scaling: squared weight divided by magnitude
    scale = np.square(weights) / btbs
    # accumulate the scaled contributions into a single system
    atb_tot = np.zeros_like(atbs[0])
    ata_tot = np.zeros_like(atas[0])
    for contrib_atb, contrib_ata, s in zip(atbs, atas, scale):
        atb_tot = atb_tot + contrib_atb * s
        ata_tot = ata_tot + contrib_ata * s
    # add the u.c. total charge neutralization and solve
    B, A = add_lambda_elements(atb_tot, ata_tot, data)
    res_full = np.linalg.solve(A, B)
    res = res_full[:-1]
    # evaluate the match independently for each contribution
    hchis = [compute_chi(b, v, m, res, n)
             for b, v, m, n in zip(btbs, atbs, atas, neqs)]
    rchis = np.sqrt(btbs / neqs)
    hogs = np.array(hchis) / rchis
    return res, res_full, hogs, hchis, rchis
def apply_restraints(data, e_atb, e_ata, e_btb):
    """
    Solve the ESP least-squares problem with parabolic restraints.

    Minimizes the ESP misfit (normalized by the ESP magnitude) plus
    parabolic restraint terms of the form w_i**2 (Q_i - q_i)**2, where
    w_i is the weight of the i-th restraint, Q_i the restrained value
    and q_i the parameter being restrained.

    Parameters
    ----------
    data : instance of class Settings
        provides npar, atoms_per_par, natoms_per_par, restraint_weight
        (dict: atom type -> weight) and restraints (dict: atom type ->
        target charge)
    e_atb : (N,) float ndarray
        A^T B vector of the ESP normal equations
    e_ata : (N,N) float ndarray
        A^T A matrix of the ESP normal equations
    e_btb : float
        ESP reference-data magnitude, used to normalize the ESP part

    Returns
    -------
    res : (N,) float ndarray
        optimized parameters

    Notes
    -----
    The restraint contribution is deliberately NOT normalized by the
    restraint magnitude, so that restraining all parameters to 0.0
    remains possible.
    """
    npar = data.npar
    # per-parameter restraint weights and target values (0 when unrestrained)
    weight_vec = np.zeros(npar, dtype=float)
    target_vec = np.zeros(npar, dtype=float)
    for idx, kind in enumerate(data.atoms_per_par.keys()):
        if kind not in data.restraint_weight:
            continue
        weight_vec[idx] = data.restraint_weight[kind]
        target_vec[idx] = data.restraints[kind]
    # restraints are scaled by the atom-type symmetry population
    eye = np.identity(npar, dtype=float)
    r_atb = eye.T.dot(target_vec) * data.natoms_per_par
    r_ata = eye.T.dot(eye) * data.natoms_per_par
    # fuse restraints (weighted) and ESP (magnitude-normalized)
    w2 = weight_vec ** 2
    atb = r_atb * w2 + e_atb / e_btb
    ata = r_ata * w2 + e_ata / e_btb
    # bordered charge-neutral system; drop the Lagrange multiplier on return
    B, A = add_lambda_elements(atb, ata, data)
    return np.linalg.solve(A, B)[:-1]
def save_z_and_check(data, tmp_z):
    """
    Check the consistency of atomic numbers across .cube files.

    On the first call (no zatoms stored on data yet) the atomic numbers
    in tmp_z are saved and validated per atom type; afterwards each call
    compares tmp_z against the stored reference.

    Parameters
    ----------
    data : instance of class Settings
        contains input and check variables
    tmp_z : (N,) int ndarray
        atomic numbers of the system atoms

    Returns
    -------
    bool
        True when tmp_z matches the stored zatoms (or was just stored).
    """
    if hasattr(data, 'zatoms'):
        # reference already stored: simply compare against it
        return np.array_equal(data.zatoms, tmp_z)
    # first call: store the reference and validate per-type consistency
    data.zatoms = tmp_z
    assert type_to_z(data), "ERROR: atomic numbers are not consistent across each atom type group"
    return True
def type_to_z(data):
    """
    This function associates an atomic number z to each atom type
    and returns True if all elements of the same type have the same z.

    Parameters
    ----------
    data : instance of class Settings
        contains input and check variables

    Returns
    -------
    bool
        True if all elements of the same type have the same z.

    Notes
    -----
    Cube files should have atoms in the same order of .xyz (or .pdb) file
    provided in the input (i.e., the atom types should coincide).
    This cannot be automatically checked but it is possible to compare .xyz
    atom type with corresponding atomic number in .cube file (i.e., the atom
    kind can be checked).
    """
    print "Atom types and atomic number:"
    # associate one atomic number Z to each key
    atom_grp_index = 0 # index of atoms when these are grouped into types
    z_per_par=np.zeros(data.npar,dtype=int)
    for i,key in enumerate(data.atoms_per_par.keys()):
        # we want the index of the first atom of each type
        # found using the "atom_grp_index"
        # fed into the "keep_idx" array
        # kind_idx[atom_grp_index] index of atom in pdb file
        # NOTE(review): kind_idx entries appear to be 1-based (hence the -1)
        # -- confirm against the code that builds kind_idx
        z_per_par[i]=(data.zatoms[data.kind_idx[atom_grp_index]-1])
        print "{:<4s}-->{:>4d}".format(key,z_per_par[i])
        # check if all atoms belonging to same atom-type symmetry group
        # have same Z
        for j in range(atom_grp_index, atom_grp_index+data.atoms_per_par[key]):
            if (z_per_par[i] != data.zatoms[data.kind_idx[j]-1]):
                return False
        # update index to the first atom of the next type group
        atom_grp_index += data.atoms_per_par[key]
    # cache the per-parameter atomic numbers for later use (e.g., apply_rg)
    data.z_per_par = z_per_par
    return True
def apply_rg(data, e_atb, e_ata, e_btb):
    """
    Return the set of parameters minimizing the sum of the squares
    of the errors (least squares) between ESP reference and model data,
    applying simultaneously to the parameters "Rappe-Goddard like"
    restraints exactly as implemented in REPEAT.

    All parameters are restrained according to the electronegativity and
    the self-Coulomb interaction corresponding to the specific atom kind:
        X = ... + w_1(E_1 + Y_1*q_1 + 0.5*J_1*q_1**2) + ...
    where w_i is the weight of the ith restraint, Y_i the
    electronegativity, J_i the self-Coulomb term,
    see "Campana et al., JCTC 5 (2009) 2866-2878".

    Parameters
    ----------
    data : instance of class Settings
        contains input and check variables (selci, eneg, rg_weight,
        natoms_per_par, npar, ...)
    e_atb : (N,) float ndarray
        A^T B vector of the ESP normal equations
    e_ata : (N,N) float ndarray
        A^T A matrix of the ESP normal equations
    e_btb : float
        ESP reference-data magnitude (unused here; kept for interface
        symmetry with apply_restraints)

    Returns
    -------
    res : (N,) float ndarray
        array of optimized parameters

    Notes
    -----
    This kind of restraint is equivalent to a parabolic restraint of
    the parameters to a charge Q_i=(-Y_i/J_i), multiplying the weight of
    each restraint by J_i.
    """
    # find the atomic number of the atom kind related to each parameter
    # check if zatoms are stored and if not retrieve them from the
    # first cube file (also fills data.z_per_par via type_to_z)
    find_zatom(data)
    # self-Coulomb per parameter (indexed by atomic number, 1-based -> -1)
    joo=data.selci[data.z_per_par-1]*atautils.autokcal
    # electronegativity per parameter
    xoo=data.eneg[data.z_per_par-1]*atautils.autokcal
    # rg_weight (input in au units as REPEAT) converted to kcal/mol
    rg_weight=data.rg_weight*atautils.autokcal
    print ""
    print "This is equivalent to restraining the charges, "
    print "with a weight depending on the self Coulomb interaction J, "
    print "to values: "
    print -data.eneg[data.z_per_par-1]/data.selci[data.z_per_par-1]
    print ""
    # linear restraint term: -n_i * w * 0.5 * Y_i per parameter
    r_atb =(-data.natoms_per_par*rg_weight*0.5*xoo)
    # quadratic restraint term: diagonal n_i * w * 0.5 * J_i
    r_ata =(
        np.identity(data.npar,dtype=float)*
        data.natoms_per_par*
        rg_weight*0.5*
        joo)
    # fuse restraints and ESP
    atb = r_atb + e_atb
    ata = r_ata + e_ata
    # solve the bordered charge-neutral system
    B,A = add_lambda_elements(atb, ata, data)
    res_full = np.linalg.solve(A,B)
    res = res_full[:-1]
    return res
def save_ata(mat, atomic_data=None):
"""
Write on file all data required to restart the calculations.
Parameters
----------
mat : dict
containing:
tail : string
complete ata_prefix to identify where data are saved
n_pts : int
sum over system configurations of the
number of valid grid points
atb : (N,) float ndarray
sum over system configurations of
dot product between the transposed model data matrix (of size N,M)
and the reference data column matrix (of size M)
ata : (N,N) float ndarray
sum over system configurations of
dot product between the transposed model data matrix (of size N,M)
and itself (of size M,N)
btb : float
sum over system configurations of the
dot product between the transposed reference data column matrix and
itself, this is the magnitude of the reference data
e2_tot: float
sum over system configurations of the
sum of the squared ESP values of the full grid
atomic_data : list
can contain numbers or string. For ESP data it is the list of atomic
numbers, for TDF data it is the list of atomic types.
"""
directory = ata_prefix + str(mat['tail']) + "/"
if not path.exists(directory):
makedirs(directory)
else:
print "WARNING: ", directory, " overwritten."
ataf = open(directory + "data.dat", "w")
ataf.write("tail " + str(mat['tail']) + "\n")
ataf.write("n_pts " + str(mat['n_pts'])+ "\n")
ataf.write("btb {:>25.15f}\n".format(mat['btb']))
ataf.write("e2_tot {:>18.15e}\n".format(mat['e2_tot']))
mat['atb'].tofile(directory + "atb.bin")
mat['ata'].tofile(directory + "ata.bin")
if atomic_data != None:
ataf.write("atomic_data: ")
for ad in atomic_data:
ataf.write(str(ad) + " ")
ataf.write("\n")
def read_ata(atadir):
    """
    Read data saved with save_ata.

    Parameters
    ----------
    atadir : string
        name of the directory where files are stored (trailing slash
        included by callers)

    Returns
    -------
    results : dict
        containing:
        tail : float or string
            the gamma value this data belongs to (float when numeric,
            e.g. the string label "tdf" is kept as a string)
        n_pts : int
            accumulated number of valid grid points
        atb : (N,) float ndarray
            accumulated A^T B vector
        ata : (N,N) float ndarray
            accumulated A^T A matrix
        btb : float
            accumulated reference-data magnitude
        avg_prm_traj_f : None
            meaningless for multiple-configuration data
        e2_tot : float
            accumulated sum of the squared ESP values of the full grid
        atomic_data (optional) : ndarray
            atomic numbers (ESP data) or atom type strings (TDF data)
    """
    fnames = [atadir + name for name in ["data.dat","atb.bin","ata.bin"]]
    for name in fnames:
        assert path.getsize(name) > 0, "ERROR corrupted file: %s" % name
    # close the handle deterministically (was left open in the original)
    with open(fnames[0]) as dataf:
        lines = dataf.readlines()
    slines = [line.split() for line in lines if not "atomic_data:" in line]
    atomic_data = np.squeeze(np.array([ad.replace("atomic_data:","").split() for ad in lines if "atomic_data:" in ad]))
    if all(ad.isdigit() for ad in atomic_data):
        # BUG FIX: np.int was removed from NumPy (>=1.24); the builtin int
        # is the equivalent, supported spelling
        atomic_data = atomic_data.astype(int)
    results = {key: value for (key,value) in slines}
    # BUG FIX: the original used results['tail'].isdigit(), which is False
    # for float strings like "1.0", so the gamma tails written by save_ata
    # were never converted back to numbers; try a float conversion instead
    # and keep non-numeric labels (e.g. "tdf") as strings
    try:
        results['tail'] = float(results['tail'])
    except ValueError:
        pass
    results['n_pts'] = int(results['n_pts'])
    results['btb'] = float(results['btb'])
    results['e2_tot'] = float(results['e2_tot'])
    results['avg_prm_traj_f'] = None
    results['atb'] = np.fromfile(fnames[1])
    results['ata'] = np.fromfile(fnames[2]).reshape(len(results['atb']),-1)
    if atomic_data.size != 0:
        results['atomic_data'] = atomic_data
    return results
def find_zatom(data):
    """
    Ensure atomic numbers are stored on data.

    If atomic numbers have already been stored (either read from a cube
    or from saved data) this is a no-op; otherwise the first cube file
    is opened and the atomic numbers are extracted and validated.

    Parameters
    ----------
    data : instance of class Settings
        contains input and check variables
    """
    if hasattr(data, 'zatoms'):
        # atomic numbers already available: nothing to do
        return
    first_cube = data.cube_files[0]
    atautils.init(first_cube)
    data.zatoms = atautils.get_zatom(data.natoms)
    assert type_to_z(data), "ERROR: atomic numbers are not consistent across each atom type group"
    atautils.release()
def main():
    """Drive the whole charge-fitting procedure.

    Steps (controlled by the fit_kind read from the input settings):
      1. for "ESP+TDF" fits, read (or compute and save) the TDF matrices
         and solve for the pure-TDF reference charges
      2. build the ESP matrices for every requested gamma value
      3. for each gamma: solve the pure-ESP fit, optionally apply
         RESP / RG restraints, and (for "ESP+TDF") scan increasing TDF
         weights, tracking two stopping criteria (auto-stop and minimum
         RRMS distance)
      4. write per-gamma result files plus a global summary file

    NOTE(review): indentation was reconstructed from the mangled source;
    the delta_pre/delta_res bookkeeping in the weight loop follows the
    "compare against previous iteration" reading -- confirm against the
    original file.
    """
    dt = settings()
    print ""
    # TDF data generation
    if "ESP+TDF" == dt.fit_kind:
        directory = dt.ata_tdf
        if path.exists(directory):
            print "Reading TDF data from: ", directory
            mat=read_ata(directory)
            b_atb = mat['atb']
            b_ata = mat['ata']
            b_btb = mat['btb']
            tdf_nequations = mat['n_pts']
            # check that the atom-types used for the generation of TDF
            # atas are consistent with current .xyz or (.pdb) ones
            assert Counter(dt.names) == Counter(mat['atomic_data'])
        else:
            print "Computing TDF data"
            # read TDF data and subtract avgs
            b_atb, b_ata, b_btb, tdf_natoms, tdf_nstep = read_full_traj_tdf_auto(dt)
            # three equations (x, y, z components) per trajectory step
            tdf_nequations = tdf_nstep*3
            mat = dict(zip(['tail','atb', 'ata', 'btb', 'avg_prm_traj_f','n_pts', 'e2_tot'],['tdf',b_atb, b_ata, b_btb, None, tdf_nequations, 0.0]))
            save_ata(mat,dt.names)
            assert tdf_natoms == dt.natoms
        # solve and store reference TDF charges, these will not change during the entire procedure
        res, res_ber, hog_ber, hchi_b, rchi_b = solve(np.array([b_atb]), np.array([b_ata]), np.array([b_btb]),
                                                      np.array([tdf_nequations]),np.array([1.0]), dt)
        print "Pure TDF charges:"
        for k,knd in enumerate(dt.atoms_per_par.keys()):
            print "{:<4s}={:>14.8f}".format(knd, res[k])
        # total unit-cell charge implied by the fitted charges
        uc_charge = np.sum(np.multiply(dt.natoms_per_par,res))
        if abs(uc_charge) > Tiny:
            print "WARNING! Unit cell charge: ", uc_charge, "\n"
        print "TDF RRMS: ", hog_ber[0]
        print ""
    # file containing the most relevant results for all gammas
    res_h = open("auto_weight_res.dat", "w")
    res_h.write("# gamma rounded charges # iteration_stop num_grid_pts \n")
    # generate atas for all ESP configurations and for all gammas
    g_matrices = precompute_working_matrices_frame(dt)
    # main loop over different gammas (i.e., cube points ensembles for ESP)
    for matrices in g_matrices:
        gamma = matrices['tail']
        print "========================================================================"
        print "Working for gamma: ", gamma
        # solve and store reference ESP charges, these will not change for a given gamma
        ngood_grid_pts = matrices['n_pts']
        e_atb = matrices['atb']
        e_ata = matrices['ata']
        e_btb = matrices['btb']
        # NOTE(review): avg_prm_traj_f appears unused below -- confirm
        avg_prm_traj_f = matrices['avg_prm_traj_f']
        res, res_esp, hog_esp, hchi_e, rchi_e = solve(np.array([e_atb]),
            np.array([e_ata]), np.array([e_btb]), np.array([ngood_grid_pts]),
            np.array([1.0]), dt)
        print "Pure ESP charges:"
        for k,knd in enumerate(dt.atoms_per_par.keys()):
            print "{:<4s}={:>14.8f}".format(knd, res[k])
        uc_charge = np.sum(np.multiply(dt.natoms_per_par,res))
        if abs(uc_charge) > Tiny:
            print "WARNING! Unit cell charge: ", uc_charge, "\n"
        print "ESP RRMS: ", hog_esp[0]
        print ""
        #solve for restrained fit
        if ("RESP" == dt.fit_kind):
            res = apply_restraints(dt, e_atb, e_ata, e_btb)
        #solve for "Rappe-Goddard like" restrained fit
        if ("RG" == dt.fit_kind):
            res = apply_rg(dt, e_atb, e_ata, e_btb)
        uc_charge = np.sum(np.multiply(dt.natoms_per_par,res))
        if abs(uc_charge) > Tiny:
            print "WARNING! Unrounded Unit cell charge: ", uc_charge, "\n"
        # if "non TDF" fit, computation for this gamma ends here
        if not ("ESP+TDF" == dt.fit_kind):
            # rounding resulting charges starting from fourth decimal place
            uc_rnd_ch, rnd_ch, rnd = iterative_round_charges(res, dt.natoms_per_par, rnd=4)
            # evaluate RRMS for rounded charges
            rnd_hchie = compute_chi(e_btb, e_atb, e_ata, rnd_ch, ngood_grid_pts)
            print "Final rounded charges:"
            fmt="{:<4s}={:>10."+str(rnd)+"f}"
            for k,knd in enumerate(dt.atoms_per_par.keys()):
                print fmt.format(knd, rnd_ch[k])
            if abs(uc_rnd_ch) > Tiny:
                print "WARNING! Unit cell charge: ", uc_rnd_ch, "\n"
            print "RRMS: ", rnd_hchie/rchi_e[0]
            continue
        # ESP+TDF fit only
        weight_b=dt.weight_tdf
        res_f = open("results_" + str(gamma) + ".dat", "w")
        res_u = open("results_unrounded_" + str(gamma) + ".dat", "w")
        res_lf = open("res_loop_" + str(gamma) + ".dat", "w")
        res_lf.write("# weight_tdf delta%_tdf delta%_esp RRMS_tdf RRMS_esp \n")
        # arrays to treat together TDF and ESP data
        atbs = np.array([b_atb,e_atb])
        atas = np.array([b_ata,e_ata])
        btbs = np.array([b_btb,e_btb])
        neqs = np.array([tdf_nequations,ngood_grid_pts])
        done_auto = False
        auto_res = None
        # loop over various TDF weights (increasing values)
        print "Performing weight loops..."
        for w in xrange(dt.nw_loops):
            weight_b += dt.w_incr
            # weight should fall in [0,1]
            assert weight_b <= 1.0+Tiny and weight_b >= 0.0-Tiny, "weight_b = %r" % weight_b
            weight_e = 1.0-weight_b
            weights = np.array([weight_b,weight_e])
            res, res_full, hogs, hchis, rchis = solve(atbs, atas, btbs, neqs,
                                                      weights, dt)
            # evaluate u.c. total charge after fit
            uc_charge = np.sum(np.multiply(dt.natoms_per_par,res))
            # evaluate RMS independently for ESP and TDF
            hchie = hchis[1]
            hchib = hchis[0]
            # compute the square of the mean magnitude of the reference data
            # these have been already computed for pure TDF and ESP
            rchi_e = rchis[1]
            rchi_b = rchis[0]
            # store current RRMS for ESP
            current_hge = hogs[1]
            # store current RRMS for TDF
            current_hgb = hogs[0]
            # evaluate improvement/degradation with respect to best ESP and best TDF
            delta_ber = (current_hgb - hog_ber[0])/hog_ber[0]
            delta_esp = (current_hge - hog_esp[0])/hog_esp[0]
            res_lf.write("{:>7.4f} {:>20.16f} {:>20.16f} {:>20.16f} {:>20.16f}\n".format(weight_b,
                delta_ber*100.0, delta_esp*100.0, current_hgb, current_hge))
            # after first iteration, check if auto-stop criterion is met
            # i.e., [(RRMS_E - RRMS_Ebest)/RRMS_Ebest] < [(RRMS_T - RRMS_Tbest)/RRMS_Tbest]
            # (rnd_ch, rnd, rnd_hchib/e, uc_rnd_ch still hold the PREVIOUS
            # iteration's rounded results here, hence the w-1 index)
            if w > 0 and not done_auto:
                if (delta_esp - delta_ber) > 0.0:
                    auto_res = [[rnd_ch,rnd], w-1, (delta_esp - delta_ber), rnd_hchib/rchi_b, rnd_hchie/rchi_e, uc_rnd_ch]
                    done_auto = True
            # write resulting unrounded parameters for this weight
            res_u.write(str(gamma))
            for val in res:
                res_u.write("{:>20.15f}, ".format(val))
            res_u.write(" # " + str(ngood_grid_pts) + "\n")
            # rounding resulting charges starting from fourth decimal place
            uc_rnd_ch, rnd_ch, rnd = iterative_round_charges(res, dt.natoms_per_par, rnd=4)
            # evaluate RMS independently for ESP and TDF for rounded charges
            rnd_hchie = compute_chi(e_btb, e_atb, e_ata, rnd_ch, ngood_grid_pts)
            rnd_hchib = compute_chi(b_btb, b_atb, b_ata, rnd_ch, tdf_nequations)
            # write results for this weight
            res_f.write(str(gamma))
            fmt = "{:>"+ str(rnd+4) + "." + str(rnd) + "f}, "
            for val in rnd_ch:
                res_f.write(fmt.format(val))
            res_f.write(" # " + str(ngood_grid_pts) + "\n")
            # after first iteration, check if minimum distance criterion is met
            # i.e., min(abs(RRMS_E-RRMS_T))
            delta = np.abs(current_hge-current_hgb)
            if w > 0:
                if (delta < delta_pre):
                    delta_res = [[rnd_ch,rnd], w, delta, rnd_hchib/rchi_b, rnd_hchie/rchi_e, uc_rnd_ch]
            else:
                # first iteration: initialize the running minimum
                delta_res = [[rnd_ch,rnd], w, delta, rnd_hchib/rchi_b, rnd_hchie/rchi_e, uc_rnd_ch]
            delta_pre = delta
        res_lf.close()
        res_u.close()
        res_f.close()
        # final results:
        # chose between "autoweight" or "min(abs(RRMS_E-RRMS_T))" criterion
        # NOTE: true_res[0] is a list:
        # where true_res[0][0] is a list containing the rounded charges
        # and true_res[0][1] is the final number of decimal places used for the rounding
        true_res = delta_res
        stop_kind = "min(abs(RRMS_E-RRMS_T))"
        if auto_res and delta_res[1] > auto_res[1]:
            true_res = auto_res
            stop_kind = "RRMS_E degradation larger than RRMS_T improvement"
        # case of single imposed weight
        true_tdf_w = true_res[1]*dt.w_incr
        if dt.nw_loops == 1:
            true_tdf_w = weight_b
            stop_kind = "chosen by the user"
        print "Stopping due to: ", stop_kind
        print "at weights: ESP ", 1.0-true_tdf_w, " TDF ", true_tdf_w
        print "Final rounded charges: "
        fmt = "{:<4s}={:>" + str(6+true_res[0][1]) + "." + str(true_res[0][1]) + "f}"
        for k,knd in enumerate(dt.atoms_per_par.keys()):
            print fmt.format(knd, true_res[0][0][k])
        print "ESP RRMS: ", true_res[4]
        print "TDF RRMS: ", true_res[3]
        if abs(true_res[5]) > Tiny:
            print "WARNING! Unit cell charge: ", true_res[5], "\n"
        # write final rounded charges
        res_h.write(str(gamma))
        fmt = "{:>" + str(4+true_res[0][1]) + "." + str(true_res[0][1]) + "f}, "
        for val in true_res[0][0]:
            res_h.write(fmt.format(val))
        res_h.write(" # " + str(true_res[1]) + " " + str(ngood_grid_pts) + "\n")
    # needed because outside the weights loop
    if ("ESP+TDF" == dt.fit_kind):
        res_h.close()
if __name__ == "__main__":
    # banner: record start time, working directory and process id
    print "Started on: ", datetime.today().strftime("%Y-%m-%d %H:%M:%S")
    print "Current working directory is: ", getcwd()
    print "Pid is: ", getpid()
    print ""
    # time the whole fitting procedure
    start_time = time()
    main()
    print ""
    print "Ended on: ", datetime.today().strftime("%Y-%m-%d %H:%M:%S")
    print "Run for seconds: ",time()-start_time
    print ""
    # citation and credits banner
    print "========================================================================"
    print "If you found this program useful, please cite: "
    print "M. Sant, A. Gabrieli, P. Demontis, G. B. Suffritti"
    print "Comput. Phys. Commun., accepted manuscript (2015)"
    print "http://dx.doi.org/10.1016/j.cpc.2015.10.005"
    print ""
    print "A. Gabrieli, M. Sant, P. Demontis, G. B. Suffritti"
    print "J. Chem. Theory Comput. 11 (2015) pp 3829-3843"
    print "http://dx.doi.org/10.1021/acs.jctc.5b00503"
    print " "
    print "Together with the original REPEAT work: "
    print "C. Campana, B. Mussard, T. K. Woo"
    print "J. Chem. Theory Comput. 5 (2009) 2866-2878"
    print "http://dx.doi.org/10.1021/ct9003405"
    print ""
    print "The logo has been generated with figlet: http://www.figlet.org/"
    print ""
    print "========================================================================"
    print "InfiniCharges comes with ABSOLUTELY NO WARRANTY. "
    print ""
| cpctools/infinicharges | InfiniCharges.py | Python | gpl-3.0 | 51,513 | [
"CP2K"
] | e46600886fa6e930af44f366e98389e961e92503552d5d378b081133b123074e |
#!/usr/bin/python
#Audio Tools, a module and set of tools for manipulating audio data
#Copyright (C) 2007-2012 Brian Langenberger
#This program is free software; you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation; either version 2 of the License, or
#(at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from . import (MetaData, Image, InvalidImage)
import codecs
from id3v1 import ID3v1Comment
def is_latin_1(unicode_string):
    """returns True if the given unicode string is a subset of latin-1"""
    # printable ASCII (32-126) plus the Latin-1 upper range (160-255)
    allowed = frozenset([unichr(i) for i in range(32, 127) + range(160, 256)])
    return frozenset(unicode_string) <= allowed
class UCS2Codec(codecs.Codec):
    """a special unicode codec for UCS-2

    this is a subset of UTF-16 with no support for surrogate pairs,
    limiting it to U+0000-U+FFFF"""

    @classmethod
    def fix_char(cls, c):
        """a filter which changes overly large c values to 'unknown'"""
        # anything above the 16-bit range becomes U+FFFD
        return c if (ord(c) <= 0xFFFF) else u"\ufffd"

    def encode(self, input, errors='strict'):
        """encodes unicode input to plain UCS-2 strings"""
        filtered = u"".join([self.fix_char(c) for c in input])
        return codecs.utf_16_encode(filtered, errors)

    def decode(self, input, errors='strict'):
        """decodes plain UCS-2 strings to unicode"""
        (chars, size) = codecs.utf_16_decode(input, errors, True)
        filtered = u"".join([self.fix_char(c) for c in chars])
        return (filtered, size)
class UCS2CodecStreamWriter(UCS2Codec, codecs.StreamWriter):
    # StreamWriter flavor of the UCS-2 codec, returned by the
    # __reg_ucs2__ search function below
    pass
class UCS2CodecStreamReader(UCS2Codec, codecs.StreamReader):
    # StreamReader flavor of the UCS-2 codec, returned by the
    # __reg_ucs2__ search function below
    pass
def __reg_ucs2__(name):
    """codec search function resolving the name 'ucs2' to our codec"""
    if (name != 'ucs2'):
        return None
    codec = UCS2Codec()
    return (codec.encode,
            codec.decode,
            UCS2CodecStreamReader,
            UCS2CodecStreamWriter)

codecs.register(__reg_ucs2__)
def decode_syncsafe32(reader):
"""returns a syncsafe32 integer from a BitstreamReader"""
from operator import or_
return reduce(or_,
[size << (7 * (3 - i))
for (i, size) in
enumerate(reader.parse("1p 7u 1p 7u 1p 7u 1p 7u"))])
def encode_syncsafe32(writer, value):
    """writes a syncsafe32 integer to a BitstreamWriter"""
    # split the 28-bit value into four 7-bit fields, most significant first
    fields = []
    for shift in (21, 14, 7, 0):
        fields.append((value >> shift) & 0x7F)
    writer.build("1p 7u 1p 7u 1p 7u 1p 7u", fields)
class C_string:
    """a unicode string tied to a specific byte encoding,
    serialized as a NUL-terminated string of bytes"""

    # the NUL terminator bytes used by each supported encoding
    TERMINATOR = {'ascii': chr(0),
                  'latin_1': chr(0),
                  'latin-1': chr(0),
                  'ucs2': chr(0) * 2,
                  'utf_16': chr(0) * 2,
                  'utf-16': chr(0) * 2,
                  'utf_16be': chr(0) * 2,
                  'utf-16be': chr(0) * 2,
                  'utf_8': chr(0),
                  'utf-8': chr(0)}

    def __init__(self, encoding, unicode_string):
        """encoding is a string such as 'utf-8', 'latin-1', etc"""
        self.encoding = encoding
        self.unicode_string = unicode_string

    def __repr__(self):
        return "C_string(%s, %s)" % (repr(self.encoding),
                                     repr(self.unicode_string))

    def __unicode__(self):
        return self.unicode_string

    def __getitem__(self, char):
        return self.unicode_string[char]

    def __len__(self):
        return len(self.unicode_string)

    def __cmp__(self, c_string):
        return cmp(self.unicode_string, c_string.unicode_string)

    @classmethod
    def parse(cls, encoding, reader):
        """returns a C_string with the given encoding string
        from the given BitstreamReader

        raises LookupError if encoding is unknown
        raises IOError if a problem occurs reading the stream
        """
        try:
            terminator = cls.TERMINATOR[encoding]
        except KeyError:
            raise LookupError(encoding)

        # read one terminator-sized chunk at a time until NUL is hit
        step = len(terminator)
        pieces = []
        while (True):
            piece = reader.read_bytes(step)
            if (piece == terminator):
                break
            pieces.append(piece)

        return cls(encoding, "".join(pieces).decode(encoding, 'replace'))

    def build(self, writer):
        """writes our C_string data to the given BitstreamWriter
        with the appropriate terminator"""
        encoded = self.unicode_string.encode(self.encoding, 'replace')
        writer.write_bytes(encoded)
        writer.write_bytes(self.TERMINATOR[self.encoding])

    def size(self):
        """returns the length of our C string in bytes"""
        encoded = self.unicode_string.encode(self.encoding, 'replace')
        return len(encoded) + len(self.TERMINATOR[self.encoding])
def __attrib_equals__(attributes, o1, o2):
    """returns True if o1 and o2 both have every named attribute
    and each attribute compares equal on both objects"""
    for attrib in attributes:
        if (not hasattr(o1, attrib)):
            return False
        if (not hasattr(o2, attrib)):
            return False
        if (getattr(o1, attrib) != getattr(o2, attrib)):
            return False
    return True
def __number_pair__(current, total):
    """returns a unicode string combining current/total integers

    either value may be None; a missing current is rendered as 0
    and a missing total omits the slashed part entirely,
    e.g. __number_pair__(2, 3) -> u"2/3", __number_pair__(4, None) -> u"4"

    NOTE(review): an earlier comment claimed (4, 0) -> u"4", but this
    implementation only omits the total when it is None
    """
    from . import config

    # use zero-padded fields when the "pad" config option is set
    if (config.getboolean_default("ID3", "pad", False)):
        unslashed_format = u"%2.2d"
        slashed_format = u"%2.2d/%2.2d"
    else:
        unslashed_format = u"%d"
        slashed_format = u"%d/%d"

    number = current if (current is not None) else 0
    if (total is None):
        return unslashed_format % (number,)
    else:
        return slashed_format % (number, total)
def read_id3v2_comment(filename):
    """given a filename, returns an ID3v22Comment or a subclass

    for example, if the file is ID3v2.3 tagged,
    this returns an ID3v23Comment
    """
    from .bitstream import BitstreamReader

    # map each supported major version byte to its comment class
    comment_classes = {0x2: ID3v22Comment,
                       0x3: ID3v23Comment,
                       0x4: ID3v24Comment}

    reader = BitstreamReader(file(filename, "rb"), 0)
    reader.mark()
    try:
        (tag, version_major, version_minor) = reader.parse("3b 8u 8u")
        if (tag != 'ID3'):
            raise ValueError("invalid ID3 header")
        elif (version_major not in comment_classes):
            raise ValueError("unsupported ID3 version")
        else:
            # rewind so the version-specific parser sees the whole header
            reader.rewind()
            return comment_classes[version_major].parse(reader)
    finally:
        reader.unmark()
        reader.close()
def skip_id3v2_comment(file):
    """seeks past an ID3v2 comment if found in the file stream
    returns the number of bytes skipped
    the stream must be seekable, obviously"""
    from .bitstream import BitstreamReader

    bytes_skipped = 0
    reader = BitstreamReader(file, 0)
    reader.mark()
    # read 3 tag bytes + 2 version bytes and skip one more byte
    # (presumably the ID3v2 flags byte -- confirm against the spec)
    try:
        (tag_id, version_major, version_minor) = reader.parse("3b 8u 8u 8p")
    except IOError, err:
        reader.unmark()
        raise err
    if ((tag_id == 'ID3') and (version_major in (2, 3, 4))):
        reader.unmark()
        #parse the header
        bytes_skipped += 6
        tag_size = decode_syncsafe32(reader)
        bytes_skipped += 4
        #skip to the end of its length
        reader.skip_bytes(tag_size)
        bytes_skipped += tag_size
        #skip any null bytes after the IDv2 tag
        # (mark/unmark juggling keeps exactly one active mark so the
        #  first non-NUL byte can be pushed back with rewind)
        reader.mark()
        try:
            byte = reader.read(8)
            while (byte == 0):
                reader.unmark()
                bytes_skipped += 1
                reader.mark()
                byte = reader.read(8)
            # un-read the first non-NUL byte before returning
            reader.rewind()
            reader.unmark()
            return bytes_skipped
        except IOError, err:
            reader.unmark()
            raise err
    else:
        # no ID3v2 tag present: restore the original stream position
        reader.rewind()
        reader.unmark()
        return 0
############################################################
# ID3v2.2 Comment
############################################################
class ID3v22_Frame:
    """a generic ID3v2.2 frame holding a 3-byte ID and raw binary data"""

    def __init__(self, frame_id, data):
        # frame_id is a 3 byte identifier string, data the raw payload
        self.id = frame_id
        self.data = data

    def copy(self):
        """returns a duplicate of this frame"""
        return self.__class__(self.id, self.data)

    def __repr__(self):
        return "ID3v22_Frame(%s, %s)" % (repr(self.id), repr(self.data))

    def raw_info(self):
        """returns a human-readable version of this frame as unicode"""
        # hex-dump at most the first 20 bytes, with an ellipsis
        # when the payload is longer than that
        frame_id = self.id.decode('ascii', 'replace')
        hex_digits = u"".join([u"%2.2X" % (ord(b)) for b in self.data[0:20]])
        if (len(self.data) > 20):
            return u"%s = %s\u2026" % (frame_id, hex_digits)
        else:
            return u"%s = %s" % (frame_id, hex_digits)

    def __eq__(self, frame):
        return __attrib_equals__(["id", "data"], self, frame)

    @classmethod
    def parse(cls, frame_id, frame_size, reader):
        """given a frame_id string, frame_size int and BitstreamReader
        of the remaining frame data, returns a parsed ID3v2?_Frame"""
        return cls(frame_id, reader.read_bytes(frame_size))

    def build(self, writer):
        """writes this frame to the given BitstreamWriter
        not including its frame header"""
        writer.write_bytes(self.data)

    def size(self):
        """returns the size of this frame in bytes
        not including the frame header"""
        return len(self.data)

    @classmethod
    def converted(cls, frame_id, o):
        """given foreign data, returns an ID3v22_Frame"""
        raise NotImplementedError()

    def clean(self, fixes_applied):
        """returns a cleaned ID3v22_Frame,
        or None if the frame should be removed entirely
        any fixes are appended to fixes_applied as unicode string"""
        # generic frames carry opaque data, so nothing is cleaned
        return self.copy()
class ID3v22_T__Frame:
    """an ID3v2.2 text information frame (3-byte IDs of the form 'T..')"""

    # frame IDs whose text holds a number or "current/total" pair
    # (per number()/total() below: track and album numbers)
    NUMERICAL_IDS = ('TRK', 'TPA')

    def __init__(self, frame_id, encoding, data):
        """fields are as follows:
        | frame_id | 3 byte frame ID string |
        | encoding | 1 byte encoding int |
        | data | text data as raw string |
        """
        # only encodings 0 (Latin-1) and 1 (UCS-2) are accepted
        assert((encoding == 0) or (encoding == 1))
        self.id = frame_id
        self.encoding = encoding
        self.data = data

    def copy(self):
        # returns a new frame carrying the same field values
        return self.__class__(self.id, self.encoding, self.data)

    def __repr__(self):
        return "ID3v22_T__Frame(%s, %s, %s)" % \
            (repr(self.id), repr(self.encoding), repr(self.data))

    def raw_info(self):
        """returns a human-readable version of this frame as unicode"""
        return u"%s = (%s) %s" % \
            (self.id.decode('ascii'),
             {0: u"Latin-1", 1: u"UCS-2"}[self.encoding],
             unicode(self))

    def __eq__(self, frame):
        return __attrib_equals__(["id", "encoding", "data"], self, frame)

    def __unicode__(self):
        # decode the raw bytes and truncate at the first NUL, if any
        return self.data.decode(
            {0: 'latin-1', 1: 'ucs2'}[self.encoding],
            'replace').split(unichr(0), 1)[0]

    def number(self):
        """if the frame is numerical, returns the track/album_number portion
        raises TypeError if not"""
        import re

        if (self.id in self.NUMERICAL_IDS):
            unicode_value = unicode(self)
            # first run of digits is the "current" value
            int_string = re.search(r'\d+', unicode_value)
            if (int_string is not None):
                int_value = int(int_string.group(0))
                if (int_value == 0):
                    total_string = re.search(r'/\D*?(\d+)', unicode_value)
                    if (total_string is not None):
                        #don't return placeholder 0 value
                        #when a track_total value is present
                        #but track_number value is 0
                        return None
                    else:
                        return int_value
                else:
                    return int_value
            else:
                return None
        else:
            raise TypeError()

    def total(self):
        """if the frame is numerical, returns the track/album_total portion
        raises TypeError if not"""
        import re

        if (self.id in self.NUMERICAL_IDS):
            # the total is the digit run following the slash
            int_value = re.search(r'/\D*?(\d+)', unicode(self))
            if (int_value is not None):
                return int(int_value.group(1))
            else:
                return None
        else:
            raise TypeError()

    @classmethod
    def parse(cls, frame_id, frame_size, reader):
        """given a frame_id string, frame_size int and BitstreamReader
        of the remaining frame data, returns a parsed text frame"""
        # 1 encoding byte followed by (frame_size - 1) text bytes
        encoding = reader.read(8)
        return cls(frame_id, encoding, reader.read_bytes(frame_size - 1))

    def build(self, writer):
        """writes the frame's data to the BitstreamWriter
        not including its frame header"""
        writer.build("8u %db" % (len(self.data)), (self.encoding, self.data))

    def size(self):
        """returns the frame's total size
        not including its frame header"""
        return 1 + len(self.data)

    @classmethod
    def converted(cls, frame_id, unicode_string):
        """given a unicode string, returns a text frame"""
        # prefer the compact Latin-1 encoding when the text allows it
        if (is_latin_1(unicode_string)):
            return cls(frame_id, 0, unicode_string.encode('latin-1'))
        else:
            return cls(frame_id, 1, unicode_string.encode('ucs2'))

    def clean(self, fixes_performed):
        """returns a cleaned frame,
        or None if the frame should be removed entirely
        any fixes are appended to fixes_applied as unicode string"""
        from .text import (CLEAN_REMOVE_EMPTY_TAG,
                           CLEAN_REMOVE_TRAILING_WHITESPACE,
                           CLEAN_REMOVE_LEADING_WHITESPACE,
                           CLEAN_REMOVE_LEADING_ZEROES,
                           CLEAN_ADD_LEADING_ZEROES)

        field = self.id.decode('ascii')
        value = unicode(self)

        #check for an empty tag
        if (len(value.strip()) == 0):
            fixes_performed.append(CLEAN_REMOVE_EMPTY_TAG %
                                   {"field": field})
            return None

        #check trailing whitespace
        fix1 = value.rstrip()
        if (fix1 != value):
            fixes_performed.append(CLEAN_REMOVE_TRAILING_WHITESPACE %
                                   {"field": field})

        #check leading whitespace
        fix2 = fix1.lstrip()
        if (fix2 != fix1):
            fixes_performed.append(CLEAN_REMOVE_LEADING_WHITESPACE %
                                   {"field": field})

        #check leading zeroes for a numerical tag
        if (self.id in self.NUMERICAL_IDS):
            # normalize the number pair to the configured format
            fix3 = __number_pair__(self.number(), self.total())
            if (fix3 != fix2):
                from . import config

                if (config.getboolean_default("ID3", "pad", False)):
                    fixes_performed.append(CLEAN_ADD_LEADING_ZEROES %
                                           {"field": field})
                else:
                    fixes_performed.append(CLEAN_REMOVE_LEADING_ZEROES %
                                           {"field": field})
        else:
            fix3 = fix2

        # re-encode from scratch so the cleanest encoding is chosen
        return self.__class__.converted(self.id, fix3)
class ID3v22_TXX_Frame:
    """a user-defined text frame (TXX) with its own description string"""

    def __init__(self, encoding, description, data):
        """fields are as follows:
        | encoding | 1 byte encoding int (0 = Latin-1, 1 = UCS-2) |
        | description | C_string describing the value |
        | data | text data as raw string |
        """
        self.id = 'TXX'
        self.encoding = encoding
        self.description = description
        self.data = data

    def copy(self):
        # returns a new frame carrying the same field values
        return self.__class__(self.encoding,
                              self.description,
                              self.data)

    def __repr__(self):
        return "ID3v22_TXX_Frame(%s, %s, %s)" % \
            (repr(self.encoding), repr(self.description), repr(self.data))

    def raw_info(self):
        """returns a human-readable version of this frame as unicode"""
        return u"%s = (%s, \"%s\") %s" % \
            (self.id,
             {0: u"Latin-1", 1: u"UCS-2"}[self.encoding],
             self.description,
             unicode(self))

    def __eq__(self, frame):
        # fix: the attribute list, self and frame must all be passed to
        # __attrib_equals__ -- previously self/frame were omitted, so
        # comparing two TXX frames always raised TypeError
        return __attrib_equals__(["id", "encoding", "description", "data"],
                                 self, frame)

    def __unicode__(self):
        # decode the raw bytes and truncate at the first NUL, if any
        return self.data.decode(
            {0: 'latin-1', 1: 'ucs2'}[self.encoding],
            'replace').split(unichr(0), 1)[0]

    @classmethod
    def parse(cls, frame_id, frame_size, reader):
        """given a frame_id string, frame_size int and BitstreamReader
        of the remaining frame data, returns a parsed text frame"""
        # 1 encoding byte, a NUL-terminated description,
        # then the remaining bytes of text
        encoding = reader.read(8)
        description = C_string.parse({0: "latin-1", 1: "ucs2"}[encoding],
                                     reader)
        data = reader.read_bytes(frame_size - 1 - description.size())
        return cls(encoding, description, data)

    def build(self, writer):
        """writes this frame to the given BitstreamWriter
        not including its frame header"""
        writer.write(8, self.encoding)
        self.description.build(writer)
        writer.write_bytes(self.data)

    def size(self):
        """returns the size of this frame in bytes
        not including the frame header"""
        return 1 + self.description.size() + len(self.data)

    def clean(self, fixes_performed):
        """returns a cleaned frame,
        or None if the frame should be removed entirely
        any fixes are appended to fixes_applied as unicode string"""
        from audiotools.text import (CLEAN_REMOVE_EMPTY_TAG,
                                     CLEAN_REMOVE_TRAILING_WHITESPACE,
                                     CLEAN_REMOVE_LEADING_WHITESPACE)

        field = self.id.decode('ascii')
        value = unicode(self)

        #check for an empty tag
        if (len(value.strip()) == 0):
            fixes_performed.append(CLEAN_REMOVE_EMPTY_TAG %
                                   {"field": field})
            return None

        #check trailing whitespace
        fix1 = value.rstrip()
        if (fix1 != value):
            fixes_performed.append(CLEAN_REMOVE_TRAILING_WHITESPACE %
                                   {"field": field})

        #check leading whitespace
        fix2 = fix1.lstrip()
        if (fix2 != fix1):
            fixes_performed.append(CLEAN_REMOVE_LEADING_WHITESPACE %
                                   {"field": field})

        return self.__class__(self.encoding, self.description, fix2)
class ID3v22_W__Frame:
    """an ID3v2.2 URL frame (3-byte IDs of the form 'W..')
    holding its URL as a raw ASCII string"""

    def __init__(self, frame_id, data):
        # frame_id is a 3 byte identifier, data the URL string
        self.id = frame_id
        self.data = data

    def copy(self):
        """returns a duplicate of this frame"""
        return self.__class__(self.id, self.data)

    def __repr__(self):
        return "ID3v22_W__Frame(%s, %s)" % \
            (repr(self.id), repr(self.data))

    def raw_info(self):
        """returns a human-readable version of this frame as unicode"""
        return u"%s = %s" % (self.id.decode('ascii'),
                             self.data.decode('ascii', 'replace'))

    def __eq__(self, frame):
        return __attrib_equals__(["id", "data"], self, frame)

    @classmethod
    def parse(cls, frame_id, frame_size, reader):
        """returns a parsed URL frame from the given BitstreamReader"""
        data = reader.read_bytes(frame_size)
        return cls(frame_id, data)

    def build(self, writer):
        """writes this frame to the given BitstreamWriter
        not including its frame header"""
        writer.write_bytes(self.data)

    def size(self):
        """returns the size of this frame in bytes
        not including the frame header"""
        return len(self.data)

    def clean(self, fixes_applied):
        """returns a cleaned frame,
        or None if the frame should be removed entirely
        any fixes are appended to fixes_applied as unicode string"""
        # URL frames carry opaque data, so nothing is cleaned
        return self.copy()
class ID3v22_WXX_Frame:
    """a user-defined URL frame (WXX) with its own description string"""

    def __init__(self, encoding, description, data):
        """fields are as follows:
        | encoding | 1 byte encoding int (0 = Latin-1, 1 = UCS-2) |
        | description | C_string describing the URL |
        | data | the URL itself as a raw string |
        """
        self.id = 'WXX'
        self.encoding = encoding
        self.description = description
        self.data = data

    def copy(self):
        # returns a new frame carrying the same field values
        return self.__class__(self.encoding,
                              self.description,
                              self.data)

    def __repr__(self):
        return "ID3v22_WXX_Frame(%s, %s, %s)" % \
            (repr(self.encoding), repr(self.description), repr(self.data))

    def raw_info(self):
        """returns a human-readable version of this frame as unicode"""
        return u"%s = (%s, \"%s\") %s" % \
            (self.id,
             {0: u"Latin-1", 1: u"UCS-2"}[self.encoding],
             self.description,
             self.data.decode('ascii', 'replace'))

    def __eq__(self, frame):
        # fix: the attribute list, self and frame must all be passed to
        # __attrib_equals__ -- previously self/frame were omitted, so
        # comparing two WXX frames always raised TypeError
        return __attrib_equals__(["id", "encoding", "description", "data"],
                                 self, frame)

    @classmethod
    def parse(cls, frame_id, frame_size, reader):
        """given a frame_id string, frame_size int and BitstreamReader
        of the remaining frame data, returns a parsed text frame"""
        # 1 encoding byte, a NUL-terminated description,
        # then the remaining bytes of URL
        encoding = reader.read(8)
        description = C_string.parse({0: "latin-1", 1: "ucs2"}[encoding],
                                     reader)
        data = reader.read_bytes(frame_size - 1 - description.size())
        return cls(encoding, description, data)

    def build(self, writer):
        """writes this frame to the given BitstreamWriter
        not including its frame header"""
        writer.write(8, self.encoding)
        self.description.build(writer)
        writer.write_bytes(self.data)

    def size(self):
        """returns the size of this frame in bytes
        not including the frame header"""
        return 1 + self.description.size() + len(self.data)

    def clean(self, fixes_performed):
        """returns a cleaned frame,
        or None if the frame should be removed entirely
        any fixes are appended to fixes_applied as unicode string"""
        # URL frames carry opaque data, so nothing is cleaned
        return self.__class__(self.encoding,
                              self.description,
                              self.data)
class ID3v22_COM_Frame:
    """an ID3v2.2 comment frame (COM)"""

    def __init__(self, encoding, language, short_description, data):
        """fields are as follows:
        | encoding | 1 byte int of the comment's text encoding |
        | language | 3 byte string of the comment's language |
        | short_description | C_string of a short description |
        | data | plain string of the comment data itself |
        """
        self.id = "COM"
        self.encoding = encoding
        self.language = language
        self.short_description = short_description
        self.data = data

    def copy(self):
        # returns a new frame carrying the same field values
        return self.__class__(self.encoding,
                              self.language,
                              self.short_description,
                              self.data)

    def __repr__(self):
        return "ID3v22_COM_Frame(%s, %s, %s, %s)" % \
            (repr(self.encoding), repr(self.language),
             repr(self.short_description), repr(self.data))

    def raw_info(self):
        """returns a human-readable version of this frame as unicode"""
        return u"COM = (%s, %s, \"%s\") %s" % \
            ({0: u'Latin-1', 1: 'UCS-2'}[self.encoding],
             self.language.decode('ascii', 'replace'),
             self.short_description,
             self.data.decode({0: 'latin-1', 1: 'ucs2'}[self.encoding]))

    def __eq__(self, frame):
        return __attrib_equals__(["encoding",
                                  "language",
                                  "short_description",
                                  "data"], self, frame)

    def __unicode__(self):
        return self.data.decode({0: 'latin-1', 1: 'ucs2'}[self.encoding],
                                'replace')

    @classmethod
    def parse(cls, frame_id, frame_size, reader):
        """given a frame_id string, frame_size int and BitstreamReader
        of the remaining frame data, returns a parsed ID3v22_COM_Frame"""
        # 1 encoding byte + 3 language bytes, then a NUL-terminated
        # short description followed by the comment text itself
        (encoding, language) = reader.parse("8u 3b")
        short_description = C_string.parse({0: 'latin-1', 1: 'ucs2'}[encoding],
                                           reader)
        data = reader.read_bytes(frame_size - (4 + short_description.size()))
        return cls(encoding, language, short_description, data)

    def build(self, writer):
        """writes this frame to the given BitstreamWriter
        not including its frame header"""
        writer.build("8u 3b", (self.encoding, self.language))
        self.short_description.build(writer)
        writer.write_bytes(self.data)

    def size(self):
        """returns the size of this frame in bytes
        not including the frame header"""
        # encoding (1) + language (3) + description + comment text
        return 4 + self.short_description.size() + len(self.data)

    @classmethod
    def converted(cls, frame_id, unicode_string):
        # builds an English COM frame with an empty description,
        # using Latin-1 when the text allows it and UCS-2 otherwise
        if (is_latin_1(unicode_string)):
            return cls(0, "eng", C_string("latin-1", u""),
                       unicode_string.encode('latin-1'))
        else:
            return cls(1, "eng", C_string("ucs2", u""),
                       unicode_string.encode('ucs2'))

    def clean(self, fixes_performed):
        """returns a cleaned frame of the same class
        or None if the frame should be omitted
        fix text will be appended to fixes_performed, if necessary"""
        from audiotools.text import (CLEAN_REMOVE_EMPTY_TAG,
                                     CLEAN_REMOVE_TRAILING_WHITESPACE,
                                     CLEAN_REMOVE_LEADING_WHITESPACE)

        field = self.id.decode('ascii')
        text_encoding = {0: 'latin-1', 1: 'ucs2'}
        value = self.data.decode(text_encoding[self.encoding], 'replace')

        #check for an empty tag
        if (len(value.strip()) == 0):
            fixes_performed.append(CLEAN_REMOVE_EMPTY_TAG %
                                   {"field": field})
            return None

        #check trailing whitespace
        fix1 = value.rstrip()
        if (fix1 != value):
            fixes_performed.append(CLEAN_REMOVE_TRAILING_WHITESPACE %
                                   {"field": field})

        #check leading whitespace
        fix2 = fix1.lstrip()
        if (fix2 != fix1):
            fixes_performed.append(CLEAN_REMOVE_LEADING_WHITESPACE %
                                   {"field": field})

        #stripping whitespace shouldn't alter text/description encoding
        return self.__class__(self.encoding,
                              self.language,
                              self.short_description,
                              fix2.encode(text_encoding[self.encoding]))
class ID3v22_PIC_Frame(Image):
    #an ID3v2.2 attached picture ('PIC') frame
    #subclasses Image so the rest of the library can treat it
    #like any other embedded image;
    #PIC-specific values live in the pic_* attributes while the
    #inherited Image fields are derived from the raw image data
    def __init__(self, image_format, picture_type, description, data):
        """fields are as follows:
        | image_format | a 3 byte image format, such as 'JPG' |
        | picture_type | a 1 byte field indicating front cover, etc. |
        | description | a description of the image as a C_string |
        | data | image data itself as a raw string |
        """
        self.id = 'PIC'
        #add PIC-specific fields
        self.pic_format = image_format
        self.pic_type = picture_type
        self.pic_description = description
        #figure out image metrics from raw data
        try:
            metrics = Image.new(data, u'', 0)
        except InvalidImage:
            #data is unparseable, so fall back on placeholder metrics
            metrics = Image(data=data, mime_type=u'',
                            width=0, height=0, color_depth=0, color_count=0,
                            description=u'', type=0)
        #then initialize Image parent fields from metrics
        self.mime_type = metrics.mime_type
        self.width = metrics.width
        self.height = metrics.height
        self.color_depth = metrics.color_depth
        self.color_count = metrics.color_count
        self.data = data
    def copy(self):
        """returns a duplicate copy of this frame"""
        return ID3v22_PIC_Frame(self.pic_format,
                                self.pic_type,
                                self.pic_description,
                                self.data)
    def __repr__(self):
        return "ID3v22_PIC_Frame(%s, %s, %s, ...)" % \
            (repr(self.pic_format), repr(self.pic_type),
             repr(self.pic_description))
    def raw_info(self):
        """returns a human-readable version of this frame as unicode"""
        return u"PIC = (%s, %d\u00D7%d, %s, \"%s\") %d bytes" % \
            (self.type_string(),
             self.width,
             self.height,
             self.mime_type,
             self.pic_description,
             len(self.data))
    def type_string(self):
        """returns this frame's picture type as a plain string"""
        return {0: "Other",
                1: "32x32 pixels 'file icon' (PNG only)",
                2: "Other file icon",
                3: "Cover (front)",
                4: "Cover (back)",
                5: "Leaflet page",
                6: "Media (e.g. label side of CD)",
                7: "Lead artist/lead performer/soloist",
                8: "Artist / Performer",
                9: "Conductor",
                10: "Band / Orchestra",
                11: "Composer",
                12: "Lyricist / Text writer",
                13: "Recording Location",
                14: "During recording",
                15: "During performance",
                16: "Movie/Video screen capture",
                17: "A bright coloured fish",
                18: "Illustration",
                19: "Band/Artist logotype",
                20: "Publisher/Studio logotype"}.get(self.pic_type, "Other")
    def __getattr__(self, attr):
        #map the generic Image "type" and "description" attributes
        #onto this frame's ID3-specific pic_* values
        if (attr == 'type'):
            return {3: 0,                    # front cover
                    4: 1,                    # back cover
                    5: 2,                    # leaflet page
                    6: 3                     # media
                    }.get(self.pic_type, 4)  # other
        elif (attr == 'description'):
            return unicode(self.pic_description)
        else:
            raise AttributeError(attr)
    def __setattr__(self, attr, value):
        #translate Image attribute assignments back to pic_* fields;
        #values are stored via __dict__ directly so that this
        #method isn't re-triggered recursively
        if (attr == 'type'):
            self.__dict__["pic_type"] = {0: 3,          # front cover
                                         1: 4,          # back cover
                                         2: 5,          # leaflet page
                                         3: 6,          # media
                                         }.get(value, 0)  # other
        elif (attr == 'description'):
            #use the most compact encoding the description text allows
            if (is_latin_1(value)):
                self.__dict__["pic_description"] = C_string('latin-1', value)
            else:
                self.__dict__["pic_description"] = C_string('ucs2', value)
        else:
            self.__dict__[attr] = value
    @classmethod
    def parse(cls, frame_id, frame_size, reader):
        """parses this frame from the given BitstreamReader"""
        #1 encoding byte + 3 format bytes + 1 picture type byte
        (encoding, image_format, picture_type) = reader.parse("8u 3b 8u")
        description = C_string.parse({0: 'latin-1',
                                      1: 'ucs2'}[encoding], reader)
        #image data takes up the remainder of the frame
        data = reader.read_bytes(frame_size - (5 + description.size()))
        return cls(image_format,
                   picture_type,
                   description,
                   data)
    def build(self, writer):
        """writes this frame to the given BitstreamWriter
        not including its frame header"""
        writer.build("8u 3b 8u", ({'latin-1': 0,
                                   'ucs2': 1}[self.pic_description.encoding],
                                  self.pic_format,
                                  self.pic_type))
        self.pic_description.build(writer)
        writer.write_bytes(self.data)
    def size(self):
        """returns the size of this frame in bytes
        not including the frame header"""
        #5 = encoding byte + 3 format bytes + picture type byte
        return (5 + self.pic_description.size() + len(self.data))
    @classmethod
    def converted(cls, frame_id, image):
        """converts an Image object to an ID3v22_PIC_Frame"""
        if (is_latin_1(image.description)):
            description = C_string('latin-1', image.description)
        else:
            description = C_string('ucs2', image.description)
        return cls(image_format={u"image/png": u"PNG",
                                 u"image/jpeg": u"JPG",
                                 u"image/jpg": u"JPG",
                                 u"image/x-ms-bmp": u"BMP",
                                 u"image/gif": u"GIF",
                                 u"image/tiff": u"TIF"}.get(image.mime_type,
                                                            'UNK'),
                   picture_type={0: 3,            # front cover
                                 1: 4,            # back cover
                                 2: 5,            # leaflet page
                                 3: 6,            # media
                                 }.get(image.type, 0),  # other
                   description=description,
                   data=image.data)
    def clean(self, fixes_performed):
        """returns a cleaned ID3v22_PIC_Frame,
        or None if the frame should be removed entirely
        any fixes are appended to fixes_applied as unicode string"""
        #all the fields are derived from the image data
        #so there's no need to test for a mismatch
        #not sure if it's worth testing for bugs in the description
        #or format fields
        return ID3v22_PIC_Frame(self.pic_format,
                                self.pic_type,
                                self.pic_description,
                                self.data)
class ID3v22Comment(MetaData):
    NAME = u'ID3v2.2'
    #mapping of MetaData attribute names to ID3v2.2 frame IDs;
    #note that track_number/track_total and album_number/album_total
    #each share a single "number/total" formatted frame
    ATTRIBUTE_MAP = {'track_name': 'TT2',
                     'track_number': 'TRK',
                     'track_total': 'TRK',
                     'album_name': 'TAL',
                     'artist_name': 'TP1',
                     'performer_name': 'TP2',
                     'conductor_name': 'TP3',
                     'composer_name': 'TCM',
                     'media': 'TMT',
                     'ISRC': 'TRC',
                     'copyright': 'TCR',
                     'publisher': 'TPB',
                     'year': 'TYE',
                     'date': 'TRD',
                     'album_number': 'TPA',
                     'album_total': 'TPA',
                     'comment': 'COM'}
    #frame implementation classes, overridden by the
    #ID3v2.3/ID3v2.4 subclasses
    RAW_FRAME = ID3v22_Frame
    TEXT_FRAME = ID3v22_T__Frame
    USER_TEXT_FRAME = ID3v22_TXX_Frame
    WEB_FRAME = ID3v22_W__Frame
    USER_WEB_FRAME = ID3v22_WXX_Frame
    COMMENT_FRAME = ID3v22_COM_Frame
    IMAGE_FRAME = ID3v22_PIC_Frame
    IMAGE_FRAME_ID = 'PIC'
    def __init__(self, frames):
        #store a shallow copy of the frame list;
        #assigned via __dict__ since __setattr__ intercepts field names
        self.__dict__["frames"] = frames[:]
    def copy(self):
        """returns a duplicate copy of this comment"""
        return self.__class__([frame.copy() for frame in self])
    def __repr__(self):
        return "ID3v22Comment(%s)" % (repr(self.frames))
    def __iter__(self):
        #iterating over the comment yields its frames in order
        return iter(self.frames)
    def raw_info(self):
        """returns a human-readable version of this frame as unicode"""
        from os import linesep
        return linesep.decode('ascii').join(
            ["%s:" % (self.NAME)] +
            [frame.raw_info() for frame in self])
    @classmethod
    def parse(cls, reader):
        """given a BitstreamReader, returns a parsed ID3v22Comment"""
        (id3,
         major_version,
         minor_version,
         flags) = reader.parse("3b 8u 8u 8u")
        if (id3 != 'ID3'):
            raise ValueError("invalid ID3 header")
        elif (major_version != 0x02):
            raise ValueError("invalid major version")
        elif (minor_version != 0x00):
            raise ValueError("invalid minor version")
        total_size = decode_syncsafe32(reader)
        frames = []
        while (total_size > 0):
            #each ID3v2.2 frame header is a 3 byte ID and a 24-bit size
            (frame_id, frame_size) = reader.parse("3b 24u")
            if (frame_id == chr(0) * 3):
                #a NULL frame ID indicates padding, so stop parsing
                break
            elif (frame_id == 'TXX'):
                frames.append(
                    cls.USER_TEXT_FRAME.parse(
                        frame_id, frame_size, reader.substream(frame_size)))
            elif (frame_id == 'WXX'):
                frames.append(
                    cls.USER_WEB_FRAME.parse(
                        frame_id, frame_size, reader.substream(frame_size)))
            elif (frame_id == 'COM'):
                frames.append(
                    cls.COMMENT_FRAME.parse(
                        frame_id, frame_size, reader.substream(frame_size)))
            elif (frame_id == 'PIC'):
                frames.append(
                    cls.IMAGE_FRAME.parse(
                        frame_id, frame_size, reader.substream(frame_size)))
            elif (frame_id.startswith('T')):
                frames.append(
                    cls.TEXT_FRAME.parse(
                        frame_id, frame_size, reader.substream(frame_size)))
            elif (frame_id.startswith('W')):
                frames.append(
                    cls.WEB_FRAME.parse(
                        frame_id, frame_size, reader.substream(frame_size)))
            else:
                frames.append(
                    cls.RAW_FRAME.parse(
                        frame_id, frame_size, reader.substream(frame_size)))
            total_size -= (6 + frame_size)
        return cls(frames)
    def build(self, writer):
        """writes the complete ID3v22Comment data
        to the given BitstreamWriter"""
        from operator import add
        writer.build("3b 8u 8u 8u", ("ID3", 0x02, 0x00, 0x00))
        #total tag size is the sum of all 6 byte frame headers and bodies
        encode_syncsafe32(writer,
                          reduce(add, [6 + frame.size() for frame in self], 0))
        for frame in self:
            writer.build("3b 24u", (frame.id, frame.size()))
            frame.build(writer)
    def size(self):
        """returns the total size of the ID3v22Comment, including its header"""
        from operator import add
        return reduce(add, [6 + frame.size() for frame in self], 10)
    def __len__(self):
        return len(self.frames)
    def __getitem__(self, frame_id):
        #returns a list of all frames matching the given frame ID
        frames = [frame for frame in self if (frame.id == frame_id)]
        if (len(frames) > 0):
            return frames
        else:
            raise KeyError(frame_id)
    def __setitem__(self, frame_id, frames):
        #replaces matching frames in-place with the given list,
        #appending any extras at the end
        new_frames = frames[:]
        updated_frames = []
        for old_frame in self:
            if (old_frame.id == frame_id):
                try:
                    #replace current frame with newly set frame
                    updated_frames.append(new_frames.pop(0))
                except IndexError:
                    #no more newly set frames, so remove current frame
                    continue
            else:
                #passthrough unmatched frames
                updated_frames.append(old_frame)
        else:
            #append any leftover frames
            for new_frame in new_frames:
                updated_frames.append(new_frame)
        self.__dict__["frames"] = updated_frames
    def __delitem__(self, frame_id):
        updated_frames = [frame for frame in self if frame.id != frame_id]
        if (len(updated_frames) < len(self)):
            self.__dict__["frames"] = updated_frames
        else:
            #nothing removed, so no frames matched the given ID
            raise KeyError(frame_id)
    def keys(self):
        #all distinct frame IDs currently in the comment
        return list(set([frame.id for frame in self]))
    def values(self):
        return [self[key] for key in self.keys()]
    def items(self):
        return [(key, self[key]) for key in self.keys()]
    def __getattr__(self, attr):
        #maps MetaData field reads to the comment's frames
        if (attr in self.ATTRIBUTE_MAP):
            try:
                #only the first matching frame is used for the field
                frame = self[self.ATTRIBUTE_MAP[attr]][0]
                if (attr in ('track_number', 'album_number')):
                    return frame.number()
                elif (attr in ('track_total', 'album_total')):
                    return frame.total()
                else:
                    return unicode(frame)
            except KeyError:
                #no frame with that ID, so the field is unset
                return None
        elif (attr in self.FIELDS):
            #supported MetaData field with no ID3v2.2 equivalent
            return None
        else:
            raise AttributeError(attr)
    def __setattr__(self, attr, value):
        #maps MetaData field assignments to frame updates;
        #the number/total pairs require surgery on a shared
        #"number/total" formatted text frame
        if (attr in self.ATTRIBUTE_MAP):
            if (value is not None):
                import re
                frame_id = self.ATTRIBUTE_MAP[attr]
                if (attr == 'track_number'):
                    try:
                        #update the number portion of the existing frame
                        new_frame = self.TEXT_FRAME.converted(
                            frame_id,
                            re.sub(r'\d+',
                                   unicode(int(value)),
                                   unicode(self[frame_id][0]),
                                   1))
                    except KeyError:
                        #no existing frame, so build one from scratch
                        new_frame = self.TEXT_FRAME.converted(
                            frame_id,
                            __number_pair__(value, self.track_total))
                elif (attr == 'track_total'):
                    try:
                        if (re.search(r'/\D*\d+',
                                      unicode(self[frame_id][0])) is not None):
                            #frame already has a slashed total, so update it
                            new_frame = self.TEXT_FRAME.converted(
                                frame_id,
                                re.sub(r'(/\D*)(\d+)',
                                       u"\\g<1>" + unicode(int(value)),
                                       unicode(self[frame_id][0]),
                                       1))
                        else:
                            #otherwise append a total to the existing frame
                            new_frame = self.TEXT_FRAME.converted(
                                frame_id,
                                u"%s/%d" % (unicode(self[frame_id][0]),
                                            int(value)))
                    except KeyError:
                        new_frame = self.TEXT_FRAME.converted(
                            frame_id,
                            __number_pair__(self.track_number, value))
                elif (attr == 'album_number'):
                    try:
                        new_frame = self.TEXT_FRAME.converted(
                            frame_id,
                            re.sub(r'\d+',
                                   unicode(int(value)),
                                   unicode(self[frame_id][0]),
                                   1))
                    except KeyError:
                        new_frame = self.TEXT_FRAME.converted(
                            frame_id,
                            __number_pair__(value, self.album_total))
                elif (attr == 'album_total'):
                    try:
                        if (re.search(r'/\D*\d+',
                                      unicode(self[frame_id][0])) is not None):
                            new_frame = self.TEXT_FRAME.converted(
                                frame_id,
                                re.sub(r'(/\D*)(\d+)',
                                       u"\\g<1>" + unicode(int(value)),
                                       unicode(self[frame_id][0]),
                                       1))
                        else:
                            new_frame = self.TEXT_FRAME.converted(
                                frame_id,
                                u"%s/%d" % (unicode(self[frame_id][0]),
                                            int(value)))
                    except KeyError:
                        new_frame = self.TEXT_FRAME.converted(
                            frame_id,
                            __number_pair__(self.album_number, value))
                elif (attr == 'comment'):
                    new_frame = self.COMMENT_FRAME.converted(
                        frame_id, value)
                else:
                    new_frame = self.TEXT_FRAME.converted(
                        frame_id, unicode(value))
                try:
                    #replace the first matching frame, keeping any extras
                    self[frame_id] = [new_frame] + self[frame_id][1:]
                except KeyError:
                    self[frame_id] = [new_frame]
            else:
                #setting a mapped field to None deletes it
                delattr(self, attr)
        elif (attr in MetaData.FIELDS):
            #supported MetaData field with no ID3v2.2 equivalent
            pass
        else:
            self.__dict__[attr] = value
    def __delattr__(self, attr):
        #maps MetaData field deletion to frame removal, taking care
        #to preserve the other half of a shared "number/total" frame
        if (attr in self.ATTRIBUTE_MAP):
            updated_frames = []
            delete_frame_id = self.ATTRIBUTE_MAP[attr]
            for frame in self:
                if (frame.id == delete_frame_id):
                    if ((attr == 'track_number') or (attr == 'album_number')):
                        import re
                        #if *_number field contains a slashed total
                        if (re.search(r'\d+.*?/.*?\d+',
                                      unicode(frame)) is not None):
                            #replace unslashed portion with 0
                            updated_frames.append(
                                self.TEXT_FRAME.converted(
                                    frame.id,
                                    re.sub(r'\d+',
                                           unicode(int(0)),
                                           unicode(frame),
                                           1)))
                        else:
                            #otherwise, remove *_number field
                            continue
                    elif ((attr == 'track_total') or
                          (attr == 'album_total')):
                        import re
                        #if *_number is nonzero
                        _number = re.search(r'\d+',
                                            unicode(frame).split(u"/")[0])
                        if (((_number is not None) and
                             (int(_number.group(0)) != 0))):
                            #if field contains a slashed total
                            #remove slashed total from field
                            updated_frames.append(
                                self.TEXT_FRAME.converted(
                                    frame.id,
                                    re.sub(r'\s*/\D*\d+.*',
                                           u"",
                                           unicode(frame),
                                           1)))
                        else:
                            #if field contains a slashed total
                            #remove field entirely
                            if (re.search(r'/.*?\d+',
                                          unicode(frame)) is not None):
                                continue
                            else:
                                #no number or total,
                                #so pass frame through unchanged
                                updated_frames.append(frame)
                    else:
                        #handle the textual fields
                        #which are simply deleted outright
                        continue
                else:
                    updated_frames.append(frame)
            self.__dict__["frames"] = updated_frames
        elif (attr in MetaData.FIELDS):
            #ignore deleted attributes which are in MetaData
            #but we don't support
            pass
        else:
            try:
                del(self.__dict__[attr])
            except KeyError:
                raise AttributeError(attr)
    def images(self):
        """returns a list of all image frames in this comment"""
        return [frame for frame in self if (frame.id == self.IMAGE_FRAME_ID)]
    def add_image(self, image):
        """appends the given Image object as a new image frame"""
        self.frames.append(
            self.IMAGE_FRAME.converted(self.IMAGE_FRAME_ID, image))
    def delete_image(self, image):
        """removes any image frame matching the given Image object"""
        self.__dict__["frames"] = [frame for frame in self if
                                   ((frame.id != self.IMAGE_FRAME_ID) or
                                    (frame != image))]
    @classmethod
    def converted(cls, metadata):
        """converts a MetaData object to an ID3v2*Comment object"""
        if (metadata is None):
            return None
        elif (cls is metadata.__class__):
            #already the same comment class, so simply copy it
            return cls([frame.copy() for frame in metadata])
        frames = []
        #convert the plain textual fields first
        for (attr, key) in cls.ATTRIBUTE_MAP.items():
            value = getattr(metadata, attr)
            if ((attr not in cls.INTEGER_FIELDS) and (value is not None)):
                if (attr == 'comment'):
                    frames.append(cls.COMMENT_FRAME.converted(key, value))
                else:
                    frames.append(cls.TEXT_FRAME.converted(key, value))
        #then merge each number/total pair into a single frame
        if (((metadata.track_number is not None) or
             (metadata.track_total is not None))):
            frames.append(
                cls.TEXT_FRAME.converted(
                    cls.ATTRIBUTE_MAP["track_number"],
                    __number_pair__(metadata.track_number,
                                    metadata.track_total)))
        if (((metadata.album_number is not None) or
             (metadata.album_total is not None))):
            frames.append(
                cls.TEXT_FRAME.converted(
                    cls.ATTRIBUTE_MAP["album_number"],
                    __number_pair__(metadata.album_number,
                                    metadata.album_total)))
        for image in metadata.images():
            frames.append(cls.IMAGE_FRAME.converted(cls.IMAGE_FRAME_ID, image))
        if (hasattr(cls, 'ITUNES_COMPILATION_ID')):
            frames.append(
                cls.TEXT_FRAME.converted(
                    cls.ITUNES_COMPILATION_ID, u'1'))
        return cls(frames)
    def clean(self, fixes_performed):
        """returns a new MetaData object that's been cleaned of problems"""
        #clean each frame individually, dropping any that return None
        return self.__class__([filtered_frame for filtered_frame in
                               [frame.clean(fixes_performed) for frame in self]
                               if filtered_frame is not None])
############################################################
# ID3v2.3 Comment
############################################################
class ID3v23_Frame(ID3v22_Frame):
    """a raw ID3v2.3 frame, reusing the ID3v2.2 implementation"""

    def __repr__(self):
        fields = (repr(self.id), repr(self.data))
        return "ID3v23_Frame(%s, %s)" % fields
class ID3v23_T___Frame(ID3v22_T__Frame):
    """an ID3v2.3 text frame, reusing the ID3v2.2 implementation"""

    #frame IDs whose text holds a number or number/total pair
    NUMERICAL_IDS = ('TRCK', 'TPOS')

    def __repr__(self):
        fields = (repr(self.id), repr(self.encoding), repr(self.data))
        return "ID3v23_T___Frame(%s, %s, %s)" % fields
class ID3v23_TXXX_Frame(ID3v22_TXX_Frame):
    """an ID3v2.3 user-defined text frame"""

    def __init__(self, encoding, description, data):
        #same layout as TXX but with the 4 character ID3v2.3 frame ID
        self.encoding = encoding
        self.description = description
        self.data = data
        self.id = 'TXXX'

    def __repr__(self):
        fields = (repr(self.encoding),
                  repr(self.description),
                  repr(self.data))
        return "ID3v23_TXXX_Frame(%s, %s, %s)" % fields
class ID3v23_W___Frame(ID3v22_W__Frame):
    """an ID3v2.3 URL frame, reusing the ID3v2.2 implementation"""

    def __repr__(self):
        fields = (repr(self.id), repr(self.data))
        return "ID3v23_W___Frame(%s, %s)" % fields
class ID3v23_WXXX_Frame(ID3v22_WXX_Frame):
    """an ID3v2.3 user-defined URL frame"""

    def __init__(self, encoding, description, data):
        #same layout as WXX but with the 4 character ID3v2.3 frame ID
        self.encoding = encoding
        self.description = description
        self.data = data
        self.id = 'WXXX'

    def __repr__(self):
        fields = (repr(self.encoding),
                  repr(self.description),
                  repr(self.data))
        return "ID3v23_WXXX_Frame(%s, %s, %s)" % fields
class ID3v23_APIC_Frame(ID3v22_PIC_Frame):
    #ID3v2.3 attached picture ('APIC') frame;
    #like PIC but identifies the image by a full MIME type
    #instead of a 3 byte format string
    def __init__(self, mime_type, picture_type, description, data):
        """fields are as follows:
        | mime_type | a C_string of the image's MIME type |
        | picture_type | a 1 byte field indicating front cover, etc. |
        | description | a description of the image as a C_string |
        | data | image data itself as a raw string |
        """
        self.id = 'APIC'
        #add APIC-specific fields
        self.pic_type = picture_type
        self.pic_description = description
        self.pic_mime_type = mime_type
        #figure out image metrics from raw data
        try:
            metrics = Image.new(data, u'', 0)
        except InvalidImage:
            #data is unparseable, so fall back on placeholder metrics
            metrics = Image(data=data, mime_type=u'',
                            width=0, height=0, color_depth=0, color_count=0,
                            description=u'', type=0)
        #then initialize Image parent fields from metrics
        self.width = metrics.width
        self.height = metrics.height
        self.color_depth = metrics.color_depth
        self.color_count = metrics.color_count
        self.data = data
    def copy(self):
        """returns a duplicate copy of this frame"""
        return self.__class__(self.pic_mime_type,
                              self.pic_type,
                              self.pic_description,
                              self.data)
    def __repr__(self):
        return "ID3v23_APIC_Frame(%s, %s, %s, ...)" % \
            (repr(self.pic_mime_type), repr(self.pic_type),
             repr(self.pic_description))
    def raw_info(self):
        """returns a human-readable version of this frame as unicode"""
        return u"APIC = (%s, %d\u00D7%d, %s, \"%s\") %d bytes" % \
            (self.type_string(),
             self.width,
             self.height,
             self.pic_mime_type,
             self.pic_description,
             len(self.data))
    def __getattr__(self, attr):
        #map generic Image attributes to this frame's APIC fields
        if (attr == 'type'):
            return {3: 0,                    # front cover
                    4: 1,                    # back cover
                    5: 2,                    # leaflet page
                    6: 3                     # media
                    }.get(self.pic_type, 4)  # other
        elif (attr == 'description'):
            return unicode(self.pic_description)
        elif (attr == 'mime_type'):
            return unicode(self.pic_mime_type)
        else:
            raise AttributeError(attr)
    def __setattr__(self, attr, value):
        #translate Image attribute assignments to APIC fields,
        #storing values via __dict__ to avoid recursion
        if (attr == 'type'):
            self.__dict__["pic_type"] = {0: 3,          # front cover
                                         1: 4,          # back cover
                                         2: 5,          # leaflet page
                                         3: 6,          # media
                                         }.get(value, 0)  # other
        elif (attr == 'description'):
            #use the most compact encoding the description text allows
            if (is_latin_1(value)):
                self.__dict__["pic_description"] = C_string('latin-1', value)
            else:
                self.__dict__["pic_description"] = C_string('ucs2', value)
        elif (attr == 'mime_type'):
            self.__dict__["pic_mime_type"] = C_string('ascii', value)
        else:
            self.__dict__[attr] = value
    @classmethod
    def parse(cls, frame_id, frame_size, reader):
        """parses this frame from the given BitstreamReader"""
        encoding = reader.read(8)
        mime_type = C_string.parse('ascii', reader)
        picture_type = reader.read(8)
        description = C_string.parse({0: 'latin-1',
                                      1: 'ucs2'}[encoding], reader)
        #image data is whatever remains after the
        #encoding byte, MIME type, picture type byte and description
        data = reader.read_bytes(frame_size - (1 +
                                               mime_type.size() +
                                               1 +
                                               description.size()))
        return cls(mime_type, picture_type, description, data)
    def build(self, writer):
        """writes this frame to the given BitstreamWriter
        not including its frame header"""
        writer.write(8, {'latin-1': 0,
                         'ucs2': 1}[self.pic_description.encoding])
        self.pic_mime_type.build(writer)
        writer.write(8, self.pic_type)
        self.pic_description.build(writer)
        writer.write_bytes(self.data)
    def size(self):
        """returns the size of this frame in bytes
        not including the frame header"""
        return (1 +
                self.pic_mime_type.size() +
                1 +
                self.pic_description.size() +
                len(self.data))
    @classmethod
    def converted(cls, frame_id, image):
        """converts an Image object to an ID3v23_APIC_Frame"""
        if (is_latin_1(image.description)):
            description = C_string('latin-1', image.description)
        else:
            description = C_string('ucs2', image.description)
        return cls(mime_type=C_string('ascii', image.mime_type),
                   picture_type={0: 3,            # front cover
                                 1: 4,            # back cover
                                 2: 5,            # leaflet page
                                 3: 6,            # media
                                 }.get(image.type, 0),  # other
                   description=description,
                   data=image.data)
    def clean(self, fixes_performed):
        """returns a cleaned ID3v23_APIC_Frame,
        or None if the frame should be removed entirely
        any fixes are appended to fixes_applied as unicode string"""
        #replace the stored MIME type if it doesn't match the actual data
        actual_mime_type = Image.new(self.data, u"", 0).mime_type
        if (unicode(self.pic_mime_type) != actual_mime_type):
            from audiotools.text import (CLEAN_FIX_IMAGE_FIELDS)
            fixes_performed.append(CLEAN_FIX_IMAGE_FIELDS)
            return ID3v23_APIC_Frame(
                C_string('ascii', actual_mime_type.encode('ascii')),
                self.pic_type,
                self.pic_description,
                self.data)
        else:
            return ID3v23_APIC_Frame(
                self.pic_mime_type,
                self.pic_type,
                self.pic_description,
                self.data)
class ID3v23_COMM_Frame(ID3v22_COM_Frame):
    #ID3v2.3 comment ('COMM') frame, reusing the ID3v2.2 implementation
    def __init__(self, encoding, language, short_description, data):
        """fields are as follows:
        | encoding | 1 byte int of the comment's text encoding |
        | language | 3 byte string of the comment's language |
        | short_description | C_string of a short description |
        | data | plain string of the comment data itself |
        """
        self.id = "COMM"
        self.encoding = encoding
        self.language = language
        self.short_description = short_description
        self.data = data
    def __repr__(self):
        return "ID3v23_COMM_Frame(%s, %s, %s, %s)" % \
            (repr(self.encoding), repr(self.language),
             repr(self.short_description), repr(self.data))
    def raw_info(self):
        """returns a human-readable version of this frame as unicode"""
        #use the 'replace' error handler when decoding the comment body
        #so that displaying a malformed tag can't raise UnicodeDecodeError
        #(the language field is already decoded leniently below)
        return u"COMM = (%s, %s, \"%s\") %s" % \
            ({0: u'Latin-1', 1: u'UCS-2'}[self.encoding],
             self.language.decode('ascii', 'replace'),
             self.short_description,
             self.data.decode({0: 'latin-1', 1: 'ucs2'}[self.encoding],
                              'replace'))
class ID3v23Comment(ID3v22Comment):
    NAME = u'ID3v2.3'
    #mapping of MetaData attribute names to ID3v2.3 frame IDs
    ATTRIBUTE_MAP = {'track_name': 'TIT2',
                     'track_number': 'TRCK',
                     'track_total': 'TRCK',
                     'album_name': 'TALB',
                     'artist_name': 'TPE1',
                     'performer_name': 'TPE2',
                     'composer_name': 'TCOM',
                     'conductor_name': 'TPE3',
                     'media': 'TMED',
                     'ISRC': 'TSRC',
                     'copyright': 'TCOP',
                     'publisher': 'TPUB',
                     'year': 'TYER',
                     'date': 'TRDA',
                     'album_number': 'TPOS',
                     'album_total': 'TPOS',
                     'comment': 'COMM'}
    #ID3v2.3-specific frame implementation classes
    RAW_FRAME = ID3v23_Frame
    TEXT_FRAME = ID3v23_T___Frame
    WEB_FRAME = ID3v23_W___Frame
    USER_TEXT_FRAME = ID3v23_TXXX_Frame
    USER_WEB_FRAME = ID3v23_WXXX_Frame
    COMMENT_FRAME = ID3v23_COMM_Frame
    IMAGE_FRAME = ID3v23_APIC_Frame
    IMAGE_FRAME_ID = 'APIC'
    ITUNES_COMPILATION_ID = 'TCMP'
    def __repr__(self):
        return "ID3v23Comment(%s)" % (repr(self.frames))
    @classmethod
    def parse(cls, reader):
        """given a BitstreamReader, returns a parsed ID3v23Comment"""
        (id3,
         major_version,
         minor_version,
         flags) = reader.parse("3b 8u 8u 8u")
        if (id3 != 'ID3'):
            raise ValueError("invalid ID3 header")
        elif (major_version != 0x03):
            raise ValueError("invalid major version")
        elif (minor_version != 0x00):
            raise ValueError("invalid minor version")
        total_size = decode_syncsafe32(reader)
        frames = []
        while (total_size > 0):
            #ID3v2.3 frame headers are a 4 byte ID,
            #a 32-bit size and 16 bits of flags
            (frame_id, frame_size, frame_flags) = reader.parse("4b 32u 16u")
            if (frame_id == chr(0) * 4):
                #a NULL frame ID indicates padding, so stop parsing
                break
            elif (frame_id == 'TXXX'):
                frames.append(
                    cls.USER_TEXT_FRAME.parse(
                        frame_id, frame_size, reader.substream(frame_size)))
            elif (frame_id == 'WXXX'):
                frames.append(
                    cls.USER_WEB_FRAME.parse(
                        frame_id, frame_size, reader.substream(frame_size)))
            elif (frame_id == 'COMM'):
                frames.append(
                    cls.COMMENT_FRAME.parse(
                        frame_id, frame_size, reader.substream(frame_size)))
            elif (frame_id == 'APIC'):
                frames.append(
                    cls.IMAGE_FRAME.parse(
                        frame_id, frame_size, reader.substream(frame_size)))
            elif (frame_id.startswith('T')):
                frames.append(
                    cls.TEXT_FRAME.parse(
                        frame_id, frame_size, reader.substream(frame_size)))
            elif (frame_id.startswith('W')):
                frames.append(
                    cls.WEB_FRAME.parse(
                        frame_id, frame_size, reader.substream(frame_size)))
            else:
                frames.append(
                    cls.RAW_FRAME.parse(
                        frame_id, frame_size, reader.substream(frame_size)))
            total_size -= (10 + frame_size)
        return cls(frames)
    def build(self, writer):
        """writes the complete ID3v23Comment data
        to the given BitstreamWriter"""
        from operator import add
        writer.build("3b 8u 8u 8u", ("ID3", 0x03, 0x00, 0x00))
        #total tag size is the sum of all 10 byte frame headers and bodies
        encode_syncsafe32(writer,
                          reduce(add,
                                 [10 + frame.size() for frame in self], 0))
        for frame in self:
            writer.build("4b 32u 16u", (frame.id, frame.size(), 0))
            frame.build(writer)
    def size(self):
        """returns the total size of the ID3v23Comment, including its header"""
        from operator import add
        return reduce(add, [10 + frame.size() for frame in self], 10)
############################################################
# ID3v2.4 Comment
############################################################
class ID3v24_Frame(ID3v23_Frame):
    """a raw ID3v2.4 frame, reusing the ID3v2.3 implementation"""

    def __repr__(self):
        fields = (repr(self.id), repr(self.data))
        return "ID3v24_Frame(%s, %s)" % fields
class ID3v24_T___Frame(ID3v23_T___Frame):
    """an ID3v2.4 text frame which may use any of four text encodings"""

    def __init__(self, frame_id, encoding, data):
        #0 = Latin-1, 1 = UTF-16, 2 = UTF-16BE, 3 = UTF-8
        assert(encoding in (0, 1, 2, 3))
        self.id = frame_id
        self.encoding = encoding
        self.data = data

    def __repr__(self):
        fields = (repr(self.id), repr(self.encoding), repr(self.data))
        return "ID3v24_T___Frame(%s, %s, %s)" % fields

    def __unicode__(self):
        codec = {0: u"latin-1",
                 1: u"utf-16",
                 2: u"utf-16BE",
                 3: u"utf-8"}[self.encoding]
        #decode leniently and keep only the text before any NULL terminator
        return self.data.decode(codec, 'replace').split(unichr(0), 1)[0]

    def raw_info(self):
        """returns a human-readable version of this frame as unicode"""
        encoding_name = {0: u"Latin-1",
                         1: u"UTF-16",
                         2: u"UTF-16BE",
                         3: u"UTF-8"}[self.encoding]
        return u"%s = (%s) %s" % (self.id.decode('ascii'),
                                  encoding_name,
                                  unicode(self))

    @classmethod
    def converted(cls, frame_id, unicode_string):
        """given a unicode string, returns a text frame"""
        if (not is_latin_1(unicode_string)):
            #text beyond Latin-1's repertoire is stored as UTF-8
            return cls(frame_id, 3, unicode_string.encode('utf-8'))
        else:
            return cls(frame_id, 0, unicode_string.encode('latin-1'))
class ID3v24_TXXX_Frame(ID3v23_TXXX_Frame):
    #ID3v2.4 user-defined text frame,
    #which supports four text encodings instead of two
    def __repr__(self):
        return "ID3v24_TXXX_Frame(%s, %s, %s)" % \
            (repr(self.encoding), repr(self.description), repr(self.data))
    def raw_info(self):
        """returns a human-readable version of this frame as unicode"""
        return u"%s = (%s, \"%s\") %s" % \
            (self.id,
             {0: u"Latin-1",
              1: u"UTF-16",
              2: u"UTF-16BE",
              3: u"UTF-8"}[self.encoding],
             self.description,
             unicode(self))
    def __unicode__(self):
        #decode leniently and keep only the text before any NULL terminator
        return self.data.decode(
            {0: u"latin-1",
             1: u"utf-16",
             2: u"utf-16BE",
             3: u"utf-8"}[self.encoding], 'replace').split(unichr(0), 1)[0]
    @classmethod
    def parse(cls, frame_id, frame_size, reader):
        """given a frame_id string, frame_size int and BitstreamReader
        of the remaining frame data, returns a parsed text frame"""
        encoding = reader.read(8)
        description = C_string.parse({0: "latin-1",
                                      1: "utf-16",
                                      2: "utf-16be",
                                      3: "utf-8"}[encoding],
                                     reader)
        #text data takes up the remainder of the frame
        data = reader.read_bytes(frame_size - 1 - description.size())
        return cls(encoding, description, data)
class ID3v24_APIC_Frame(ID3v23_APIC_Frame):
    #ID3v2.4 attached picture frame,
    #whose description supports four text encodings instead of two
    def __repr__(self):
        return "ID3v24_APIC_Frame(%s, %s, %s, ...)" % \
            (repr(self.pic_mime_type), repr(self.pic_type),
             repr(self.pic_description))
    def __setattr__(self, attr, value):
        #same as ID3v23_APIC_Frame.__setattr__
        #except non-Latin-1 descriptions are stored as UTF-8
        if (attr == 'type'):
            self.__dict__["pic_type"] = {0: 3,          # front cover
                                         1: 4,          # back cover
                                         2: 5,          # leaflet page
                                         3: 6,          # media
                                         }.get(value, 0)  # other
        elif (attr == 'description'):
            if (is_latin_1(value)):
                self.__dict__["pic_description"] = C_string('latin-1', value)
            else:
                self.__dict__["pic_description"] = C_string('utf-8', value)
        elif (attr == 'mime_type'):
            self.__dict__["pic_mime_type"] = C_string('ascii', value)
        else:
            self.__dict__[attr] = value
    @classmethod
    def parse(cls, frame_id, frame_size, reader):
        """parses this frame from the given BitstreamReader"""
        encoding = reader.read(8)
        mime_type = C_string.parse('ascii', reader)
        picture_type = reader.read(8)
        description = C_string.parse({0: 'latin-1',
                                      1: 'utf-16',
                                      2: 'utf-16be',
                                      3: 'utf-8'}[encoding], reader)
        #image data is whatever remains after the
        #encoding byte, MIME type, picture type byte and description
        data = reader.read_bytes(frame_size - (1 +
                                               mime_type.size() +
                                               1 +
                                               description.size()))
        return cls(mime_type, picture_type, description, data)
    def build(self, writer):
        """writes this frame to the given BitstreamWriter
        not including its frame header"""
        writer.write(8, {'latin-1': 0,
                         'utf-16': 1,
                         'utf-16be': 2,
                         'utf-8': 3}[self.pic_description.encoding])
        self.pic_mime_type.build(writer)
        writer.write(8, self.pic_type)
        self.pic_description.build(writer)
        writer.write_bytes(self.data)
    @classmethod
    def converted(cls, frame_id, image):
        """converts an Image object to an ID3v24_APIC_Frame"""
        if (is_latin_1(image.description)):
            description = C_string('latin-1', image.description)
        else:
            description = C_string('utf-8', image.description)
        return cls(mime_type=C_string('ascii', image.mime_type),
                   picture_type={0: 3,            # front cover
                                 1: 4,            # back cover
                                 2: 5,            # leaflet page
                                 3: 6,            # media
                                 }.get(image.type, 0),  # other
                   description=description,
                   data=image.data)
    def clean(self, fixes_performed):
        """returns a cleaned ID3v24_APIC_Frame,
        or None if the frame should be removed entirely
        any fixes are appended to fixes_applied as unicode string"""
        #replace the stored MIME type if it doesn't match the actual data
        actual_mime_type = Image.new(self.data, u"", 0).mime_type
        if (unicode(self.pic_mime_type) != actual_mime_type):
            from audiotools.text import (CLEAN_FIX_IMAGE_FIELDS)
            fixes_performed.append(CLEAN_FIX_IMAGE_FIELDS)
            return ID3v24_APIC_Frame(
                C_string('ascii',
                         actual_mime_type.encode('ascii')),
                self.pic_type,
                self.pic_description,
                self.data)
        else:
            return ID3v24_APIC_Frame(
                self.pic_mime_type,
                self.pic_type,
                self.pic_description,
                self.data)
class ID3v24_W___Frame(ID3v23_W___Frame):
    """an ID3v2.4 URL frame, reusing the ID3v2.3 implementation"""

    def __repr__(self):
        fields = (repr(self.id), repr(self.data))
        return "ID3v24_W___Frame(%s, %s)" % fields
class ID3v24_WXXX_Frame(ID3v23_WXXX_Frame):
    #ID3v2.4 user-defined URL frame,
    #whose description supports four text encodings instead of two
    def __repr__(self):
        return "ID3v24_WXXX_Frame(%s, %s, %s)" % \
            (repr(self.encoding), repr(self.description), repr(self.data))
    def raw_info(self):
        """returns a human-readable version of this frame as unicode"""
        return u"%s = (%s, \"%s\") %s" % \
            (self.id,
             {0: u'Latin-1',
              1: u'UTF-16',
              2: u'UTF-16BE',
              3: u'UTF-8'}[self.encoding],
             self.description,
             self.data.decode('ascii', 'replace'))
    @classmethod
    def parse(cls, frame_id, frame_size, reader):
        """given a frame_id string, frame_size int and BitstreamReader
        of the remaining frame data, returns a parsed text frame"""
        encoding = reader.read(8)
        description = C_string.parse({0: 'latin-1',
                                      1: 'utf-16',
                                      2: 'utf-16be',
                                      3: 'utf-8'}[encoding],
                                     reader)
        #URL data takes up the remainder of the frame
        data = reader.read_bytes(frame_size - 1 - description.size())
        return cls(encoding, description, data)
class ID3v24_COMM_Frame(ID3v23_COMM_Frame):
    #ID3v2.4 comment frame,
    #which supports four text encodings instead of two
    def __repr__(self):
        return "ID3v24_COMM_Frame(%s, %s, %s, %s)" % \
            (repr(self.encoding), repr(self.language),
             repr(self.short_description), repr(self.data))
    def __unicode__(self):
        #decoded leniently so malformed data can't raise an exception
        return self.data.decode({0: 'latin-1',
                                 1: 'utf-16',
                                 2: 'utf-16be',
                                 3: 'utf-8'}[self.encoding], 'replace')
    def raw_info(self):
        """returns a human-readable version of this frame as unicode"""
        return u"COMM = (%s, %s, \"%s\") %s" % \
            ({0: u'Latin-1',
              1: u'UTF-16',
              2: u'UTF-16BE',
              3: u'UTF-8'}[self.encoding],
             self.language.decode('ascii', 'replace'),
             self.short_description,
             self.data.decode({0: 'latin-1',
                               1: 'utf-16',
                               2: 'utf-16be',
                               3: 'utf-8'}[self.encoding]))
    @classmethod
    def parse(cls, frame_id, frame_size, reader):
        """given a frame_id string, frame_size int and BitstreamReader
        of the remaining frame data, returns a parsed ID3v22_COM_Frame"""
        (encoding, language) = reader.parse("8u 3b")
        short_description = C_string.parse({0: 'latin-1',
                                            1: 'utf-16',
                                            2: 'utf-16be',
                                            3: 'utf-8'}[encoding],
                                           reader)
        #comment data takes up the remainder of the frame
        #(4 = encoding byte + 3 language bytes)
        data = reader.read_bytes(frame_size - (4 + short_description.size()))
        return cls(encoding, language, short_description, data)
    @classmethod
    def converted(cls, frame_id, unicode_string):
        """given a unicode string, returns an ID3v24_COMM_Frame"""
        #use the most compact encoding the text allows
        if (is_latin_1(unicode_string)):
            return cls(0, "eng", C_string("latin-1", u""),
                       unicode_string.encode('latin-1'))
        else:
            return cls(3, "eng", C_string("utf-8", u""),
                       unicode_string.encode('utf-8'))
    def clean(self, fixes_performed):
        """returns a cleaned frame of the same class
        or None if the frame should be omitted
        fix text will be appended to fixes_performed, if necessary"""
        from .text import (CLEAN_REMOVE_EMPTY_TAG,
                           CLEAN_REMOVE_TRAILING_WHITESPACE,
                           CLEAN_REMOVE_LEADING_WHITESPACE)
        field = self.id.decode('ascii')
        text_encoding = {0: 'latin-1',
                         1: 'utf-16',
                         2: 'utf-16be',
                         3: 'utf-8'}
        value = self.data.decode(text_encoding[self.encoding], 'replace')
        #check for an empty tag
        if (len(value.strip()) == 0):
            fixes_performed.append(CLEAN_REMOVE_EMPTY_TAG %
                                   {"field": field})
            return None
        #check trailing whitespace
        fix1 = value.rstrip()
        if (fix1 != value):
            fixes_performed.append(CLEAN_REMOVE_TRAILING_WHITESPACE %
                                   {"field": field})
        #check leading whitespace
        fix2 = fix1.lstrip()
        if (fix2 != fix1):
            fixes_performed.append(CLEAN_REMOVE_LEADING_WHITESPACE %
                                   {"field": field})
        #stripping whitespace shouldn't alter text/description encoding
        return self.__class__(self.encoding,
                              self.language,
                              self.short_description,
                              fix2.encode(text_encoding[self.encoding]))
class ID3v24Comment(ID3v23Comment):
    NAME = u'ID3v2.4'
    #ID3v2.4 reuses the 4 character frame IDs of ID3v2.3,
    #so ATTRIBUTE_MAP is inherited unchanged;
    #only the frame implementation classes differ
    RAW_FRAME = ID3v24_Frame
    TEXT_FRAME = ID3v24_T___Frame
    USER_TEXT_FRAME = ID3v24_TXXX_Frame
    WEB_FRAME = ID3v24_W___Frame
    USER_WEB_FRAME = ID3v24_WXXX_Frame
    COMMENT_FRAME = ID3v24_COMM_Frame
    IMAGE_FRAME = ID3v24_APIC_Frame
    IMAGE_FRAME_ID = 'APIC'
    ITUNES_COMPILATION_ID = 'TCMP'
    def __repr__(self):
        return "ID3v24Comment(%s)" % (repr(self.frames))
    @classmethod
    def parse(cls, reader):
        """given a BitstreamReader, returns a parsed ID3v24Comment"""
        (id3,
         major_version,
         minor_version,
         flags) = reader.parse("3b 8u 8u 8u")
        if (id3 != 'ID3'):
            raise ValueError("invalid ID3 header")
        elif (major_version != 0x04):
            raise ValueError("invalid major version")
        elif (minor_version != 0x00):
            raise ValueError("invalid minor version")
        total_size = decode_syncsafe32(reader)
        frames = []
        while (total_size > 0):
            #unlike ID3v2.3, frame sizes are stored
            #as syncsafe 32-bit integers
            frame_id = reader.read_bytes(4)
            frame_size = decode_syncsafe32(reader)
            flags = reader.read(16)
            if (frame_id == chr(0) * 4):
                #a NULL frame ID indicates padding, so stop parsing
                break
            elif (frame_id == 'TXXX'):
                frames.append(
                    cls.USER_TEXT_FRAME.parse(
                        frame_id, frame_size, reader.substream(frame_size)))
            elif (frame_id == 'WXXX'):
                frames.append(
                    cls.USER_WEB_FRAME.parse(
                        frame_id, frame_size, reader.substream(frame_size)))
            elif (frame_id == 'COMM'):
                frames.append(
                    cls.COMMENT_FRAME.parse(
                        frame_id, frame_size, reader.substream(frame_size)))
            elif (frame_id == 'APIC'):
                frames.append(
                    cls.IMAGE_FRAME.parse(
                        frame_id, frame_size, reader.substream(frame_size)))
            elif (frame_id.startswith('T')):
                frames.append(
                    cls.TEXT_FRAME.parse(
                        frame_id, frame_size, reader.substream(frame_size)))
            elif (frame_id.startswith('W')):
                frames.append(
                    cls.WEB_FRAME.parse(
                        frame_id, frame_size, reader.substream(frame_size)))
            else:
                frames.append(
                    cls.RAW_FRAME.parse(
                        frame_id, frame_size, reader.substream(frame_size)))
            total_size -= (10 + frame_size)
        return cls(frames)
    def build(self, writer):
        """writes the complete ID3v24Comment data
        to the given BitstreamWriter"""
        from operator import add
        writer.build("3b 8u 8u 8u", ("ID3", 0x04, 0x00, 0x00))
        #total tag size is the sum of all 10 byte frame headers and bodies
        encode_syncsafe32(writer,
                          reduce(add, [10 + frame.size() for frame in self],
                                 0))
        for frame in self:
            writer.write_bytes(frame.id)
            encode_syncsafe32(writer, frame.size())
            writer.write(16, 0)
            frame.build(writer)
    def size(self):
        """returns the total size of the ID3v24Comment, including its header"""
        from operator import add
        return reduce(add, [10 + frame.size() for frame in self], 10)
#alias: the ID3v2.2 implementation serves as the generic "ID3v2Comment"
ID3v2Comment = ID3v22Comment
class ID3CommentPair(MetaData):
    """a pair of ID3v2/ID3v1 comments

    these can be manipulated as a set"""

    def __init__(self, id3v2_comment, id3v1_comment):
        """id3v2 and id3v1 are ID3v2Comment and ID3v1Comment objects or None

        values in ID3v2 take precedence over ID3v1, if present"""

        # write through __dict__ so our own __setattr__ (which forwards
        # field updates to the sub-comments) isn't triggered here
        self.__dict__['id3v2'] = id3v2_comment
        self.__dict__['id3v1'] = id3v1_comment

        # at least one of the two comments must be present
        # (previously an unused "base_comment" local was also assigned;
        # only the validation is actually needed)
        if ((self.id3v2 is None) and (self.id3v1 is None)):
            raise ValueError("ID3v2 and ID3v1 cannot both be blank")

    def __getattr__(self, key):
        # metadata fields prefer ID3v2's value, falling back on ID3v1
        if (key in self.FIELDS):
            if (((self.id3v2 is not None) and
                 (getattr(self.id3v2, key) is not None))):
                return getattr(self.id3v2, key)
            elif (self.id3v1 is not None):
                return getattr(self.id3v1, key)
            else:
                raise ValueError("ID3v2 and ID3v1 cannot both be blank")
        else:
            raise AttributeError(key)

    def __setattr__(self, key, value):
        # update ourself, then forward the change to both sub-comments
        self.__dict__[key] = value
        if (self.id3v2 is not None):
            setattr(self.id3v2, key, value)
        if (self.id3v1 is not None):
            setattr(self.id3v1, key, value)

    def __delattr__(self, key):
        # forward the deletion to both sub-comments
        if (self.id3v2 is not None):
            delattr(self.id3v2, key)
        if (self.id3v1 is not None):
            delattr(self.id3v1, key)

    @classmethod
    def converted(cls, metadata,
                  id3v2_class=ID3v23Comment,
                  id3v1_class=ID3v1Comment):
        """takes a MetaData object and returns an ID3CommentPair object"""

        if (metadata is None):
            return None
        elif (isinstance(metadata, ID3CommentPair)):
            # re-convert each half with its own class
            return ID3CommentPair(
                metadata.id3v2.__class__.converted(metadata.id3v2),
                metadata.id3v1.__class__.converted(metadata.id3v1))
        elif (isinstance(metadata, ID3v2Comment)):
            # already an ID3v2 comment of some flavor;
            # keep it as-is and derive an ID3v1 counterpart
            return ID3CommentPair(metadata,
                                  id3v1_class.converted(metadata))
        else:
            return ID3CommentPair(
                id3v2_class.converted(metadata),
                id3v1_class.converted(metadata))

    def raw_info(self):
        """returns a human-readable version of this metadata pair
        as a unicode string"""

        if ((self.id3v2 is not None) and (self.id3v1 is not None)):
            # both comments present
            from os import linesep

            return (self.id3v2.raw_info() +
                    linesep.decode('ascii') * 2 +
                    self.id3v1.raw_info())
        elif (self.id3v2 is not None):
            # only ID3v2
            return self.id3v2.raw_info()
        elif (self.id3v1 is not None):
            # only ID3v1
            return self.id3v1.raw_info()
        else:
            return u''

    # ImageMetaData passthroughs
    # (only the ID3v2 half supports embedded images)

    def images(self):
        """returns a list of embedded Image objects"""

        if (self.id3v2 is not None):
            return self.id3v2.images()
        else:
            return []

    def add_image(self, image):
        """embeds an Image object in this metadata"""

        if (self.id3v2 is not None):
            self.id3v2.add_image(image)

    def delete_image(self, image):
        """deletes an Image object from this metadata"""

        if (self.id3v2 is not None):
            self.id3v2.delete_image(image)

    @classmethod
    def supports_images(cls):
        """returns True"""

        return True

    def clean(self, fixes_performed):
        """returns a new, cleaned pair, appending a description of
        each fix performed to the given list"""

        if (self.id3v2 is not None):
            new_id3v2 = self.id3v2.clean(fixes_performed)
        else:
            new_id3v2 = None
        if (self.id3v1 is not None):
            new_id3v1 = self.id3v1.clean(fixes_performed)
        else:
            new_id3v1 = None
        return ID3CommentPair(new_id3v2, new_id3v1)
| Excito/audiotools | audiotools/id3.py | Python | gpl-2.0 | 82,383 | [
"Brian"
] | 81aa7ccf5e0d52790199f1ab7c0b3c03e6d76c9b743d23d479c6cd4a073c79c3 |
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
Unit tests for the :class:`iris.coords.CellMethod`.
"""
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
from iris._cube_coord_common import CFVariableMixin
from iris.coords import CellMethod, AuxCoord
class Test(tests.IrisTest):
    """Tests for how CellMethod renders its coords in its str() form."""

    def setUp(self):
        self.method = "mean"

    def _check(self, token, coord, default=False):
        # Helper: build a CellMethod over `coord` and assert that its
        # string form names the coord via `token` (or via the library
        # default name when `default` is True).
        result = CellMethod(self.method, coords=coord)
        token = token if not default else CFVariableMixin._DEFAULT_NAME
        expected = "{}: {}".format(self.method, token)
        self.assertEqual(str(result), expected)

    def test_coord_standard_name(self):
        token = "air_temperature"
        coord = AuxCoord(1, standard_name=token)
        self._check(token, coord)

    def test_coord_long_name(self):
        token = "long_name"
        coord = AuxCoord(1, long_name=token)
        self._check(token, coord)

    def test_coord_long_name_default(self):
        token = "long name"  # includes space
        coord = AuxCoord(1, long_name=token)
        self._check(token, coord, default=True)

    def test_coord_var_name(self):
        token = "var_name"
        coord = AuxCoord(1, var_name=token)
        self._check(token, coord)

    def test_coord_var_name_fail(self):
        token = "var name"  # includes space
        emsg = "is not a valid NetCDF variable name"
        with self.assertRaisesRegex(ValueError, emsg):
            AuxCoord(1, var_name=token)

    def test_coord_stash(self):
        token = "stash"
        coord = AuxCoord(1, attributes=dict(STASH=token))
        self._check(token, coord)

    def test_coord_stash_default(self):
        token = "_stash"  # includes leading underscore
        coord = AuxCoord(1, attributes=dict(STASH=token))
        self._check(token, coord, default=True)

    def test_string(self):
        token = "air_temperature"
        result = CellMethod(self.method, coords=token)
        expected = "{}: {}".format(self.method, token)
        self.assertEqual(str(result), expected)

    def test_string_default(self):
        token = "air temperature"  # includes space
        result = CellMethod(self.method, coords=token)
        expected = "{}: unknown".format(self.method)
        self.assertEqual(str(result), expected)

    def test_mixture(self):
        token = "air_temperature"
        coord = AuxCoord(1, standard_name=token)
        result = CellMethod(self.method, coords=[coord, token])
        expected = "{}: {}, {}".format(self.method, token, token)
        self.assertEqual(str(result), expected)

    def test_mixture_default(self):
        token = "air temperature"  # includes space
        coord = AuxCoord(1, long_name=token)
        result = CellMethod(self.method, coords=[coord, token])
        # The template has no placeholders beyond the method name, so
        # no further format arguments are needed (previously two unused
        # token arguments were passed).
        expected = "{}: unknown, unknown".format(self.method)
        self.assertEqual(str(result), expected)
if __name__ == "__main__":
    # Run this module's tests when executed directly.
    tests.main()
| pp-mo/iris | lib/iris/tests/unit/coords/test_CellMethod.py | Python | lgpl-3.0 | 3,175 | [
"NetCDF"
] | f41c253b7a7a1b846b7503465fdfd8261597e2fe171ecc0e7ea1288fb9b65343 |
#!/usr/bin/env python
"""
Fix errors in a dataset.
For now, only removing erroneous lines is supported.
usage: %prog input errorsfile output
-x, --ext: dataset extension (type)
-m, --methods=N: comma separated list of repair methods
"""
import pkg_resources; pkg_resources.require( "bx-python" )
from bx.cookbook import doc_optparse
from galaxy import util
def main():
    """Copy the input dataset to the output, dropping lines flagged as
    erroneous when the "lines" repair method is requested.

    Positional arguments (see module docstring): input dataset path,
    serialized errors file path, output path.
    """
    options, args = doc_optparse.parse(__doc__)

    # `methods` may be absent from the parsed options; treat that the
    # same as an empty method list (the old bare except swallowed the
    # AttributeError; the -x/--ext option was read but never used)
    methods_option = getattr(options, "methods", None)
    methods = methods_option.split(",") if methods_option else []

    # deserialize the error list and index the errors by the line
    # number they refer to
    with open(args[1], "r") as error_file:
        error_list = util.string_to_object(error_file.read())
    error_lines = {}
    for error in error_list:
        if error.linenum:
            error_lines.setdefault(error.linenum, []).append(error)

    # loop-invariant: whether flagged lines should be dropped at all
    remove_lines = "lines" in methods

    with open(args[0], "r") as in_file:
        with open(args[2], "w") as out_file:
            for linenum, line in enumerate(in_file, start=1):
                if remove_lines and linenum in error_lines:
                    # erroneous line: skip it
                    continue
                # other processing here?
                out_file.write(line)
if __name__ == "__main__":
    # entry point when run as a Galaxy tool script
    main()
| volpino/Yeps-EURAC | tools/validation/fix_errors.py | Python | mit | 1,596 | [
"Galaxy"
] | 43a75c8215b93ad4cb425ba3a83181255b4b7552402da2186b280d664edf8c08 |
import enum
import sys
import unittest
from enum import Enum, IntEnum, unique, EnumMeta
from pickle import dumps, loads, PicklingError, HIGHEST_PROTOCOL
# numeric major.minor of the running interpreter (e.g. 2.7, 3.4),
# used throughout this suite to gate version-specific tests
pyver = float('%s.%s' % sys.version_info[:2])

try:
    any
except NameError:
    # Python 2.4 compatibility: supply any() when the builtin is missing
    def any(iterable):
        for element in iterable:
            if element:
                return True
        return False

try:
    unicode
except NameError:
    # Python 3: there is no separate unicode type
    unicode = str

try:
    from collections import OrderedDict
except ImportError:
    # Python < 2.7 lacks OrderedDict; the ordering tests check for None
    OrderedDict = None
# for pickle tests
#
# each fixture is created at import time; if creation fails, the raised
# exception is stored in its place so the matching test can re-raise it
try:
    class Stooges(Enum):
        LARRY = 1
        CURLY = 2
        MOE = 3
except Exception:
    Stooges = sys.exc_info()[1]

try:
    class IntStooges(int, Enum):
        LARRY = 1
        CURLY = 2
        MOE = 3
except Exception:
    IntStooges = sys.exc_info()[1]

try:
    class FloatStooges(float, Enum):
        LARRY = 1.39
        CURLY = 2.72
        MOE = 3.142596
except Exception:
    FloatStooges = sys.exc_info()[1]

# for pickle test and subclass tests
try:
    class StrEnum(str, Enum):
        'accepts only string values'
    class Name(StrEnum):
        BDFL = 'Guido van Rossum'
        FLUFL = 'Barry Warsaw'
except Exception:
    Name = sys.exc_info()[1]

# functional-API fixtures: with explicit module, bare, and with qualname
try:
    Question = Enum('Question', 'who what when where why', module=__name__)
except Exception:
    Question = sys.exc_info()[1]

try:
    Answer = Enum('Answer', 'him this then there because')
except Exception:
    Answer = sys.exc_info()[1]

try:
    Theory = Enum('Theory', 'rule law supposition', qualname='spanish_inquisition')
except Exception:
    Theory = sys.exc_info()[1]

# for doctests
try:
    class Fruit(Enum):
        tomato = 1
        banana = 2
        cherry = 3
except Exception:
    pass
def test_pickle_dump_load(assertion, source, target=None,
protocol=(0, HIGHEST_PROTOCOL)):
start, stop = protocol
failures = []
for protocol in range(start, stop+1):
try:
if target is None:
assertion(loads(dumps(source, protocol=protocol)) is source)
else:
assertion(loads(dumps(source, protocol=protocol)), target)
except Exception:
exc, tb = sys.exc_info()[1:]
failures.append('%2d: %s' %(protocol, exc))
if failures:
raise ValueError('Failed with protocols: %s' % ', '.join(failures))
def test_pickle_exception(assertion, exception, obj,
protocol=(0, HIGHEST_PROTOCOL)):
start, stop = protocol
failures = []
for protocol in range(start, stop+1):
try:
assertion(exception, dumps, obj, protocol=protocol)
except Exception:
exc = sys.exc_info()[1]
failures.append('%d: %s %s' % (protocol, exc.__class__.__name__, exc))
if failures:
raise ValueError('Failed with protocols: %s' % ', '.join(failures))
class TestHelpers(unittest.TestCase):
    """exercise enum's private helper predicates:
    _is_descriptor, _is_sunder and _is_dunder"""

    def test_is_descriptor(self):
        class foo:
            pass
        # an object only counts as a descriptor once it grows one of
        # the descriptor-protocol attributes
        for slot in ('__get__', '__set__', '__delete__'):
            candidate = foo()
            self.assertFalse(enum._is_descriptor(candidate))
            setattr(candidate, slot, 1)
            self.assertTrue(enum._is_descriptor(candidate))

    def test_is_sunder(self):
        sunders = ('_a_', '_aa_')
        non_sunders = ('a', 'a_', '_a', '__a', 'a__', '__a__', '_a__',
                       '__a_', '_', '__', '___', '____', '_____')
        for name in sunders:
            self.assertTrue(enum._is_sunder(name))
        for name in non_sunders:
            self.assertFalse(enum._is_sunder(name))

    def test_is_dunder(self):
        dunders = ('__a__', '__aa__')
        non_dunders = ('a', 'a_', '_a', '__a', 'a__', '_a_', '_a__',
                       '__a_', '_', '__', '___', '____', '_____')
        for name in dunders:
            self.assertTrue(enum._is_dunder(name))
        for name in non_dunders:
            self.assertFalse(enum._is_dunder(name))
class TestEnum(unittest.TestCase):
def setUp(self):
class Season(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = 3
WINTER = 4
self.Season = Season
class Konstants(float, Enum):
E = 2.7182818
PI = 3.1415926
TAU = 2 * PI
self.Konstants = Konstants
class Grades(IntEnum):
A = 5
B = 4
C = 3
D = 2
F = 0
self.Grades = Grades
class Directional(str, Enum):
EAST = 'east'
WEST = 'west'
NORTH = 'north'
SOUTH = 'south'
self.Directional = Directional
from datetime import date
class Holiday(date, Enum):
NEW_YEAR = 2013, 1, 1
IDES_OF_MARCH = 2013, 3, 15
self.Holiday = Holiday
if pyver >= 3.0: # do not specify custom `dir` on previous versions
def test_dir_on_class(self):
Season = self.Season
self.assertEqual(
set(dir(Season)),
set(['__class__', '__doc__', '__members__', '__module__',
'SPRING', 'SUMMER', 'AUTUMN', 'WINTER']),
)
def test_dir_on_item(self):
Season = self.Season
self.assertEqual(
set(dir(Season.WINTER)),
set(['__class__', '__doc__', '__module__', 'name', 'value']),
)
def test_dir_with_added_behavior(self):
class Test(Enum):
this = 'that'
these = 'those'
def wowser(self):
return ("Wowser! I'm %s!" % self.name)
self.assertEqual(
set(dir(Test)),
set(['__class__', '__doc__', '__members__', '__module__', 'this', 'these']),
)
self.assertEqual(
set(dir(Test.this)),
set(['__class__', '__doc__', '__module__', 'name', 'value', 'wowser']),
)
def test_dir_on_sub_with_behavior_on_super(self):
# see issue22506
class SuperEnum(Enum):
def invisible(self):
return "did you see me?"
class SubEnum(SuperEnum):
sample = 5
self.assertEqual(
set(dir(SubEnum.sample)),
set(['__class__', '__doc__', '__module__', 'name', 'value', 'invisible']),
)
if pyver >= 2.7: # OrderedDict first available here
def test_members_is_ordereddict_if_ordered(self):
class Ordered(Enum):
__order__ = 'first second third'
first = 'bippity'
second = 'boppity'
third = 'boo'
self.assertTrue(type(Ordered.__members__) is OrderedDict)
def test_members_is_ordereddict_if_not_ordered(self):
class Unordered(Enum):
this = 'that'
these = 'those'
self.assertTrue(type(Unordered.__members__) is OrderedDict)
if pyver >= 3.0: # all objects are ordered in Python 2.x
def test_members_is_always_ordered(self):
class AlwaysOrdered(Enum):
first = 1
second = 2
third = 3
self.assertTrue(type(AlwaysOrdered.__members__) is OrderedDict)
def test_comparisons(self):
def bad_compare():
Season.SPRING > 4
Season = self.Season
self.assertNotEqual(Season.SPRING, 1)
self.assertRaises(TypeError, bad_compare)
class Part(Enum):
SPRING = 1
CLIP = 2
BARREL = 3
self.assertNotEqual(Season.SPRING, Part.SPRING)
def bad_compare():
Season.SPRING < Part.CLIP
self.assertRaises(TypeError, bad_compare)
def test_enum_in_enum_out(self):
Season = self.Season
self.assertTrue(Season(Season.WINTER) is Season.WINTER)
def test_enum_value(self):
Season = self.Season
self.assertEqual(Season.SPRING.value, 1)
def test_intenum_value(self):
self.assertEqual(IntStooges.CURLY.value, 2)
def test_enum(self):
Season = self.Season
lst = list(Season)
self.assertEqual(len(lst), len(Season))
self.assertEqual(len(Season), 4, Season)
self.assertEqual(
[Season.SPRING, Season.SUMMER, Season.AUTUMN, Season.WINTER], lst)
for i, season in enumerate('SPRING SUMMER AUTUMN WINTER'.split()):
i += 1
e = Season(i)
self.assertEqual(e, getattr(Season, season))
self.assertEqual(e.value, i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, season)
self.assertTrue(e in Season)
self.assertTrue(type(e) is Season)
self.assertTrue(isinstance(e, Season))
self.assertEqual(str(e), 'Season.' + season)
self.assertEqual(
repr(e),
'<Season.%s: %s>' % (season, i),
)
def test_value_name(self):
Season = self.Season
self.assertEqual(Season.SPRING.name, 'SPRING')
self.assertEqual(Season.SPRING.value, 1)
def set_name(obj, new_value):
obj.name = new_value
def set_value(obj, new_value):
obj.value = new_value
self.assertRaises(AttributeError, set_name, Season.SPRING, 'invierno', )
self.assertRaises(AttributeError, set_value, Season.SPRING, 2)
def test_attribute_deletion(self):
class Season(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = 3
WINTER = 4
def spam(cls):
pass
self.assertTrue(hasattr(Season, 'spam'))
del Season.spam
self.assertFalse(hasattr(Season, 'spam'))
self.assertRaises(AttributeError, delattr, Season, 'SPRING')
self.assertRaises(AttributeError, delattr, Season, 'DRY')
self.assertRaises(AttributeError, delattr, Season.SPRING, 'name')
def test_invalid_names(self):
def create_bad_class_1():
class Wrong(Enum):
mro = 9
def create_bad_class_2():
class Wrong(Enum):
_reserved_ = 3
self.assertRaises(ValueError, create_bad_class_1)
self.assertRaises(ValueError, create_bad_class_2)
# TODO: enable when Python 3.6 is released
# def test_bool(self):
# class Logic(Enum):
# true = True
# false = False
# self.assertTrue(Logic.true)
# self.assertFalse(Logic.false)
def test_contains(self):
Season = self.Season
self.assertTrue(Season.AUTUMN in Season)
self.assertTrue(3 not in Season)
val = Season(3)
self.assertTrue(val in Season)
class OtherEnum(Enum):
one = 1; two = 2
self.assertTrue(OtherEnum.two not in Season)
if pyver >= 2.6: # when `format` came into being
def test_format_enum(self):
Season = self.Season
self.assertEqual('{0}'.format(Season.SPRING),
'{0}'.format(str(Season.SPRING)))
self.assertEqual( '{0:}'.format(Season.SPRING),
'{0:}'.format(str(Season.SPRING)))
self.assertEqual('{0:20}'.format(Season.SPRING),
'{0:20}'.format(str(Season.SPRING)))
self.assertEqual('{0:^20}'.format(Season.SPRING),
'{0:^20}'.format(str(Season.SPRING)))
self.assertEqual('{0:>20}'.format(Season.SPRING),
'{0:>20}'.format(str(Season.SPRING)))
self.assertEqual('{0:<20}'.format(Season.SPRING),
'{0:<20}'.format(str(Season.SPRING)))
def test_format_enum_custom(self):
class TestFloat(float, Enum):
one = 1.0
two = 2.0
def __format__(self, spec):
return 'TestFloat success!'
self.assertEqual('{0}'.format(TestFloat.one), 'TestFloat success!')
def assertFormatIsValue(self, spec, member):
self.assertEqual(spec.format(member), spec.format(member.value))
def test_format_enum_date(self):
Holiday = self.Holiday
self.assertFormatIsValue('{0}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{0:}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{0:20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{0:^20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{0:>20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{0:<20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{0:%Y %m}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{0:%Y %m %M:00}', Holiday.IDES_OF_MARCH)
def test_format_enum_float(self):
Konstants = self.Konstants
self.assertFormatIsValue('{0}', Konstants.TAU)
self.assertFormatIsValue('{0:}', Konstants.TAU)
self.assertFormatIsValue('{0:20}', Konstants.TAU)
self.assertFormatIsValue('{0:^20}', Konstants.TAU)
self.assertFormatIsValue('{0:>20}', Konstants.TAU)
self.assertFormatIsValue('{0:<20}', Konstants.TAU)
self.assertFormatIsValue('{0:n}', Konstants.TAU)
self.assertFormatIsValue('{0:5.2}', Konstants.TAU)
self.assertFormatIsValue('{0:f}', Konstants.TAU)
def test_format_enum_int(self):
Grades = self.Grades
self.assertFormatIsValue('{0}', Grades.C)
self.assertFormatIsValue('{0:}', Grades.C)
self.assertFormatIsValue('{0:20}', Grades.C)
self.assertFormatIsValue('{0:^20}', Grades.C)
self.assertFormatIsValue('{0:>20}', Grades.C)
self.assertFormatIsValue('{0:<20}', Grades.C)
self.assertFormatIsValue('{0:+}', Grades.C)
self.assertFormatIsValue('{0:08X}', Grades.C)
self.assertFormatIsValue('{0:b}', Grades.C)
def test_format_enum_str(self):
Directional = self.Directional
self.assertFormatIsValue('{0}', Directional.WEST)
self.assertFormatIsValue('{0:}', Directional.WEST)
self.assertFormatIsValue('{0:20}', Directional.WEST)
self.assertFormatIsValue('{0:^20}', Directional.WEST)
self.assertFormatIsValue('{0:>20}', Directional.WEST)
self.assertFormatIsValue('{0:<20}', Directional.WEST)
def test_hash(self):
Season = self.Season
dates = {}
dates[Season.WINTER] = '1225'
dates[Season.SPRING] = '0315'
dates[Season.SUMMER] = '0704'
dates[Season.AUTUMN] = '1031'
self.assertEqual(dates[Season.AUTUMN], '1031')
def test_enum_duplicates(self):
__order__ = "SPRING SUMMER AUTUMN WINTER"
class Season(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = FALL = 3
WINTER = 4
ANOTHER_SPRING = 1
lst = list(Season)
self.assertEqual(
lst,
[Season.SPRING, Season.SUMMER,
Season.AUTUMN, Season.WINTER,
])
self.assertTrue(Season.FALL is Season.AUTUMN)
self.assertEqual(Season.FALL.value, 3)
self.assertEqual(Season.AUTUMN.value, 3)
self.assertTrue(Season(3) is Season.AUTUMN)
self.assertTrue(Season(1) is Season.SPRING)
self.assertEqual(Season.FALL.name, 'AUTUMN')
self.assertEqual(
set([k for k,v in Season.__members__.items() if v.name != k]),
set(['FALL', 'ANOTHER_SPRING']),
)
if pyver >= 3.0:
cls = vars()
result = {'Enum':Enum}
exec("""def test_duplicate_name(self):
with self.assertRaises(TypeError):
class Color(Enum):
red = 1
green = 2
blue = 3
red = 4
with self.assertRaises(TypeError):
class Color(Enum):
red = 1
green = 2
blue = 3
def red(self):
return 'red'
with self.assertRaises(TypeError):
class Color(Enum):
@property
def red(self):
return 'redder'
red = 1
green = 2
blue = 3""",
result)
cls['test_duplicate_name'] = result['test_duplicate_name']
def test_enum_with_value_name(self):
class Huh(Enum):
name = 1
value = 2
self.assertEqual(
list(Huh),
[Huh.name, Huh.value],
)
self.assertTrue(type(Huh.name) is Huh)
self.assertEqual(Huh.name.name, 'name')
self.assertEqual(Huh.name.value, 1)
def test_intenum_from_scratch(self):
class phy(int, Enum):
pi = 3
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_intenum_inherited(self):
class IntEnum(int, Enum):
pass
class phy(IntEnum):
pi = 3
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_floatenum_from_scratch(self):
class phy(float, Enum):
pi = 3.1415926
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_floatenum_inherited(self):
class FloatEnum(float, Enum):
pass
class phy(FloatEnum):
pi = 3.1415926
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_strenum_from_scratch(self):
class phy(str, Enum):
pi = 'Pi'
tau = 'Tau'
self.assertTrue(phy.pi < phy.tau)
def test_strenum_inherited(self):
class StrEnum(str, Enum):
pass
class phy(StrEnum):
pi = 'Pi'
tau = 'Tau'
self.assertTrue(phy.pi < phy.tau)
def test_intenum(self):
class WeekDay(IntEnum):
SUNDAY = 1
MONDAY = 2
TUESDAY = 3
WEDNESDAY = 4
THURSDAY = 5
FRIDAY = 6
SATURDAY = 7
self.assertEqual(['a', 'b', 'c'][WeekDay.MONDAY], 'c')
self.assertEqual([i for i in range(WeekDay.TUESDAY)], [0, 1, 2])
lst = list(WeekDay)
self.assertEqual(len(lst), len(WeekDay))
self.assertEqual(len(WeekDay), 7)
target = 'SUNDAY MONDAY TUESDAY WEDNESDAY THURSDAY FRIDAY SATURDAY'
target = target.split()
for i, weekday in enumerate(target):
i += 1
e = WeekDay(i)
self.assertEqual(e, i)
self.assertEqual(int(e), i)
self.assertEqual(e.name, weekday)
self.assertTrue(e in WeekDay)
self.assertEqual(lst.index(e)+1, i)
self.assertTrue(0 < e < 8)
self.assertTrue(type(e) is WeekDay)
self.assertTrue(isinstance(e, int))
self.assertTrue(isinstance(e, Enum))
def test_intenum_duplicates(self):
class WeekDay(IntEnum):
__order__ = 'SUNDAY MONDAY TUESDAY WEDNESDAY THURSDAY FRIDAY SATURDAY'
SUNDAY = 1
MONDAY = 2
TUESDAY = TEUSDAY = 3
WEDNESDAY = 4
THURSDAY = 5
FRIDAY = 6
SATURDAY = 7
self.assertTrue(WeekDay.TEUSDAY is WeekDay.TUESDAY)
self.assertEqual(WeekDay(3).name, 'TUESDAY')
self.assertEqual([k for k,v in WeekDay.__members__.items()
if v.name != k], ['TEUSDAY', ])
def test_pickle_enum(self):
if isinstance(Stooges, Exception):
raise Stooges
test_pickle_dump_load(self.assertTrue, Stooges.CURLY)
test_pickle_dump_load(self.assertTrue, Stooges)
def test_pickle_int(self):
if isinstance(IntStooges, Exception):
raise IntStooges
test_pickle_dump_load(self.assertTrue, IntStooges.CURLY)
test_pickle_dump_load(self.assertTrue, IntStooges)
def test_pickle_float(self):
if isinstance(FloatStooges, Exception):
raise FloatStooges
test_pickle_dump_load(self.assertTrue, FloatStooges.CURLY)
test_pickle_dump_load(self.assertTrue, FloatStooges)
def test_pickle_enum_function(self):
if isinstance(Answer, Exception):
raise Answer
test_pickle_dump_load(self.assertTrue, Answer.him)
test_pickle_dump_load(self.assertTrue, Answer)
def test_pickle_enum_function_with_module(self):
if isinstance(Question, Exception):
raise Question
test_pickle_dump_load(self.assertTrue, Question.who)
test_pickle_dump_load(self.assertTrue, Question)
if pyver == 3.4:
def test_class_nested_enum_and_pickle_protocol_four(self):
# would normally just have this directly in the class namespace
class NestedEnum(Enum):
twigs = 'common'
shiny = 'rare'
self.__class__.NestedEnum = NestedEnum
self.NestedEnum.__qualname__ = '%s.NestedEnum' % self.__class__.__name__
test_pickle_exception(
self.assertRaises, PicklingError, self.NestedEnum.twigs,
protocol=(0, 3))
test_pickle_dump_load(self.assertTrue, self.NestedEnum.twigs,
protocol=(4, HIGHEST_PROTOCOL))
elif pyver == 3.5:
def test_class_nested_enum_and_pickle_protocol_four(self):
# would normally just have this directly in the class namespace
class NestedEnum(Enum):
twigs = 'common'
shiny = 'rare'
self.__class__.NestedEnum = NestedEnum
self.NestedEnum.__qualname__ = '%s.NestedEnum' % self.__class__.__name__
test_pickle_dump_load(self.assertTrue, self.NestedEnum.twigs,
protocol=(0, HIGHEST_PROTOCOL))
def test_exploding_pickle(self):
BadPickle = Enum('BadPickle', 'dill sweet bread-n-butter')
enum._make_class_unpicklable(BadPickle)
globals()['BadPickle'] = BadPickle
test_pickle_exception(self.assertRaises, TypeError, BadPickle.dill)
test_pickle_exception(self.assertRaises, PicklingError, BadPickle)
def test_string_enum(self):
class SkillLevel(str, Enum):
master = 'what is the sound of one hand clapping?'
journeyman = 'why did the chicken cross the road?'
apprentice = 'knock, knock!'
self.assertEqual(SkillLevel.apprentice, 'knock, knock!')
def test_getattr_getitem(self):
class Period(Enum):
morning = 1
noon = 2
evening = 3
night = 4
self.assertTrue(Period(2) is Period.noon)
self.assertTrue(getattr(Period, 'night') is Period.night)
self.assertTrue(Period['morning'] is Period.morning)
def test_getattr_dunder(self):
Season = self.Season
self.assertTrue(getattr(Season, '__hash__'))
def test_iteration_order(self):
class Season(Enum):
__order__ = 'SUMMER WINTER AUTUMN SPRING'
SUMMER = 2
WINTER = 4
AUTUMN = 3
SPRING = 1
self.assertEqual(
list(Season),
[Season.SUMMER, Season.WINTER, Season.AUTUMN, Season.SPRING],
)
def test_iteration_order_reversed(self):
self.assertEqual(
list(reversed(self.Season)),
[self.Season.WINTER, self.Season.AUTUMN, self.Season.SUMMER,
self.Season.SPRING]
)
def test_iteration_order_with_unorderable_values(self):
class Complex(Enum):
a = complex(7, 9)
b = complex(3.14, 2)
c = complex(1, -1)
d = complex(-77, 32)
self.assertEqual(
list(Complex),
[Complex.a, Complex.b, Complex.c, Complex.d],
)
def test_programatic_function_string(self):
SummerMonth = Enum('SummerMonth', 'june july august')
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split()):
i += 1
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertTrue(e in SummerMonth)
self.assertTrue(type(e) is SummerMonth)
def test_programatic_function_string_with_start(self):
SummerMonth = Enum('SummerMonth', 'june july august', start=10)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 10):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertTrue(e in SummerMonth)
self.assertTrue(type(e) is SummerMonth)
def test_programatic_function_string_list(self):
SummerMonth = Enum('SummerMonth', ['june', 'july', 'august'])
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split()):
i += 1
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertTrue(e in SummerMonth)
self.assertTrue(type(e) is SummerMonth)
def test_programatic_function_string_list_with_start(self):
SummerMonth = Enum('SummerMonth', ['june', 'july', 'august'], start=20)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 20):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertTrue(e in SummerMonth)
self.assertTrue(type(e) is SummerMonth)
def test_programatic_function_iterable(self):
SummerMonth = Enum(
'SummerMonth',
(('june', 1), ('july', 2), ('august', 3))
)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split()):
i += 1
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertTrue(e in SummerMonth)
self.assertTrue(type(e) is SummerMonth)
def test_programatic_function_from_dict(self):
SummerMonth = Enum(
'SummerMonth',
dict((('june', 1), ('july', 2), ('august', 3)))
)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
if pyver < 3.0:
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split()):
i += 1
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertTrue(e in SummerMonth)
self.assertTrue(type(e) is SummerMonth)
def test_programatic_function_type(self):
SummerMonth = Enum('SummerMonth', 'june july august', type=int)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split()):
i += 1
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertTrue(e in SummerMonth)
self.assertTrue(type(e) is SummerMonth)
def test_programatic_function_type_with_start(self):
SummerMonth = Enum('SummerMonth', 'june july august', type=int, start=30)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 30):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertTrue(e in SummerMonth)
self.assertTrue(type(e) is SummerMonth)
def test_programatic_function_type_from_subclass(self):
SummerMonth = IntEnum('SummerMonth', 'june july august')
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split()):
i += 1
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertTrue(e in SummerMonth)
self.assertTrue(type(e) is SummerMonth)
def test_programatic_function_type_from_subclass_with_start(self):
SummerMonth = IntEnum('SummerMonth', 'june july august', start=40)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 40):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertTrue(e in SummerMonth)
self.assertTrue(type(e) is SummerMonth)
def test_programatic_function_unicode(self):
SummerMonth = Enum('SummerMonth', unicode('june july august'))
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate(unicode('june july august').split()):
i += 1
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertTrue(e in SummerMonth)
self.assertTrue(type(e) is SummerMonth)
def test_programatic_function_unicode_list(self):
SummerMonth = Enum('SummerMonth', [unicode('june'), unicode('july'), unicode('august')])
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate(unicode('june july august').split()):
i += 1
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertTrue(e in SummerMonth)
self.assertTrue(type(e) is SummerMonth)
    def test_programatic_function_unicode_iterable(self):
        """Functional API accepts an iterable of (unicode name, value) pairs."""
        SummerMonth = Enum(
                'SummerMonth',
                ((unicode('june'), 1), (unicode('july'), 2), (unicode('august'), 3))
                )
        lst = list(SummerMonth)
        self.assertEqual(len(lst), len(SummerMonth))
        self.assertEqual(len(SummerMonth), 3, SummerMonth)
        self.assertEqual(
            [SummerMonth.june, SummerMonth.july, SummerMonth.august],
            lst,
            )
        for i, month in enumerate(unicode('june july august').split()):
            i += 1
            e = SummerMonth(i)
            self.assertEqual(int(e.value), i)
            self.assertNotEqual(e, i)
            self.assertEqual(e.name, month)
            self.assertTrue(e in SummerMonth)
            self.assertTrue(type(e) is SummerMonth)
    def test_programatic_function_from_unicode_dict(self):
        """Functional API accepts a dict mapping unicode names to values."""
        SummerMonth = Enum(
                'SummerMonth',
                dict(((unicode('june'), 1), (unicode('july'), 2), (unicode('august'), 3)))
                )
        lst = list(SummerMonth)
        self.assertEqual(len(lst), len(SummerMonth))
        self.assertEqual(len(SummerMonth), 3, SummerMonth)
        # NOTE(review): the ordering assertion runs only under Python 2 --
        # presumably related to dict ordering differences; confirm intent.
        if pyver < 3.0:
            self.assertEqual(
                [SummerMonth.june, SummerMonth.july, SummerMonth.august],
                lst,
                )
        for i, month in enumerate(unicode('june july august').split()):
            i += 1
            e = SummerMonth(i)
            self.assertEqual(int(e.value), i)
            self.assertNotEqual(e, i)
            self.assertEqual(e.name, month)
            self.assertTrue(e in SummerMonth)
            self.assertTrue(type(e) is SummerMonth)
    def test_programatic_function_unicode_type(self):
        """type=int makes functionally created members int-comparable."""
        SummerMonth = Enum('SummerMonth', unicode('june july august'), type=int)
        lst = list(SummerMonth)
        self.assertEqual(len(lst), len(SummerMonth))
        self.assertEqual(len(SummerMonth), 3, SummerMonth)
        self.assertEqual(
            [SummerMonth.june, SummerMonth.july, SummerMonth.august],
            lst,
            )
        for i, month in enumerate(unicode('june july august').split()):
            i += 1
            e = SummerMonth(i)
            self.assertEqual(e, i)
            self.assertEqual(e.name, month)
            self.assertTrue(e in SummerMonth)
            self.assertTrue(type(e) is SummerMonth)
    def test_programatic_function_unicode_type_from_subclass(self):
        """IntEnum functional API with unicode names yields int members."""
        SummerMonth = IntEnum('SummerMonth', unicode('june july august'))
        lst = list(SummerMonth)
        self.assertEqual(len(lst), len(SummerMonth))
        self.assertEqual(len(SummerMonth), 3, SummerMonth)
        self.assertEqual(
            [SummerMonth.june, SummerMonth.july, SummerMonth.august],
            lst,
            )
        for i, month in enumerate(unicode('june july august').split()):
            i += 1
            e = SummerMonth(i)
            self.assertEqual(e, i)
            self.assertEqual(e.name, month)
            self.assertTrue(e in SummerMonth)
            self.assertTrue(type(e) is SummerMonth)
    def test_programmatic_function_unicode_class(self):
        """Non-ASCII class names are rejected on Python 2 but work on 3."""
        if pyver < 3.0:
            class_names = unicode('SummerMonth'), 'S\xfcmm\xe9rM\xf6nth'.decode('latin1')
        else:
            class_names = 'SummerMonth', 'S\xfcmm\xe9rM\xf6nth'
        for i, class_name in enumerate(class_names):
            if pyver < 3.0 and i == 1:
                # Python 2 identifiers must be ASCII
                self.assertRaises(TypeError, Enum, class_name, unicode('june july august'))
            else:
                SummerMonth = Enum(class_name, unicode('june july august'))
                lst = list(SummerMonth)
                self.assertEqual(len(lst), len(SummerMonth))
                self.assertEqual(len(SummerMonth), 3, SummerMonth)
                self.assertEqual(
                    [SummerMonth.june, SummerMonth.july, SummerMonth.august],
                    lst,
                    )
                for i, month in enumerate(unicode('june july august').split()):
                    i += 1
                    e = SummerMonth(i)
                    self.assertEqual(e.value, i)
                    self.assertEqual(e.name, month)
                    self.assertTrue(e in SummerMonth)
                    self.assertTrue(type(e) is SummerMonth)
def test_subclassing(self):
if isinstance(Name, Exception):
raise Name
self.assertEqual(Name.BDFL, 'Guido van Rossum')
self.assertTrue(Name.BDFL, Name('Guido van Rossum'))
self.assertTrue(Name.BDFL is getattr(Name, 'BDFL'))
test_pickle_dump_load(self.assertTrue, Name.BDFL)
def test_extending(self):
def bad_extension():
class Color(Enum):
red = 1
green = 2
blue = 3
class MoreColor(Color):
cyan = 4
magenta = 5
yellow = 6
self.assertRaises(TypeError, bad_extension)
def test_exclude_methods(self):
class whatever(Enum):
this = 'that'
these = 'those'
def really(self):
return 'no, not %s' % self.value
self.assertFalse(type(whatever.really) is whatever)
self.assertEqual(whatever.this.really(), 'no, not that')
def test_wrong_inheritance_order(self):
def wrong_inherit():
class Wrong(Enum, str):
NotHere = 'error before this point'
self.assertRaises(TypeError, wrong_inherit)
def test_intenum_transitivity(self):
class number(IntEnum):
one = 1
two = 2
three = 3
class numero(IntEnum):
uno = 1
dos = 2
tres = 3
self.assertEqual(number.one, numero.uno)
self.assertEqual(number.two, numero.dos)
self.assertEqual(number.three, numero.tres)
    def test_introspection(self):
        """_member_type_ reflects the mix-in type: int, str, or object."""
        class Number(IntEnum):
            one = 100
            two = 200
        self.assertTrue(Number.one._member_type_ is int)
        self.assertTrue(Number._member_type_ is int)
        class String(str, Enum):
            yarn = 'soft'
            rope = 'rough'
            wire = 'hard'
        self.assertTrue(String.yarn._member_type_ is str)
        self.assertTrue(String._member_type_ is str)
        # a plain Enum has no mix-in, so the member type is object
        class Plain(Enum):
            vanilla = 'white'
            one = 1
        self.assertTrue(Plain.vanilla._member_type_ is object)
        self.assertTrue(Plain._member_type_ is object)
def test_wrong_enum_in_call(self):
class Monochrome(Enum):
black = 0
white = 1
class Gender(Enum):
male = 0
female = 1
self.assertRaises(ValueError, Monochrome, Gender.male)
    def test_wrong_enum_in_mixed_call(self):
        """A plain-Enum member is no valid value even for an IntEnum lookup."""
        class Monochrome(IntEnum):
            black = 0
            white = 1
        class Gender(Enum):
            male = 0
            female = 1
        self.assertRaises(ValueError, Monochrome, Gender.male)
def test_mixed_enum_in_call_1(self):
class Monochrome(IntEnum):
black = 0
white = 1
class Gender(IntEnum):
male = 0
female = 1
self.assertTrue(Monochrome(Gender.female) is Monochrome.white)
    def test_mixed_enum_in_call_2(self):
        """An IntEnum member resolves against a plain Enum by equal value."""
        class Monochrome(Enum):
            black = 0
            white = 1
        class Gender(IntEnum):
            male = 0
            female = 1
        self.assertTrue(Monochrome(Gender.male) is Monochrome.black)
def test_flufl_enum(self):
class Fluflnum(Enum):
def __int__(self):
return int(self.value)
class MailManOptions(Fluflnum):
option1 = 1
option2 = 2
option3 = 3
self.assertEqual(int(MailManOptions.option1), 1)
def test_no_such_enum_member(self):
class Color(Enum):
red = 1
green = 2
blue = 3
self.assertRaises(ValueError, Color, 4)
self.assertRaises(KeyError, Color.__getitem__, 'chartreuse')
def test_new_repr(self):
class Color(Enum):
red = 1
green = 2
blue = 3
def __repr__(self):
return "don't you just love shades of %s?" % self.name
self.assertEqual(
repr(Color.blue),
"don't you just love shades of blue?",
)
    def test_inherited_repr(self):
        """__repr__ from an Enum base class is inherited by mixed subclasses."""
        class MyEnum(Enum):
            def __repr__(self):
                return "My name is %s." % self.name
        class MyIntEnum(int, MyEnum):
            this = 1
            that = 2
            theother = 3
        self.assertEqual(repr(MyIntEnum.that), "My name is that.")
    def test_multiple_mixin_mro(self):
        """A custom EnumMeta that auto-numbers members declared as ()."""
        # NOTE(review): relies on the private enum._EnumDict API
        # (_member_names) -- confirm against the bundled enum version.
        class auto_enum(EnumMeta):
            def __new__(metacls, cls, bases, classdict):
                original_dict = classdict
                classdict = enum._EnumDict()
                for k, v in original_dict.items():
                    classdict[k] = v
                temp = type(classdict)()
                names = set(classdict._member_names)
                i = 0
                for k in classdict._member_names:
                    v = classdict[k]
                    # () means "auto-number"; an explicit value resets the counter
                    if v == ():
                        v = i
                    else:
                        i = v
                    i += 1
                    temp[k] = v
                # copy the non-member entries (methods, dunders) unchanged
                for k, v in classdict.items():
                    if k not in names:
                        temp[k] = v
                return super(auto_enum, metacls).__new__(
                        metacls, cls, bases, temp)
        AutoNumberedEnum = auto_enum('AutoNumberedEnum', (Enum,), {})
        AutoIntEnum = auto_enum('AutoIntEnum', (IntEnum,), {})
        # these class definitions must not raise
        class TestAutoNumber(AutoNumberedEnum):
            a = ()
            b = 3
            c = ()
        class TestAutoInt(AutoIntEnum):
            a = ()
            b = 3
            c = ()
    def test_subclasses_with_getnewargs(self):
        """Enum mixing in an int subclass that pickles via __getnewargs__."""
        class NamedInt(int):
            __qualname__ = 'NamedInt' # needed for pickle protocol 4
            def __new__(cls, *args):
                _args = args
                # NOTE(review): a single argument (name only) is accepted;
                # int.__new__(cls) then defaults to 0 -- confirm intended.
                if len(args) < 1:
                    raise TypeError("name and value must be specified")
                name, args = args[0], args[1:]
                self = int.__new__(cls, *args)
                self._intname = name
                self._args = _args
                return self
            def __getnewargs__(self):
                return self._args
            @property
            def __name__(self):
                return self._intname
            def __repr__(self):
                # repr() is updated to include the name and type info
                return "%s(%r, %s)" % (type(self).__name__,
                                       self.__name__,
                                       int.__repr__(self))
            def __str__(self):
                # str() is unchanged, even if it relies on the repr() fallback
                base = int
                base_str = base.__str__
                if base_str.__objclass__ is object:
                    return base.__repr__(self)
                return base_str(self)
            # for simplicity, we only define one operator that
            # propagates expressions
            def __add__(self, other):
                temp = int(self) + int( other)
                if isinstance(self, NamedInt) and isinstance(other, NamedInt):
                    return NamedInt(
                        '(%s + %s)' % (self.__name__, other.__name__),
                        temp )
                else:
                    return temp
        class NEI(NamedInt, Enum):
            __qualname__ = 'NEI' # needed for pickle protocol 4
            x = ('the-x', 1)
            y = ('the-y', 2)
        self.assertTrue(NEI.__new__ is Enum.__new__)
        self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
        # classes must be module-global so pickle can find them
        globals()['NamedInt'] = NamedInt
        globals()['NEI'] = NEI
        NI5 = NamedInt('test', 5)
        self.assertEqual(NI5, 5)
        test_pickle_dump_load(self.assertTrue, NI5, 5)
        self.assertEqual(NEI.y.value, 2)
        test_pickle_dump_load(self.assertTrue, NEI.y)
    # __getnewargs_ex__ and pickle protocol 4 exist only on Python 3.4+
    if pyver >= 3.4:
        def test_subclasses_with_getnewargs_ex(self):
            """Enum mixing in an int subclass pickled via __getnewargs_ex__."""
            class NamedInt(int):
                __qualname__ = 'NamedInt' # needed for pickle protocol 4
                def __new__(cls, *args):
                    _args = args
                    if len(args) < 2:
                        raise TypeError("name and value must be specified")
                    name, args = args[0], args[1:]
                    self = int.__new__(cls, *args)
                    self._intname = name
                    self._args = _args
                    return self
                def __getnewargs_ex__(self):
                    return self._args, {}
                @property
                def __name__(self):
                    return self._intname
                def __repr__(self):
                    # repr() is updated to include the name and type info
                    return "{}({!r}, {})".format(type(self).__name__,
                                                 self.__name__,
                                                 int.__repr__(self))
                def __str__(self):
                    # str() is unchanged, even if it relies on the repr() fallback
                    base = int
                    base_str = base.__str__
                    if base_str.__objclass__ is object:
                        return base.__repr__(self)
                    return base_str(self)
                # for simplicity, we only define one operator that
                # propagates expressions
                def __add__(self, other):
                    temp = int(self) + int( other)
                    if isinstance(self, NamedInt) and isinstance(other, NamedInt):
                        return NamedInt(
                            '({0} + {1})'.format(self.__name__, other.__name__),
                            temp )
                    else:
                        return temp
            class NEI(NamedInt, Enum):
                __qualname__ = 'NEI' # needed for pickle protocol 4
                x = ('the-x', 1)
                y = ('the-y', 2)
            self.assertIs(NEI.__new__, Enum.__new__)
            self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
            # classes must be module-global so pickle can find them
            globals()['NamedInt'] = NamedInt
            globals()['NEI'] = NEI
            NI5 = NamedInt('test', 5)
            self.assertEqual(NI5, 5)
            test_pickle_dump_load(self.assertEqual, NI5, 5, protocol=(4, HIGHEST_PROTOCOL))
            self.assertEqual(NEI.y.value, 2)
            test_pickle_dump_load(self.assertTrue, NEI.y, protocol=(4, HIGHEST_PROTOCOL))
    def test_subclasses_with_reduce(self):
        """Enum mixing in an int subclass that pickles via __reduce__."""
        class NamedInt(int):
            __qualname__ = 'NamedInt' # needed for pickle protocol 4
            def __new__(cls, *args):
                _args = args
                # NOTE(review): a single argument (name only) is accepted;
                # int.__new__(cls) then defaults to 0 -- confirm intended.
                if len(args) < 1:
                    raise TypeError("name and value must be specified")
                name, args = args[0], args[1:]
                self = int.__new__(cls, *args)
                self._intname = name
                self._args = _args
                return self
            def __reduce__(self):
                return self.__class__, self._args
            @property
            def __name__(self):
                return self._intname
            def __repr__(self):
                # repr() is updated to include the name and type info
                return "%s(%r, %s)" % (type(self).__name__,
                                       self.__name__,
                                       int.__repr__(self))
            def __str__(self):
                # str() is unchanged, even if it relies on the repr() fallback
                base = int
                base_str = base.__str__
                if base_str.__objclass__ is object:
                    return base.__repr__(self)
                return base_str(self)
            # for simplicity, we only define one operator that
            # propagates expressions
            def __add__(self, other):
                temp = int(self) + int( other)
                if isinstance(self, NamedInt) and isinstance(other, NamedInt):
                    return NamedInt(
                        '(%s + %s)' % (self.__name__, other.__name__),
                        temp )
                else:
                    return temp
        class NEI(NamedInt, Enum):
            __qualname__ = 'NEI' # needed for pickle protocol 4
            x = ('the-x', 1)
            y = ('the-y', 2)
        self.assertTrue(NEI.__new__ is Enum.__new__)
        self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
        # classes must be module-global so pickle can find them
        globals()['NamedInt'] = NamedInt
        globals()['NEI'] = NEI
        NI5 = NamedInt('test', 5)
        self.assertEqual(NI5, 5)
        test_pickle_dump_load(self.assertEqual, NI5, 5)
        self.assertEqual(NEI.y.value, 2)
        test_pickle_dump_load(self.assertTrue, NEI.y)
    def test_subclasses_with_reduce_ex(self):
        """Enum mixing in an int subclass that pickles via __reduce_ex__."""
        class NamedInt(int):
            __qualname__ = 'NamedInt' # needed for pickle protocol 4
            def __new__(cls, *args):
                _args = args
                # NOTE(review): a single argument (name only) is accepted;
                # int.__new__(cls) then defaults to 0 -- confirm intended.
                if len(args) < 1:
                    raise TypeError("name and value must be specified")
                name, args = args[0], args[1:]
                self = int.__new__(cls, *args)
                self._intname = name
                self._args = _args
                return self
            def __reduce_ex__(self, proto):
                return self.__class__, self._args
            @property
            def __name__(self):
                return self._intname
            def __repr__(self):
                # repr() is updated to include the name and type info
                return "%s(%r, %s)" % (type(self).__name__,
                                       self.__name__,
                                       int.__repr__(self))
            def __str__(self):
                # str() is unchanged, even if it relies on the repr() fallback
                base = int
                base_str = base.__str__
                if base_str.__objclass__ is object:
                    return base.__repr__(self)
                return base_str(self)
            # for simplicity, we only define one operator that
            # propagates expressions
            def __add__(self, other):
                temp = int(self) + int( other)
                if isinstance(self, NamedInt) and isinstance(other, NamedInt):
                    return NamedInt(
                        '(%s + %s)' % (self.__name__, other.__name__),
                        temp )
                else:
                    return temp
        class NEI(NamedInt, Enum):
            __qualname__ = 'NEI' # needed for pickle protocol 4
            x = ('the-x', 1)
            y = ('the-y', 2)
        self.assertTrue(NEI.__new__ is Enum.__new__)
        self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
        # classes must be module-global so pickle can find them
        globals()['NamedInt'] = NamedInt
        globals()['NEI'] = NEI
        NI5 = NamedInt('test', 5)
        self.assertEqual(NI5, 5)
        test_pickle_dump_load(self.assertEqual, NI5, 5)
        self.assertEqual(NEI.y.value, 2)
        test_pickle_dump_load(self.assertTrue, NEI.y)
def test_subclasses_without_direct_pickle_support(self):
class NamedInt(int):
__qualname__ = 'NamedInt'
def __new__(cls, *args):
_args = args
name, args = args[0], args[1:]
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "%s(%r, %s)" % (type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int( other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'(%s + %s)' % (self.__name__, other.__name__),
temp )
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI'
x = ('the-x', 1)
y = ('the-y', 2)
self.assertTrue(NEI.__new__ is Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_exception(self.assertRaises, TypeError, NEI.x)
test_pickle_exception(self.assertRaises, PicklingError, NEI)
def test_subclasses_without_direct_pickle_support_using_name(self):
class NamedInt(int):
__qualname__ = 'NamedInt'
def __new__(cls, *args):
_args = args
name, args = args[0], args[1:]
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "%s(%r, %s)" % (type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int( other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'(%s + %s)' % (self.__name__, other.__name__),
temp )
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI'
x = ('the-x', 1)
y = ('the-y', 2)
def __reduce_ex__(self, proto):
return getattr, (self.__class__, self._name_)
self.assertTrue(NEI.__new__ is Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertTrue, NEI.y)
test_pickle_dump_load(self.assertTrue, NEI)
    def test_tuple_subclass(self):
        """Enums may mix in tuple; members behave as tuples and pickle."""
        class SomeTuple(tuple, Enum):
            __qualname__ = 'SomeTuple'
            first = (1, 'for the money')
            second = (2, 'for the show')
            third = (3, 'for the music')
        self.assertTrue(type(SomeTuple.first) is SomeTuple)
        self.assertTrue(isinstance(SomeTuple.second, tuple))
        self.assertEqual(SomeTuple.third, (3, 'for the music'))
        # must be module-global so pickle can find it
        globals()['SomeTuple'] = SomeTuple
        test_pickle_dump_load(self.assertTrue, SomeTuple.first)
    def test_duplicate_values_give_unique_enum_items(self):
        """A custom __new__ auto-numbering members keeps them distinct."""
        class AutoNumber(Enum):
            # __order__ preserves definition order for Python 2
            __order__ = 'enum_m enum_d enum_y'
            enum_m = ()
            enum_d = ()
            enum_y = ()
            def __new__(cls):
                value = len(cls.__members__) + 1
                obj = object.__new__(cls)
                obj._value_ = value
                return obj
            def __int__(self):
                return int(self._value_)
        self.assertEqual(int(AutoNumber.enum_d), 2)
        self.assertEqual(AutoNumber.enum_y.value, 3)
        self.assertTrue(AutoNumber(1) is AutoNumber.enum_m)
        self.assertEqual(
            list(AutoNumber),
            [AutoNumber.enum_m, AutoNumber.enum_d, AutoNumber.enum_y],
            )
    def test_inherited_new_from_enhanced_enum(self):
        """A base Enum's auto-numbering __new__ is inherited by subclasses."""
        class AutoNumber2(Enum):
            def __new__(cls):
                value = len(cls.__members__) + 1
                obj = object.__new__(cls)
                obj._value_ = value
                return obj
            def __int__(self):
                return int(self._value_)
        class Color(AutoNumber2):
            __order__ = 'red green blue'
            red = ()
            green = ()
            blue = ()
        self.assertEqual(len(Color), 3, "wrong number of elements: %d (should be %d)" % (len(Color), 3))
        self.assertEqual(list(Color), [Color.red, Color.green, Color.blue])
        if pyver >= 3.0:
            self.assertEqual(list(map(int, Color)), [1, 2, 3])
def test_inherited_new_from_mixed_enum(self):
class AutoNumber3(IntEnum):
def __new__(cls):
value = len(cls.__members__) + 1
obj = int.__new__(cls, value)
obj._value_ = value
return obj
class Color(AutoNumber3):
red = ()
green = ()
blue = ()
self.assertEqual(len(Color), 3, "wrong number of elements: %d (should be %d)" % (len(Color), 3))
Color.red
Color.green
Color.blue
    def test_equality(self):
        """Enum equality defers to the other operand's __eq__ (reflected)."""
        # old-style 'class X:' kept for Python 2 compatibility of this file
        class AlwaysEqual:
            def __eq__(self, other):
                return True
        class OrdinaryEnum(Enum):
            a = 1
        self.assertEqual(AlwaysEqual(), OrdinaryEnum.a)
        self.assertEqual(OrdinaryEnum.a, AlwaysEqual())
    def test_ordered_mixin(self):
        """An ordering mixin base gives Enum members rich comparisons."""
        class OrderedEnum(Enum):
            def __ge__(self, other):
                if self.__class__ is other.__class__:
                    return self._value_ >= other._value_
                return NotImplemented
            def __gt__(self, other):
                if self.__class__ is other.__class__:
                    return self._value_ > other._value_
                return NotImplemented
            def __le__(self, other):
                if self.__class__ is other.__class__:
                    return self._value_ <= other._value_
                return NotImplemented
            def __lt__(self, other):
                if self.__class__ is other.__class__:
                    return self._value_ < other._value_
                return NotImplemented
        class Grade(OrderedEnum):
            # __order__ preserves definition order for Python 2
            __order__ = 'A B C D F'
            A = 5
            B = 4
            C = 3
            D = 2
            F = 1
        self.assertEqual(list(Grade), [Grade.A, Grade.B, Grade.C, Grade.D, Grade.F])
        self.assertTrue(Grade.A > Grade.B)
        self.assertTrue(Grade.F <= Grade.C)
        self.assertTrue(Grade.D < Grade.A)
        self.assertTrue(Grade.B >= Grade.B)
    def test_extending2(self):
        """Extending fails even when members appear further down the chain."""
        def bad_extension():
            class Shade(Enum):
                def shade(self):
                    print(self.name)
            class Color(Shade):
                red = 1
                green = 2
                blue = 3
            # Color has members, so this subclass must raise
            class MoreColor(Color):
                cyan = 4
                magenta = 5
                yellow = 6
        self.assertRaises(TypeError, bad_extension)
def test_extending3(self):
class Shade(Enum):
def shade(self):
return self.name
class Color(Shade):
def hex(self):
return '%s hexlified!' % self.value
class MoreColor(Color):
cyan = 4
magenta = 5
yellow = 6
self.assertEqual(MoreColor.magenta.hex(), '5 hexlified!')
    def test_no_duplicates(self):
        """A duplicate-rejecting __init__ mixin raises on alias values."""
        def bad_duplicates():
            class UniqueEnum(Enum):
                def __init__(self, *args):
                    cls = self.__class__
                    # any earlier member with the same value is an alias
                    if any(self.value == e.value for e in cls):
                        a = self.name
                        e = cls(self.value).name
                        raise ValueError(
                                "aliases not allowed in UniqueEnum:  %r --> %r"
                                % (a, e)
                                )
            class Color(UniqueEnum):
                red = 1
                green = 2
                blue = 3
            class Color(UniqueEnum):
                red = 1
                green = 2
                blue = 3
                grene = 2
        self.assertRaises(ValueError, bad_duplicates)
    def test_init(self):
        """Tuple values are unpacked into __init__; .value stays the tuple."""
        class Planet(Enum):
            MERCURY = (3.303e+23, 2.4397e6)
            VENUS   = (4.869e+24, 6.0518e6)
            EARTH   = (5.976e+24, 6.37814e6)
            MARS    = (6.421e+23, 3.3972e6)
            JUPITER = (1.9e+27,   7.1492e7)
            SATURN  = (5.688e+26, 6.0268e7)
            URANUS  = (8.686e+25, 2.5559e7)
            NEPTUNE = (1.024e+26, 2.4746e7)
            def __init__(self, mass, radius):
                self.mass = mass       # in kilograms
                self.radius = radius   # in meters
            @property
            def surface_gravity(self):
                # universal gravitational constant  (m3 kg-1 s-2)
                G = 6.67300E-11
                return G * self.mass / (self.radius * self.radius)
        self.assertEqual(round(Planet.EARTH.surface_gravity, 2), 9.80)
        self.assertEqual(Planet.EARTH.value, (5.976e+24, 6.37814e6))
    def test_nonhash_value(self):
        """Members may carry unhashable values (lists) and still be looked up."""
        class AutoNumberInAList(Enum):
            def __new__(cls):
                value = [len(cls.__members__) + 1]
                obj = object.__new__(cls)
                obj._value_ = value
                return obj
        class ColorInAList(AutoNumberInAList):
            # __order__ preserves definition order for Python 2
            __order__ = 'red green blue'
            red = ()
            green = ()
            blue = ()
        self.assertEqual(list(ColorInAList), [ColorInAList.red, ColorInAList.green, ColorInAList.blue])
        self.assertEqual(ColorInAList.red.value, [1])
        self.assertEqual(ColorInAList([1]), ColorInAList.red)
    def test_conflicting_types_resolved_in_new(self):
        """A custom __new__ splits (value, label) tuples for an int mixin."""
        class LabelledIntEnum(int, Enum):
            def __new__(cls, *args):
                value, label = args
                obj = int.__new__(cls, value)
                obj.label = label
                obj._value_ = value
                return obj
        class LabelledList(LabelledIntEnum):
            unprocessed = (1, "Unprocessed")
            payment_complete = (2, "Payment Complete")
        self.assertEqual(list(LabelledList), [LabelledList.unprocessed, LabelledList.payment_complete])
        self.assertEqual(LabelledList.unprocessed, 1)
        self.assertEqual(LabelledList(1), LabelledList.unprocessed)
def test_empty_with_functional_api(self):
empty = enum.IntEnum('Foo', {})
self.assertEqual(len(empty), 0)
class TestUnique(unittest.TestCase):
"""2.4 doesn't allow class decorators, use function syntax."""
def test_unique_clean(self):
class Clean(Enum):
one = 1
two = 'dos'
tres = 4.0
unique(Clean)
class Cleaner(IntEnum):
single = 1
double = 2
triple = 3
unique(Cleaner)
def test_unique_dirty(self):
try:
class Dirty(Enum):
__order__ = 'one two tres'
one = 1
two = 'dos'
tres = 1
unique(Dirty)
except ValueError:
exc = sys.exc_info()[1]
message = exc.args[0]
self.assertTrue('tres -> one' in message)
try:
class Dirtier(IntEnum):
__order__ = 'single double triple turkey'
single = 1
double = 1
triple = 3
turkey = 3
unique(Dirtier)
except ValueError:
exc = sys.exc_info()[1]
message = exc.args[0]
self.assertTrue('double -> single' in message)
self.assertTrue('turkey -> triple' in message)
# empty placeholder test case, presumably kept for ad-hoc experiments --
# TODO confirm it is still needed
class TestMe(unittest.TestCase):
    pass
# run the whole suite when this file is executed directly
if __name__ == '__main__':
    unittest.main()
| kharts/kastodi | resources/lib/enum/test_enum.py | Python | gpl-2.0 | 66,976 | [
"MOE"
] | dcdf9c64566cab4f11a9c1126e4c4d3cacce4838403cf1fcd2cf2e50eeb71b9e |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright © 2013 by Lasse Fister <commander@graphicore.de>
#
# This file is part of Multitoner.
#
# Multitoner is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Multitoner is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division, print_function, unicode_literals
import os
from gi.repository import Gtk, GObject, Gdk, GLib, GdkPixbuf
from gtk_actiongroup import ActionGroup
import cairo
from weakref import ref as weakref
import math
from compatibility import repair_gsignals, decode
from gtk_dialogs import show_open_image_dialog, show_message, show_save_as_eps_dialog
from mtt2eps import model2eps
__all__ = ['PreviewWindow']
# absolute directory of this module, decoded to text via compatibility.decode
DIRECTORY = decode(os.path.dirname(os.path.realpath(__file__)))

# just a preparation for i18n
def _(string):
    """Translation stub: returns *string* unchanged until i18n is wired up."""
    return string
UI_INFO = """
<ui>
<menubar name='MenuBar'>
<menu action="FileMenu">
<menuitem action='OpenImage' />
<separator />
<menuitem action='ExportImage' />
<separator />
<menuitem action='Quit' />
</menu>
<menu action="EditMenu">
<menuitem action='ZoomIn' />
<menuitem action='ZoomOut' />
<menuitem action='ZoomUnit' />
<menuitem action='ZoomFit' />
<separator />
<menuitem action='RotateRight' />
<menuitem action='RotateLeft' />
</menu>
</menubar>
<toolbar name="ToolBar">
<toolitem action='OpenImage' />
<separator />
<toolitem action='ExportImage' />
<separator />
<toolitem action='ZoomIn' />
<toolitem action='ZoomOut' />
<toolitem action='ZoomUnit' />
<toolitem action='ZoomFit' />
<separator />
<toolitem action='RotateRight' />
<toolitem action='RotateLeft' />
<separator />
<toolitem action='Quit' />
</toolbar>
</ui>
"""
class Canvas(Gtk.Viewport):
    """ Handle the display and transformation of a cairo_surface """
    # custom signal emitted whenever scale-to-fit mode is toggled
    __gsignals__ = repair_gsignals({
        'scale-to-fit-changed': (GObject.SIGNAL_RUN_LAST, GObject.TYPE_NONE, (
                                 # the value of scale_to_fit
                                 GObject.TYPE_BOOLEAN, ))
    })
    def __init__(self, *args):
        """Create the viewport with an embedded Gtk.DrawingArea."""
        Gtk.Viewport.__init__(self, *args)
        # cache pair -- presumably (source surface, transformed pattern);
        # confirm in _create_transformed_pattern
        self._transformed_pattern_cache = (None, None)
        self._source_surface = None
        # fractional (h, v) view center, used to re-center after resizes
        self._center = None
        # guards against feedback while _restore_center moves the scrollbars
        self._restoring_center = False
        self._timers = {}
        self.da = Gtk.DrawingArea()
        self.add(self.da)
        self._init_event_handlers()
    def _init_event_handlers(self):
        """Wire draw/configure/scroll handlers for the canvas widgets."""
        self.da.connect('draw' , self.draw_handler)
        self.da.add_events(
            Gdk.EventMask.STRUCTURE_MASK # needed for configure-event
        )
        self.da.connect('configure-event', self.configure_handler)
        self.get_vadjustment().connect(
            'value-changed', self.adjustment_value_changed_handler)
        self.get_hadjustment().connect(
            'value-changed', self.adjustment_value_changed_handler)
        # this will keep the center of the image in the center of the window
        # when the window is beeing resized
        self.add_events(
            Gdk.EventMask.STRUCTURE_MASK # needed for configure-event
        )
        def realize_handler(widget, *args):
            """
            closure to connect to window when it establishes this widget
            """
            window = widget.get_toplevel()
            window.connect('configure-event', self.toplevel_configure_handler)
            parent = widget.get_parent()
            parent.connect('size-allocate', self.parent_size_allocate_handler)
            # one-shot handler: disconnect after the first 'realize'
            widget.disconnect(realize_handler_id)
        # save realize_handler_id for the closure of onRealize
        realize_handler_id = self.connect('realize' , realize_handler)
def _get_bbox(self, matrix, x1, y1, x2, y2):
"""
transform the rectangle defined by x1, y1, x2, y2 and return
the bounding box of the result rectangle
"""
in_points = ( (x1, y1)
, (x1, y2)
, (x2, y1)
, (x2, y2)
)
out_points = [matrix.transform_point(x, y) for x, y in in_points]
xs, ys = zip(*out_points)
max_x = max(*xs)
max_y = max(*ys)
min_x = min(*xs)
min_y = min(*ys)
return min_x, min_y, max_x, max_y
def _get_bbox_extents(self, matrix, x1, y1, x2, y2):
"""
apply matrix to the rectangle defined by x1, y1, x2, y2
returns width, height, offset_x, offset_y of the bounding box
this is used to determine the space needed to draw the surface
and to move the contents back into view using the offsets
"""
x1, y1, x2, y2 = self._get_bbox(matrix, x1, y1, x2, y2)
w = int(math.ceil(x2-x1))
h = int(math.ceil(y2-y1))
offset_x, offset_y = x1, y1
return w, h, offset_x, offset_y
def _get_surface_extents(self, matrix, surface):
"""
get the extents and offsets of surface after the application
of matrix
"""
x1, y1, x2, y2 = 0, 0, surface.get_width(), surface.get_height()
return self._get_bbox_extents(matrix, x1, y1, x2, y2)
def _get_rotated_matrix(self):
""" matrix with rotation but without scale"""
matrix = cairo.Matrix()
matrix.rotate(self.rotation * math.pi)
# rotate?
return matrix
def _save_center(self):
if self._restoring_center == True:
return
center = (
# (scrollbar position + screen width / 2)) / image width
(self.get_hadjustment().get_value() + (self.get_allocated_width() / 2)) / self.da.get_allocated_width()
# (scrollbar position + screen height / 2)) / image height
, (self.get_vadjustment().get_value() + (self.get_allocated_height() / 2)) / self.da.get_allocated_height()
)
self._center = center
return center
def _get_scaled_matrix(self):
""" matrix with rotation and scale"""
matrix = self._get_rotated_matrix()
matrix.scale(self.scale, self.scale)
return matrix
def _resize(self):
# needs bounding box width and height after all transformations
if self._source_surface is not None:
matrix = self._get_scaled_matrix()
w, h, _, _ = self._get_surface_extents(matrix, self._source_surface)
else:
w = h = 0
self.da.set_size_request(w, h)
    def _set_scale(self, value):
        """ will set the new scale"""
        # no-op when unchanged, avoiding needless redraws
        if self.scale == value:
            return
        self._scale = value
        # remember the view center before the size request changes
        self._save_center()
        self._resize()
        self.da.queue_draw()
    def _set_fitting_scale(self, available_width, available_height):
        """
        set the scale to a value that makes the image fit exactly into
        available_width and available_height
        """
        if self._source_surface is None:
            return
        # needs unscaled width and unscaled height, so the matrix must not
        # be scaled, the rotation however is needed
        matrix = self._get_rotated_matrix()
        source_width, source_height, _, _ = self._get_surface_extents(matrix, self._source_surface)
        try:
            aspect_ratio = source_width / source_height
            available_aspect_ratio = available_width / available_height
        except ZeroDivisionError:
            # degenerate source or viewport: fall back to a 1:1 scale
            self._set_scale(1)
        else:
            if aspect_ratio > available_aspect_ratio:
                # fit to width
                self._set_scale(available_width / source_width)
            else:
                # fit to height
                self._set_scale(available_height / source_height)
def set_fitting_scale(self):
parent = self.get_parent()
if parent is None:
return
parent_allocation = parent.get_allocation()
self._set_fitting_scale(parent_allocation.width, parent_allocation.height)
def receive_surface(self, surface):
self._source_surface = surface
if not hasattr(self, '_scale') or self.scale_to_fit:
self.set_fitting_scale()
else:
self._resize()
self.da.queue_draw()
    def _restore_center(self):
        """Scroll so the previously saved fractional center is visible again."""
        if self._center is None:
            return
        # flag suppresses _save_center while we move the scrollbars ourselves
        self._restoring_center = True
        try:
            h, v = self._center
            # image width * center of view - screen width / 2
            left = self.da.get_allocated_width() * h - self.get_allocated_width() / 2
            # image height * center of view - screen height / 2
            top = self.da.get_allocated_height() * v - self.get_allocated_height() / 2
            self.get_hadjustment().set_value(left)
            self.get_vadjustment().set_value(top)
        finally:
            self._restoring_center = False
    def configure_handler(self, widget, event):
        """
        the configure event signals when the DrawingArea got resized
        happens after receive_surface and can be handled immediately
        """
        if not self.scale_to_fit:
            self._restore_center()
    # the same reaction serves the toplevel window's configure-event
    """ when the toplevel window got resized """
    toplevel_configure_handler = configure_handler
def parent_size_allocate_handler(self, parent, allocation):
if self.scale_to_fit:
self._set_fitting_scale(allocation.width, allocation.height)
    def adjustment_value_changed_handler(self, adjustment):
        """Track scrolling so the view center can be restored after resizes."""
        self._save_center()
    @property
    def rotation(self):
        # rotation in half-turns: the value is multiplied by pi where used
        return getattr(self, '_rotation', 0)

    @rotation.setter
    def rotation(self, value):
        """ between 0 and 2 will be multiplied with PI => radians """
        self._rotation = value % 2
        if self.scale_to_fit:
            self.set_fitting_scale()
        self._resize()
        self.da.queue_draw()

    def add_rotation(self, value):
        """Rotate relative to the current rotation (value in half-turns)."""
        self.rotation += value
    @property
    def scale(self):
        # current zoom factor; defaults to 1.0 until explicitly set
        return getattr(self, '_scale', 1.0)

    @scale.setter
    def scale(self, value):
        """ will turn off scale to fit and set the new scale"""
        self.scale_to_fit = False
        self._set_scale(value)
    @property
    def scale_to_fit(self):
        # defaults to True so a fresh canvas fits its image automatically
        return getattr(self, '_scale_to_fit', True)

    @scale_to_fit.setter
    def scale_to_fit(self, value):
        old = self.scale_to_fit
        # 'not not' coerces any truthy value to a plain bool
        self._scale_to_fit = not not value
        if self._scale_to_fit != old:
            # notify listeners only on an actual change
            self.emit('scale-to-fit-changed', self._scale_to_fit)
        if self._scale_to_fit:
            self.set_fitting_scale()
def _create_transformed_pattern(self, source_surface, transform_buffer=True):
    """
    returns cairo pattern to set as source of a cairo context

    When transform_buffer is False the returned pattern will have all
    necessary transformations applied to its affine transformation
    matrix. The source buffer, however will be the original source_surface.
    So drawing that pattern will apply all transformations live, this
    can result in a lot of cpu work when the pattern is drawn multiple
    times.

    When transform_buffer is True, the returned pattern will be a
    pattern with no extra transformations applied. Instead its surface
    will hold the image data after all transformations have been applied.
    """
    # calculate width and height using the new matrix
    matrix = self._get_scaled_matrix()
    w, h, offset_x, offset_y = self._get_surface_extents(matrix, source_surface)
    # finish the transformation matrix by translating the pattern
    # back into view using the offsets the transformation created.
    # IMPORTANT: the normal translate method of a cairo.Matrix applies
    # before all other transformations. Here we need it to be applied
    # after all transformations, hence the usage of multiply
    translate_matrix = cairo.Matrix()
    translate_matrix.translate(-offset_x, -offset_y)
    matrix = matrix.multiply(translate_matrix)
    source_pattern = cairo.SurfacePattern(source_surface)
    # cairo.SurfacePattern uses inverted matrices, see the docs for pattern
    matrix.invert()
    source_pattern.set_matrix(matrix)
    if not transform_buffer:
        return source_pattern
    # the result of this can be cached and will speedup the display
    # for large images a lot, because all transformations will be applied
    # just once not on every draw signal
    target_surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, w, h)
    # the context to draw on the new surface
    co = cairo.Context(target_surface)
    # draw to target_surface
    co.set_source(source_pattern)
    co.paint()
    target_pattern = cairo.SurfacePattern(target_surface)
    return target_pattern
def _get_source_pattern(self):
    """Return the (possibly cached) transformed pattern for drawing.

    The cache key is (surface identity, scale, rotation); the cache is
    invalidated whenever any of those change.
    """
    if self._source_surface is None:
        self._transformed_pattern_cache = (None, None)
        return None
    source_surface = self._source_surface
    new_check = (id(source_surface), self.scale, self.rotation)
    check, transformed_pattern = self._transformed_pattern_cache
    # see if the cache is invalid
    if new_check != check:
        # seems like a good rule of thumb to transform the buffer and
        # use the result as surface for scales lower than 1 but for
        # scales bigger than one the live transformation is fast enough
        # this is likely not the best behavior in all scenarios
        transform_buffer = self.scale < 1
        transformed_pattern = self._create_transformed_pattern(source_surface, transform_buffer)
        # cache the results
        self._transformed_pattern_cache = (new_check, transformed_pattern)
    return transformed_pattern
def draw_handler(self, da, cr):
    """Paint the visible portion of the transformed image pattern."""
    width = self.get_allocated_width()
    height = self.get_allocated_height()
    # current scroll offsets, floored to whole pixels
    left = math.floor(self.get_hadjustment().get_value())
    top = math.floor(self.get_vadjustment().get_value())
    pattern = self._get_source_pattern()
    if pattern is not None:
        cr.set_source(pattern)
        # draws just the visible area
        cr.rectangle(left, top, width, height)
        cr.fill()
class CanvasControls(object):
    """ Simplified interface for Canvas """

    # zoom changes in steps of 0.05, clamped to [MIN_SCALE, MAX_SCALE]
    ZOOM_STEP = 0.05
    MIN_SCALE = 0.05
    MAX_SCALE = 16

    def __init__(self, canvas):
        self.canvas = canvas

    def zoom_in(self):
        """Increase the canvas scale by one step (up to MAX_SCALE)."""
        # round first so accumulated float error doesn't drift the steps
        old = round(self.canvas.scale, 2)
        self.canvas.scale = min(self.MAX_SCALE, old + self.ZOOM_STEP)

    # backward-compatible alias: callers still use the old camelCase name
    zoomIn = zoom_in

    def zoom_out(self):
        """Decrease the canvas scale by one step (down to MIN_SCALE)."""
        old = round(self.canvas.scale, 2)
        self.canvas.scale = max(self.MIN_SCALE, old - self.ZOOM_STEP)

    def zoom_unit(self):
        """Reset the canvas to its natural size (scale == 1)."""
        self.canvas.scale = 1

    def zoom_fit(self):
        """ toggles scale to fit """
        self.canvas.scale_to_fit = not self.canvas.scale_to_fit

    def rotate_right(self):
        """Rotate clockwise by half PI (a quarter turn)."""
        self.canvas.add_rotation(0.5)

    def rotate_left(self):
        """Rotate counterclockwise by half PI (a quarter turn)."""
        self.canvas.add_rotation(-0.5)
class ScrollByHandTool(Gtk.EventBox):
    """ Drag and drop interface to scroll the Image (2 adjustments) when
    the mouse button is pressed and scrolling is possible
    """
    def __init__(self, hadjustment, vadjustment):
        Gtk.EventBox.__init__(self)
        self.add_events(
            Gdk.EventMask.BUTTON_PRESS_MASK
            | Gdk.EventMask.BUTTON_RELEASE_MASK
            | Gdk.EventMask.BUTTON1_MOTION_MASK # receive motion events only when button1 is pressed
            | Gdk.EventMask.POINTER_MOTION_HINT_MASK
        )
        self.hadjustment = hadjustment
        self.vadjustment = vadjustment
        vadjustment.connect('value-changed', self.adjustment_value_changed_handler)
        hadjustment.connect('value-changed', self.adjustment_value_changed_handler)
        self.connect('button-press-event' , self.button_press_handler)
        self.connect('button-release-event', self.button_release_handler)
        self.connect('motion-notify-event' , self.motion_notify_handler)
        # (start_x, start_y, original_x, original_y) while dragging, else None
        self._scroll_base = None
        self._can_scroll = False

    def adjustment_value_changed_handler(self, *args):
        """Update the scroll-ability state and the mouse cursor."""
        h = self.hadjustment
        v = self.vadjustment
        # "value" field represents the position of the scrollbar, which must
        # be between the "lower" field and "upper - page_size."
        can_scroll_x = 0 < h.get_upper() - h.get_lower() - h.get_page_size()
        can_scroll_y = 0 < v.get_upper() - v.get_lower() - v.get_page_size()
        self._can_scroll = can_scroll_x or can_scroll_y
        if self._can_scroll:
            # "grab" cursor signals that dragging is possible
            cursor = Gdk.Cursor.new(Gdk.CursorType.FLEUR)
            self.get_window().set_cursor(cursor)
        else:
            cursor = Gdk.Cursor.new(Gdk.CursorType.ARROW)
            self.get_window().set_cursor(cursor)
            # stop scrolling if doing so
            self._scroll_base = None

    def button_press_handler(self, canvas, event):
        """Start a drag by remembering the press position and offsets."""
        if not self._can_scroll:
            #no need to scroll
            self._scroll_base = None
            return
        original_x = self.hadjustment.get_value()
        original_y = self.vadjustment.get_value()
        self._scroll_base = event.x, event.y, original_x, original_y

    def motion_notify_handler(self, canvas, event):
        """Scroll by the distance the pointer moved since the press."""
        if self._scroll_base is None:
            return
        start_x, start_y, original_x, original_y = self._scroll_base
        now_x, now_y = event.x, event.y
        delta_x = now_x - start_x
        delta_y = now_y - start_y
        # drag direction is opposite to the scroll direction
        self.hadjustment.set_value(original_x - delta_x)
        self.vadjustment.set_value(original_y - delta_y)

    def button_release_handler(self, canvas, event):
        # end of drag
        self._scroll_base = None
class PreviewWindow(Gtk.Window):
    """ Display a preview of an image rendered as eps with inks_model as
    source for the PostScript device deviceN.
    """
    def __init__(self, preview_worker, inks_model, image_name=None):
        Gtk.Window.__init__(self)
        multitoner_icon_filename = os.path.join(DIRECTORY, 'assets', 'images',
                                                'multitoner_icon.svg')
        multitoner_icon = GdkPixbuf.Pixbuf.new_from_file(multitoner_icon_filename)
        self.set_icon(multitoner_icon)
        inks_model.add(self) #subscribe
        self._preview_worker = preview_worker

        def destroy_handler(self):
            """Detach this window from the worker and its actions on close."""
            # remove the PreviewWindow from preview_worker
            preview_worker.remove_client(self.id)
            # This fixes a bug where references to the PreviewWindow still
            # existed in the signal handler functions of the actions.
            # (like self.action_rotate_left_handler) GTK did not remove these
            # handlers and thus the PreviewWindow was not garbage collected.
            # So the weakref was never released from the model emitter.
            actions = self._global_actions.list_actions() + self._document_actions.list_actions()
            for action in actions:
                GObject.signal_handlers_destroy(action)
            self.disconnect(destroy_handler_id)
            return True
        destroy_handler_id = self.connect('destroy', destroy_handler)

        # NOTE(review): `weakref` appears to be weakref.ref imported under
        # that name at the top of the file — confirm
        self.inks_model = weakref(inks_model)
        self.image_name = image_name
        self.set_default_size(640, 480)
        self.set_has_resize_grip(True)
        # _timeout / _waiting / _update_needed coordinate the debounced
        # surface rendering, see _request_new_surface and _update_surface
        self._timeout = None
        self._waiting = False
        self._update_needed = False
        self._no_inks = False
        self.grid = Gtk.Grid()
        self.add(self.grid)
        self.scrolled = Gtk.ScrolledWindow()
        adjustments = (self.scrolled.get_hadjustment(),
                       self.scrolled.get_vadjustment())
        self.canvas = Canvas(*adjustments)
        self.canvas.set_halign(Gtk.Align.CENTER)
        self.canvas.set_valign(Gtk.Align.CENTER)
        self.canvas_ctrl = CanvasControls(self.canvas)
        self.scrolled.add(self.canvas)
        self.scrolled.set_hexpand(True)
        self.scrolled.set_vexpand(True)
        scroll_by_hand = ScrollByHandTool(*adjustments)
        scroll_by_hand.add(self.scrolled)
        self.menubar, self.toolbar = self._init_menu()
        # synchronize the zoom to fit value
        self._set_zoom_fit_action_active_value(self.canvas.scale_to_fit)
        self.canvas.connect('scale-to-fit-changed', self.scale_to_fit_changed_handler)
        # self.grid.attach(self.menubar, 0, 0, 1, 1)
        self.grid.attach(self.toolbar, 0, 1, 1, 1)
        self.grid.attach(scroll_by_hand, 0, 3, 1, 1)
        self._open_image(image_name)
@property
def id(self):
    """Unique identifier of this window, used as client id for the worker."""
    return id(self)
def _set_title(self):
    """Update the window title to include the current image name."""
    filename = self.image_name or _('(no image)')
    # the template needs the {filename} placeholder — the previous
    # template had none, so str.format silently dropped the file name
    self.set_title(_('Multitoner Preview: {filename}').format(filename=filename))
@property
def image_name(self):
    # lazily initialized to None so the setter comparison below works
    if not hasattr(self, '_image_name'):
        self._image_name = None
    return self._image_name

@image_name.setter
def image_name(self, value):
    # no-op when the name did not change, avoids redundant title updates
    if value == self.image_name:
        return
    self._image_name = value
    self._set_title()
def _make_global_actions(self):
    """Build the actions that are always available (file menu, quit)."""
    # NOTE(review): 'gloabl_actions' looks like a typo for
    # 'global_actions', but it is a runtime group name — left unchanged
    action_group = ActionGroup('gloabl_actions')
    action_group.add_actions([
          ('FileMenu', None, _('File'), None,
           None, None)
        , ('OpenImage', Gtk.STOCK_OPEN, _('Open Image'), 'o',
           _('Open An Image For Preview'), self.action_open_image_handler)
        , ('Quit', Gtk.STOCK_CLOSE, None, 'q',
           _('Close Preview Window'), self.action_close_handler)
        ])
    return action_group
def _make_document_actions(self):
    """Build the actions that require a loaded image (zoom, rotate, export)."""
    action_group = ActionGroup('document_actions')
    action_group.add_actions([
          ('EditMenu', None, _('Edit'), None,
           None, None)
        , ('ZoomIn', Gtk.STOCK_ZOOM_IN, None, 'plus',
           _('Zoom In'), self.action_zoom_in_handler)
        , ('ZoomOut', Gtk.STOCK_ZOOM_OUT, None, 'minus',
           _('Zoom Out'), self.action_zoom_out_handler)
        , ('ZoomUnit', Gtk.STOCK_ZOOM_100, None, '1',
           _('Zoom to normal size.'), self.action_zoom_unit_handler)
        ])
    # icon actions carry a stock icon / icon name and an accelerator key
    action_group.add_icon_actions([
          ('ZoomFit', None, _('Zoom to fit image to window size.'),
           None, self.action_zoom_fit_handler, 'F', Gtk.STOCK_ZOOM_FIT,
           Gtk.ToggleAction)
        , ('RotateRight', _('Rotate Clockwise'), _('Rotate clockwise.'),
           'object-rotate-right', self.action_rotate_right_handler, 'R')
        , ('RotateLeft', _('Rotate Counterclockwise'), _('Rotate counterclockwise.'),
           'object-rotate-left', self.action_rotate_left_handler, 'L')
        , ('ExportImage', _('Export Image'), _('Export image as EPS file.'),
           'document-save', self.action_export_image_handler, 'E')
        ])
    return action_group
def _set_zoom_fit_action_active_value(self, value):
    """Set the ZoomFit toggle without re-triggering its own handler."""
    zoom_fit_action = self._document_actions.get_action('ZoomFit')
    # block the handler to avoid a feedback loop between canvas and action
    zoom_fit_action.handler_block_by_func(self.action_zoom_fit_handler)
    zoom_fit_action.set_active(value)
    zoom_fit_action.handler_unblock_by_func(self.action_zoom_fit_handler)

def _init_menu(self):
    """Create menubar and toolbar from UI_INFO and wire up the actions."""
    self._global_actions = self._make_global_actions()
    self._document_actions = self._make_document_actions()
    # document actions stay disabled until a surface was rendered
    self._document_actions.set_sensitive(False)
    uimanager = Gtk.UIManager()
    uimanager.add_ui_from_string(UI_INFO)
    uimanager.insert_action_group(self._document_actions)
    uimanager.insert_action_group(self._global_actions)
    menubar = uimanager.get_widget("/MenuBar")
    toolbar = uimanager.get_widget("/ToolBar")
    # Add the accelerator group to the toplevel window
    accelgroup = uimanager.get_accel_group()
    self.add_accel_group(accelgroup)
    return menubar, toolbar
def _show_message(self, *message):
    """Display a message dialog attached to the toplevel window."""
    window = self.get_toplevel()
    show_message(window, *message)

def on_model_updated(self, inks_model, event, *args):
    """React to model changes by scheduling a new preview rendering."""
    if not inks_model.visible_curves:
        # nothing to render, clear the canvas
        self.canvas.receive_surface(None)
        self._no_inks = True
        return
    self._no_inks = False
    if event == 'curveUpdate':
        # whitelist, needs probably an update when more relevant events occur
        ink_event = args[1]
        if ink_event not in ('pointUpdate', 'addPoint', 'removePoint',
                             'setPoints', 'interpolationChanged',
                             'visibleChanged', 'cmykChanged',
                             'nameChanged'):
            return
    assert self.inks_model() is inks_model, 'A wrong inks_model instance ' \
                                            'publishes to this PreviewWindow'
    self._request_new_surface()
def _request_new_surface(self):
    """ this will be called very frequently, because generating the
    preview can take a moment this waits until the last call to this
    method was 300 milliseconds ago and then let the rendering start
    """
    # reset the timeout (debounce)
    if self._timeout is not None:
        GObject.source_remove(self._timeout)
    # schedule a new execution
    self._timeout = GObject.timeout_add(300, self._update_surface)

def _update_surface(self):
    """Put a rendering job on the worker queue if possible."""
    inks_model = self.inks_model()
    # see if the model still exists
    if inks_model is None or not inks_model.visible_curves or self.image_name is None:
        # need to return False, to cancel the timeout
        return False
    if self._waiting:
        # we are waiting for a job to finish, so we don't put another
        # job on the queue right now
        self._update_needed = True
        return False
    self._waiting = True
    callback = (self._worker_callback, self.image_name)
    self._preview_worker.add_job(self.id, callback, self.image_name, *inks_model.visible_curves)
    # this timeout shall not be executed repeatedly, thus returning false
    return False
def _worker_callback(self, *args):
    # called from the worker thread/process; hop back onto the GTK
    # main loop before touching any widget state
    GLib.idle_add(self._receive_surface, *args)

def _receive_surface(self, type, image_name, *args):
    """Handle a finished worker job of kind 'result' or 'error'.

    NOTE(review): the parameter `type` shadows the builtin of the
    same name; renaming would be a safe cleanup.
    """
    self._waiting = False
    if type == 'result':
        # an optional warning message travels as the last result item
        message = args[-1]
        if message is not None:
            GLib.idle_add(self._show_message, *message)
        cairo_surface = self._make_surface(image_name, *args[0:-1])
    else:
        if type == 'error':
            # give up on this image
            self.image_name = None
        GLib.idle_add(self._show_message, type, *args)
        cairo_surface = None
    # document actions only make sense while a surface is displayed
    if cairo_surface is not None:
        self._document_actions.set_sensitive(True)
    else:
        self._document_actions.set_sensitive(False)
    self.canvas.receive_surface(cairo_surface)

def _make_surface(self, image_name, w, h, rowstride, buf):
    """Wrap the raw worker pixel buffer in a cairo surface, or None."""
    if self._no_inks or self.image_name != image_name:
        # this may receive a surface after all inks are invisible
        # or after the image to display changed
        cairo_surface = None
    else:
        cairo_surface = cairo.ImageSurface.create_for_data(
            buf, cairo.FORMAT_RGB24, w, h, rowstride
        )
    if self._update_needed:
        # while we where waiting another update became due
        self._update_needed = False
        self._request_new_surface()
    return cairo_surface
def _open_image(self, image_name):
    """Set the image and trigger a rendering; no-op for None."""
    if image_name is None:
        return
    self.image_name = image_name
    self._request_new_surface()

def ask_for_image(self):
    """Show a file chooser and open the selected image."""
    window = self.get_toplevel()
    filename = show_open_image_dialog(window)
    self._open_image(filename)

# actions — thin adapters from GTK action signals to the controls
def action_zoom_in_handler(self, widget):
    self.canvas_ctrl.zoomIn()

def action_zoom_out_handler(self, widget):
    self.canvas_ctrl.zoom_out()

def action_zoom_unit_handler(self, widget):
    self.canvas_ctrl.zoom_unit()

def action_zoom_fit_handler(self, widget):
    self.canvas_ctrl.zoom_fit()

def action_rotate_right_handler(self, widget):
    self.canvas_ctrl.rotate_right()

def action_rotate_left_handler(self, widget):
    self.canvas_ctrl.rotate_left()

def scale_to_fit_changed_handler(self, widget, scale_to_fit):
    # keep the toggle action in sync with the canvas state
    self._set_zoom_fit_action_active_value(scale_to_fit)

def action_open_image_handler(self, widget):
    self.ask_for_image()

def action_close_handler(self, widget):
    self.destroy()

def action_export_image_handler(self, widget):
    """Export the current image as an EPS file via a save dialog."""
    inks_model = self.inks_model()
    if image_filename is None or inks_model is None:
        return
    window = self.get_toplevel()
    eps_filename = show_save_as_eps_dialog(window, image_filename)
    if eps_filename is None:
        return
    result, message = model2eps(inks_model, image_filename, eps_filename)
    if message:
        window = self.get_toplevel()
        show_message(window, *message)
if __name__ == '__main__':
    """ this can be used as stand alone preview application """
    import sys
    import json
    from model import ModelCurves, ModelInk
    from ghostscript_workers import PreviewWorker
    # NOTE(review): GObject.threads_init() is a no-op/deprecated in
    # modern PyGObject but harmless here
    GObject.threads_init()
    if len(sys.argv) > 1:
        mtt_file = sys.argv[1]
        image_name = None
        if len(sys.argv) > 2:
            image_name = sys.argv[-1]
        print (image_name, mtt_file)
        # load the ink curves model from the .mtt (json) file
        with open(mtt_file) as f:
            data = json.load(f)
        model = ModelCurves(ChildModel=ModelInk, **data)
        preview_worker = PreviewWorker.new_with_pool(processes=1)
        preview_window = PreviewWindow(preview_worker, model, image_name)
        preview_window.connect('destroy', Gtk.main_quit)
        preview_window.show_all()
        Gtk.main()
    else:
        print (_('Need a .mtt file as first argument and optionally an image file as last argument.'))
| graphicore/multitoner | gtk_preview.py | Python | gpl-3.0 | 31,806 | [
"FLEUR"
] | dce1e9506c9cd02f0d97a144e258d26993ab301886ef02a2abfaf196fd033a89 |
#!/usr/bin/env python3
"""The influence of windowing of log. sweep signals when using a
Kaiser Window by fixing beta (=7) and fade_in (=0).
fstart = 100 Hz
fstop = 5000 Hz
Deconvolution: Unwindowed
"""
import sys
sys.path.append('..')
import measurement_chain
import plotting
import calculation
import generation
import matplotlib.pyplot as plt
import windows
from scipy.signal import lfilter
import numpy as np
# Parameters of the measuring system
fs = 44100        # sampling rate in Hz
fstart = 100      # sweep start frequency in Hz
fstop = 5000      # sweep stop frequency in Hz
duration = 1      # sweep duration in seconds
pad = 4           # zero padding in seconds

# Generate excitation signal
excitation = generation.log_sweep(fstart, fstop, duration, fs)
N = len(excitation)

# Noise in measurement chain
noise_level_db = -30.
noise = measurement_chain.additive_noise(noise_level_db)

# FIR-Filter-System
dirac_system = measurement_chain.convolution([1.0])

# Combinate system elements
system = measurement_chain.chained(dirac_system, noise)

# Lists: Kaiser window parameters — beta and fade_in are fixed,
# fade_out is swept over 0..1000 samples
beta = 7
fade_in = 0
fade_out_list = np.arange(0, 1001, 1)

# Spectrum of dirac for reference
dirac = np.zeros(pad * fs)
dirac[0] = 1
dirac_f = np.fft.rfft(dirac)
def get_results(fade_out):
    """Return the impulse response for a given fade_out window length.

    Windows the excitation with a Kaiser window (fixed beta/fade_in),
    runs it through the simulated measurement chain and deconvolves
    against the unwindowed, zero-padded excitation.
    """
    excitation_windowed = excitation * windows.window_kaiser(N,
                                                             fade_in,
                                                             fade_out,
                                                             fs, beta)
    excitation_zeropadded = generation.zero_padding(excitation, pad, fs)
    excitation_windowed_zeropadded = generation.zero_padding(
        excitation_windowed, pad, fs)
    system_response = system(excitation_windowed_zeropadded)
    # deconvolution uses the unwindowed excitation (see module docstring)
    ir = calculation.deconv_process(excitation_zeropadded,
                                    system_response,
                                    fs)
    return ir
# For every fade_out length record the peak-to-noise ratio and the
# spectral distance to the ideal dirac, one line per fade_out value.
with open("log_sweep_kaiser_window_bandlimited_script6_1.txt", "w") as f:
    for fade_out in fade_out_list:
        ir = get_results(fade_out)
        pnr = calculation.pnr_db(ir[0], ir[1:4 * fs])
        spectrum_distance = calculation.vector_distance(
            dirac_f, np.fft.rfft(ir[:pad * fs]))
        f.write(
            str(fade_out) + " " + str(pnr) +
            " " + str(spectrum_distance) + " \n")
| spatialaudio/sweep | log_sweep_kaiser_window_bandlimited_script6/log_sweep_kaiser_window_bandlimited_script6_1.py | Python | mit | 2,209 | [
"DIRAC"
] | 483871c882e7be19cd215083653b35a56e4b0fb56599d0dd869293b5f1df8c5e |
"""
Sheets for simulating a moving eye.
This module provides two classes, ShiftingGeneratorSheet, and
SaccadeController, that can be used to simulate a moving eye,
controlled by topographic neural activity from structures like the
superior colliculus.
ShiftingGeneratorSheet is a subclass of GeneratorSheet that accepts a
saccade command on the 'Saccade' port in the form of a tuple:
(amplitude,direction), specified in degrees. It shifts its sheet
bounds in response to this command, keeping the centroid of the bounds
within a prespecified boundingregion.
SaccadeController is a subclass of CFSheet that accepts CF projections
and decodes its resulting activity into a saccade command suitable for
controlling a ShiftingGeneratorSheet.
$Id$
"""
__version__ = '$Revision$'
from numpy import sin,cos,pi,array,asarray,argmax,zeros,\
nonzero,take,random
import param
from topo.base.cf import CFSheet
from topo.base.simulation import PeriodicEventSequence,FunctionEvent
from topo.base.boundingregion import BoundingBox,BoundingRegionParameter
from topo.base.functionfamily import CoordinateMapperFn, IdentityMF
from topo.sheet import SequenceGeneratorSheet
from topo.misc import util
# JPALERT: The next three functions (activity_centroid,
# activity_sample, and activity_mode) could actually apply to any
# Sheet. Maybe they should be moved to topo.base.sheet?
def activity_centroid(sheet,activity=None,threshold=0.0):
    """
    Return the sheet coords of the (weighted) centroid of sheet activity.

    If the activity argument is not None, then it is used instead
    of sheet.activity.  If the sheet activity is all zero, the
    centroid of the sheet bounds is returned.
    """
    if activity is None:
        activity = sheet.activity
    ys = sheet.sheet_rows()
    xs = sheet.sheet_cols()
    # coordinate pairs in the same (row-major, top-to-bottom) order
    # as the flattened activity matrix
    xy = array([(x,y) for y in reversed(ys) for x in xs])
    a = activity.flat
    ## Optimization to only compute centroid from
    ## active (non-zero) units.
    idxs = nonzero(a > threshold)[0]
    if not len(idxs):
        return sheet.bounds.centroid()
    return util.centroid(take(xy,idxs,axis=0),take(a,idxs))
def activity_sample(sheet,activity=None):
    """
    Sample from the sheet activity as if it were a probability distribution.

    Returns the sheet coordinates of the sampled unit.  If
    activity is not None, it is used instead of sheet.activity.
    """
    if activity is None:
        activity = sheet.activity
    # draw a flat index weighted by activity, convert back to sheet coords
    idx = util.weighted_sample_idx(activity.ravel())
    r,c = util.idx2rowcol(idx,activity.shape)
    return sheet.matrix2sheet(r,c)
def activity_mode(sheet,activity=None):
    """
    Returns the sheet coordinates of the mode (highest value) of
    the sheet activity.
    """
    # JPHACKALERT: The mode is computed using numpy.argmax, and
    # thus for distributions with multiple equal-valued modes, the
    # result will have a systematic bias toward higher x and lower
    # y values. (in that order).  Function may still be useful for
    # unimodal activity distributions, or sheets without limiting/squashing
    # output functions.
    if activity is None:
        activity = sheet.activity
    idx = argmax(activity.flat)
    r,c = util.idx2rowcol(idx,activity.shape)
    return sheet.matrix2sheet(r,c)
class SaccadeController(CFSheet):
    """
    Sheet that decodes activity on CFProjections into a saccade command.

    This class accepts CF-projected input and computes its activity
    like a normal CFSheet, then decodes that activity into a saccade
    amplitude and direction as would be specified by activity in the
    superior colliculi.  The X dimension of activity corresponds to
    amplitude, the Y dimension to direction.  The activity is decoded
    to a single (x,y) point according to the parameter decode_method.

    From this (x,y) point an (amplitude,direction) pair, specified in
    degrees, is computed using the parameters amplitude_scale and
    direction scale.  That pair is then sent out on the 'Saccade'
    output port.

    NOTE: Non-linear mappings for saccade commands, as in Ottes, et
    al (below), are assumed to be provided using the coord_mapper
    parameter of the incoming CFProjection.

    References:
    Ottes, van Gisbergen, Egglermont. 1986. Visuomotor fields of the
    superior colliculus: a quantitative model.  Vision Research;
    26(6): 857-73.
    """

    # JPALERT: amplitude_scale and direction scale can be implemented as
    # part of self.command_mapper, so these should probably be removed.

    amplitude_scale = param.Number(default=120,doc="""
       Scale factor for saccade command amplitude, expressed in
       degrees per unit of sheet.  Indicates how large a saccade is
       represented by the x-component of the command input.""")

    direction_scale = param.Number(default=180,doc="""
       Scale factor for saccade command direction, expressed in
       degrees per unit of sheet.  Indicates what direction of saccade
       is represented by the y-component of the command input.""")

    decode_fn = param.Callable(default=activity_centroid,
        instantiate=False,doc="""
        The function for extracting a single point from sheet activity.
        Should take a sheet as the first argument, and return (x,y).""")

    command_mapper = param.ClassSelector(CoordinateMapperFn,default=IdentityMF(),
        doc="""
        A CoordinateMapperFn that will be applied to the command vector extracted
        from the sheet activity.""")

    src_ports = ['Activity','Saccade']

    def activate(self):
        """Compute activity, decode it and emit a saccade command."""
        super(SaccadeController,self).activate()

        # get the input projection activity
        # decode the input projection activity as a command
        xa,ya = self.decode_fn(self)
        self.verbose("Saccade command centroid = (%.2f,%.2f)"%(xa,ya))
        xa,ya = self.command_mapper(xa,ya)

        # convert sheet units into degrees
        amplitude = xa * self.amplitude_scale
        direction = ya * self.direction_scale
        self.verbose("Saccade amplitute = %.2f."%amplitude)
        self.verbose("Saccade direction = %.2f."%direction)
        self.send_output(src_port='Saccade',data=(amplitude,direction))
class ShiftingGeneratorSheet(SequenceGeneratorSheet):
    """
    A GeneratorSheet that takes an extra input on port 'Saccade'
    that specifies a saccade command as a tuple (amplitude,direction),
    indicating the relative size and direction of the saccade in
    degrees.  The parameter visual_angle_scale defines the
    relationship between degrees and sheet coordinates.  The parameter
    saccade bounds limits the region within which the saccades may occur.
    """

    visual_angle_scale = param.Number(default=90,doc="""
        The scale factor determining the visual angle subtended by this sheet, in
        degrees per unit of sheet.""")

    saccade_bounds = BoundingRegionParameter(default=BoundingBox(radius=1.0),doc="""
        The bounds for saccades.  Saccades are constrained such that the centroid of the
        sheet bounds remains within this region.""")

    generate_on_shift = param.Boolean(default=True,doc="""
       Whether to generate a new pattern when a shift occurs.""")

    fixation_jitter = param.Number(default=0,doc="""
       Standard deviation of Gaussian fixation jitter.""")

    fixation_jitter_period = param.Number(default=10,doc="""
       Period, in time units, indicating how often the eye jitters.
       """)

    dest_ports = ["Trigger","Saccade"]
    src_ports = ['Activity','Position']

    def __init__(self,**params):
        super(ShiftingGeneratorSheet,self).__init__(**params)
        # the point refixate() drifts back to; updated after each saccade
        self.fixation_point = self.bounds.centroid()

    def start(self):
        """Start the sheet and schedule the periodic refixation events."""
        super(ShiftingGeneratorSheet,self).start()
        if self.fixation_jitter_period > 0:
            now = self.simulation.time()
            refix_event = PeriodicEventSequence(now+self.simulation.convert_to_time_type(self.fixation_jitter_period),
                                                self.simulation.convert_to_time_type(self.fixation_jitter_period),
                                                [FunctionEvent(0,self.refixate)])
            self.simulation.enqueue_event(refix_event)

    def input_event(self,conn,data):
        # only the 'Saccade' port carries commands; other ports are ignored
        if conn.dest_port == 'Saccade':
            # the data should be (amplitude,direction)
            amplitude,direction = data
            self.shift(amplitude,direction)

    def generate(self):
        """Generate a pattern and publish the current bounds position."""
        super(ShiftingGeneratorSheet,self).generate()
        self.send_output(src_port='Position',
                         data=self.bounds.aarect().centroid())

    def shift(self,amplitude,direction,generate=None):
        """
        Shift the bounding box by the given amplitude and
        direction.

        Amplitude and direction are specified in degrees, and will be
        converted using the sheet's visual_angle_scale
        parameter. Negative directions are always downward, regardless
        of whether the amplitude is positive (rightword) or negative
        (leftward).  I.e. straight-down = -90, straight up = +90.

        The generate argument indicates whether or not to generate
        output after shifting.  If generate is None, then the value of
        the sheet's generate_on_shift parameter will be used.
        """
        # JPALERT: Right now this method assumes that we're doing
        # colliculus-style saccades. i.e. amplitude and direction
        # relative to the current position.  Technically it would
        # not be hard to also support absolute or relative x,y
        # commands, and select what style to use with either with
        # a parameter, or with a different input port (e.g. 'xy
        # relative', 'xy absolute' etc.

        # JPHACKALERT: Currently there is no support for modeling the
        # fact that saccades actually take time, and larger amplitudes
        # take more time than small amplitudes.  No clue if we should
        # do that, or how, or what gets sent out while the saccade
        # "eye" is moving.

        assert not self._out_of_bounds()

        # convert the command to x/y translation
        radius = amplitude/self.visual_angle_scale

        # if the amplitude is negative, negate the direction (so up is still up)
        if radius < 0.0:
            direction *= -1

        self._translate(radius,direction)

        if self._out_of_bounds():
            self._find_saccade_in_bounds(radius,direction)

        self.fixation_point = self.bounds.centroid()

        if generate is None:
            generate = self.generate_on_shift
        if generate:
            self.generate()

    def refixate(self):
        """
        Move the bounds toward the fixation point.

        Moves the bounds toward the fixation point specified in
        self.fixation_point, potentially with noise as specified by
        the parameter self.fixation_jitter.
        """
        self.debug("Refixating.")
        if self.fixation_jitter > 0:
            jitter_vec = random.normal(0,self.fixation_jitter,(2,))
        else:
            jitter_vec = zeros((2,))
        fix = asarray(self.fixation_point)
        pos = asarray(self.bounds.centroid())
        refix_vec = (fix - pos) + jitter_vec
        self.bounds.translate(*refix_vec)

    def _translate(self,radius,angle):
        # polar (radius, angle in degrees) -> cartesian bounds translation
        angle *= pi/180
        xoff = radius * cos(angle)
        yoff = radius * sin(angle)
        self.verbose("Applying translation vector (%.2f,%.2f)"%(xoff,yoff))
        self.bounds.translate(xoff,yoff)

    def _out_of_bounds(self):
        """
        Return true if the centroid of the current bounds is outside the saccade bounds.
        """
        return not self.saccade_bounds.contains(*self.bounds.aarect().centroid())

    def _find_saccade_in_bounds(self,radius,theta):
        """
        Find a saccade in the given direction (theta) that lies within self.saccade_bounds.

        Assumes that the given saccade was already performed and
        landed out of bounds.
        """
        # JPHACKALERT: This method iterates to search for a saccade
        # that lies in bounds along the saccade vector. We should
        # really compute this algebraically.  Doing so involves computing
        # the intersection of the saccade vector with the saccade
        # bounds.  Ideally, each type of BoundingRegion would know how
        # to compute its own intersection with a given line (should be
        # easy for boxes, circles, and ellipses, at least.)

        # Assume we're starting out of bounds, so start by shifting
        # back to the original position
        self._translate(-radius,theta)
        # step forward with halving radius until out of bounds again ...
        while not self._out_of_bounds():
            radius *= 0.5
            self._translate(radius,theta)
        # ... then binary-search back until inside the bounds
        radius = -radius
        while self._out_of_bounds():
            radius *= 0.5
            self._translate(radius,theta)
# public API of this module
__all__ = [
    "SaccadeController",
    "ShiftingGeneratorSheet",
    "activity_centroid",
    "activity_sample",
    "activity_mode",
]
| ioam/svn-history | topo/sheet/saccade.py | Python | bsd-3-clause | 13,233 | [
"Gaussian"
] | b222bacf3481d1f036233ccd9ef82b36f560aee681119f8f812b5fa868c10030 |
import logs
import copy
import csv
import logging
import os
import random
import shutil
import string
import StringIO
import sys
import subprocess
import tempfile
import threading
class EnergyPlus(object):
"""Represents the Energy Plus command-line software.
This class provides a consistent interface to the Energy Plus
software. With it, command-line processes can be run on IDF
files and their results can be processed. Instantiating
this class requires specification of the EPlus runner program,
postprocess program, and data definition file.
For the Windows and Ubuntu operating systems, the following
instantiations should work for default Energy Plus installations:
eplus_ubuntu = EnergyPlus("/usr/local/bin/energyplus",
"/usr/local/bin/runreadvars",
"/usr/local/EnergyPlus-7-0-0/Energy+.idd")
eplus_windows = EnergyPlus("C:\\EnergyPlus-7-0-0\\EnergyPlus.exe",
"C:\\EnergyPlusV7-0-0\\PostProcess\\ReadVarsESO.exe",
"C:\\EnergyPlus-7-0-0\\Energy+.idd")
"""
def __init__(self, runner=None, postprocessor=None, iddfile=None):
    """Configure the EnergyPlus executable and data-dictionary paths.

    Any argument left as None is filled with the default install
    location for the detected platform.

    Args:
        runner: path to the EnergyPlus executable.
        postprocessor: path to the ReadVarsESO post-process program.
        iddfile: path to the Energy+.idd data definition file.

    Raises:
        OSError: if the platform is neither Windows nor Linux.
    """
    if sys.platform == 'win32':
        if runner is None:
            runner = 'C:\\EnergyPlusV7-0-0\\EnergyPlus.exe'
        if postprocessor is None:
            postprocessor = 'C:\\EnergyPlusV7-0-0\\PostProcess\\ReadVarsESO.exe'
        if iddfile is None:
            iddfile = 'C:\\EnergyPlusV7-0-0\\Energy+.idd'
    elif sys.platform.startswith('linux'):
        # sys.platform is 'linux2' on Python 2 but 'linux' on Python 3;
        # startswith covers both (the original '== linux2' missed Python 3)
        if runner is None:
            runner = '/usr/local/bin/energyplus'
        if postprocessor is None:
            postprocessor = '/usr/local/bin/runreadvars'
        if iddfile is None:
            iddfile = '/usr/local/EnergyPlus-7-0-0/bin/Energy+.idd'
    else:
        raise OSError('Unsupported Platform')
    self.runner = runner
    self.postprocessor = postprocessor
    self.iddfile = iddfile
def run(self, model_filename, weather_filename,
supplemental_files=None, working_directory=None):
"""Run Energy Plus on the specified model file.
This function runs the Energy Plus command-line script on the
given IDF model and EPW weather files. If specified, the `supplemental_files`
should be a list of tuples of the form [(srcfilename, dstfilename),...].
The srcfilename should be the location of the supplemental file, and the
dstfilename should be what that file should be called in the EnergyPlus
running directory. (This is useful for schedule files which may have
hard-coded names in a model file.)
The function returns an EPlusResults object for the results
if the Energy Plus simulation could be executed and None otherwise.
"""
logger = logging.getLogger('eplus')
logger.addHandler(logs.NullHandler())
if (self.runner is not None and self.postprocessor is not None and
self.iddfile is not None and os.path.isfile(self.runner) and
os.path.isfile(self.postprocessor) and os.path.isfile(self.iddfile)):
if working_directory is None:
working_directory = os.getcwd()
elif not os.path.isabs(working_directory):
working_directory = os.path.join(os.getcwd(), working_directory)
if not os.path.exists(working_directory):
os.makedirs(working_directory)
output_directory = tempfile.mkdtemp(suffix='.%s' % str(id(threading.current_thread)), dir=working_directory)
shutil.copy(self.iddfile, os.path.join(output_directory, 'Energy+.idd'))
shutil.copy(weather_filename, os.path.join(output_directory, 'in.epw'))
if model_filename is not None and os.path.exists(model_filename):
shutil.copy(model_filename, os.path.join(output_directory, 'in.idf'))
if supplemental_files is not None:
for (srcfilename, dstfilename) in supplemental_files:
dstbasename = os.path.basename(dstfilename)
if os.path.exists(srcfilename):
shutil.copy(srcfilename, os.path.join(output_directory, dstbasename))
if os.path.isfile(model_filename) and os.path.isfile(weather_filename):
previous_dir = os.getcwd()
os.chdir(output_directory)
p = subprocess.Popen([self.runner], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
logger.debug('EnergyPlus.run() :: The runner stdout was \n{0}'.format(stdout.strip()))
logger.debug('EnergyPlus.run() :: The runner stderr was \n{0}'.format(stderr.strip()))
# The post-processing script on Linux works differently and requires
# the filename. The Windows batch file will allow arguments, but
# they are variable names to be collected instead of a filename.
if sys.platform == 'linux2':
ppargs = [self.postprocessor, 'eplusout.eso']
else:
ppargs = [self.postprocessor]
p = subprocess.Popen(ppargs, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
logger.debug('EnergyPlus.run() :: The postprocess stdout was \n{0}'.format(stdout.strip()))
logger.debug('EnergyPlus.run() :: The postprocess stderr was \n{0}'.format(stderr.strip()))
results = None
errtext = ''
# Load the err file and see if there was a fatal error.
with open(os.path.join(output_directory, 'eplusout.err')) as errfile:
errtext = errfile.read()
if ' Fatal ' in errtext:
logger.error('EnergyPlus.run() :: The model file {0} produced errors.'.format(model_filename))
logger.error('EnergyPlus.run() :: The error file contained \n{0}'.format(errtext.strip()))
else:
results_filename = os.path.join(output_directory, 'eplusout.csv')
with open(results_filename, 'r') as resultsfile:
results = EPlusResults(resultsfile)
os.chdir(previous_dir)
try:
shutil.rmtree(output_directory)
except:
pass
return results
elif not os.path.isfile(model_filename):
logger.error('EnergyPlus.run() :: The file {0} does not exist.'.format(model_filename))
return None
else:
logger.error('EnergyPlus.run() :: The file {0} does not exist.'.format(weather_filename))
return None
else:
logger.error('EnergyPlus.run() :: The EnergyPlus configuration (installation directory, etc.) is not valid.')
return None
class EPlusVariable(object):
    """A single tunable EnergyPlus model parameter.

    Identifies an IDF field (class/object/field triple) together with its
    default value, legal [minimum, maximum] range, sampling distribution
    and value type ('integer' or 'float'). The optimizer-facing
    attributes group, constraint and value are filled in later (by
    EPlusVariableSet.load and EPlusCandidate).
    """
    def __init__(self, idfclass=None, idfobject=None, idffield=None, default=None, minimum=None, maximum=None, distribution=None, type=None):
        self.idfclass = idfclass
        self.idfobject = idfobject
        self.idffield = idffield
        if type is not None:
            self.type = type.lower()  # normalized; expected 'float' or 'integer'
            if self.type == 'integer':
                self.default = int(default)
                self.minimum = int(minimum)
                self.maximum = int(maximum)
            elif self.type == 'float':
                self.default = float(default)
                self.minimum = float(minimum)
                self.maximum = float(maximum)
            else:
                # Unrecognized type string: keep the raw values. Previously
                # this case left default/minimum/maximum unset, so the
                # "self.value = self.default" assignment below raised
                # AttributeError for any type other than integer/float.
                self.default = default
                self.minimum = minimum
                self.maximum = maximum
        else:
            self.type = type
            self.default = default
            self.minimum = minimum
            self.maximum = maximum
        self.distribution = distribution.lower() if distribution is not None else distribution
        self.group = None       # group name; variables in a group share one value
        self.constraint = None  # EPlusConstraint attached by EPlusVariableSet.load
        self.value = self.default
    def get_random_value(self, random):
        """Return a random value in [minimum, maximum] using the given RNG.

        NOTE(review): self.distribution is currently ignored; sampling is
        always uniform regardless of the configured distribution.
        """
        # Need to consider the distribution here.
        if self.type == 'integer':
            return random.randint(self.minimum, self.maximum)
        else:  # self.type == 'float'
            return random.uniform(self.minimum, self.maximum)
    def get_center(self):
        """Return the midpoint of the [minimum, maximum] range."""
        if self.type == 'integer':
            return int((self.maximum + self.minimum) / 2.0)
        else:  # self.type == 'float'
            return (self.maximum + self.minimum) / 2.0
    def __str__(self):
        return '{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}, {10}'.format(self.idfclass, self.idfobject, self.idffield, self.default,
                                                                               self.minimum, self.maximum, self.distribution, self.type,
                                                                               self.group, self.constraint, self.value)
    def __repr__(self):
        return str(self)
class EPlusConstraint(object):
    """Represents a complex constraint on EnergyPlus variable values.

    This class defines a constraint on the variables in an EnergyPlus
    candidate. These constraints are complex in that they have interaction
    with other variables. Simple constraints, such as the variable being
    within some legal range, are handled using the minimum and maximum
    values for the variable. A constraint is essentially an expression of
    inequality (< or <=) that contains variable names (group names for the
    E+ variables).
    """
    def __init__(self, constraint=''):
        comparators = ['>=', '<=', '>', '<']
        operators = ['+', '-', '*', '/']
        self.EPSILON = 0.0000000001  # slack used by callers to tighten strict inequalities
        self.constraint = constraint.strip()
        self.variables = []   # group names mentioned in the expression
        self.comparator = None
        self.lhs = None
        self.rhs = None
        # Pad the (single) comparator with spaces so it tokenizes cleanly.
        # Because of the pain of having to deal with both >= and >,
        # we can simply rely on the fact that any legitimate constraint
        # will only have ONE of those; a length change means it was found.
        for c in comparators:
            s = self.constraint.replace(c, ' {0} '.format(c))
            if len(s) != len(self.constraint):
                self.constraint = s
                break
        # Pad arithmetic operators too, then collapse runs of whitespace.
        for o in operators:
            self.constraint = self.constraint.replace(o, ' {0} '.format(o))
        self.constraint = ' '.join(self.constraint.split())
        parts = self.constraint.split()
        if len(parts) > 0:
            for p in parts:
                # Tokens starting with a letter are variable (group) names.
                # Use the locale-independent ASCII constants; the previous
                # string.lowercase/string.uppercase were Python-2-only and
                # locale-dependent.
                if p[0] in string.ascii_lowercase or p[0] in string.ascii_uppercase:
                    self.variables.append(p)
                elif p in comparators:
                    self.comparator = p
            # Guard against malformed constraints with no comparator; the
            # previous unconditional split raised IndexError on sides[1]
            # for input such as "orphan". Such constraints simply remain
            # invalid (is_valid() returns False).
            if self.comparator is not None:
                sides = self.constraint.split(self.comparator)
                self.lhs = sides[0].strip()
                self.rhs = sides[1].strip()
    def is_valid(self):
        """Determines whether the constraint is valid (i.e., is well-formed).

        Validity here means that the constraint is a well-formed inequality
        in that it contains variable names and a comparator.
        """
        return self.constraint is not None and len(self.constraint) > 0 and len(self.variables) > 0 and self.comparator is not None
    def __str__(self):
        if self.is_valid():
            return self.constraint
        else:
            return ''
    def __repr__(self):
        return str(self)
class EPlusCandidate(object):
    """Represents an EnergyPlus candidate solution.

    Each candidate is essentially a dictionary of lists of EnergyPlus
    variables keyed to their group names. For instance, if there were 5
    variables and 3 groups, the candidate might look like the following:

        {'grp1': [var1], 'grp2': [var2, var4], 'grp3': [var3, var5]}

    Variables in the same group have exactly the same values. As far as
    the evolutionary computation is concerned, the groups are the
    candidate components. The class provides methods for retrieving and
    storing only the variable values (as a list in ascending order by
    group name), as well as for evaluating any constraints on the
    variables.

    __iter__ yields the group names in sorted order, which allows code
    such as:

        for group in candidate:
            var = candidate.get_variable(group)
    """
    def __init__(self, variables):
        """Constructs a candidate solution from a list of EPlusVariable objects.

        This constructor accepts a list of EnergyPlus variables and creates
        a dictionary of those variables keyed to their groups. Each
        variable is deep-copied so the candidate owns its values.
        """
        self.variables = {}
        for variable in variables:
            newvar = copy.deepcopy(variable)
            if variable.group not in self.variables:
                self.variables[variable.group] = [newvar]
            else:
                self.variables[variable.group].append(newvar)
    def __iter__(self):
        # Yield group names in ascending (sorted) order.
        return iter(sorted(self.variables))
    def _evaluate(self, expression, variables):
        # Evaluate a constraint expression by exec-ing the name->value
        # bindings into the local namespace and eval-ing the expression.
        # NOTE(review): assumes trusted names/expressions (they come from
        # the parameter file), and relies on Python 2 exec semantics --
        # under Python 3, exec cannot rebind function locals, so this
        # would need an explicit namespace dict instead. Confirm before
        # porting.
        # Remove the variables before we assign.
        for varname in variables:
            try:
                exec('del {0}'.format(varname))
            except NameError:
                pass
        for varname in variables:
            exec('{0} = {1}'.format(varname, variables[varname]))
        result = eval(expression)
        # Remove the variables after we're done.
        for varname in variables:
            exec('del {0}'.format(varname))
        return result
    def get_variable(self, group):
        """Returns the representative variable for the group.

        If a group contains more than one variable, only the first is
        returned.
        """
        return self.variables[group][0]
    def get_value(self, group):
        """Returns the representative value for the group.

        If a group contains more than one variable, only the first
        variable's value is returned.
        """
        return self.variables[group][0].value
    def set_value(self, group, value):
        """Sets all variables in the group to the specified value.
        """
        for i, v in enumerate(self.variables[group]):
            self.variables[group][i].value = value
    def get_constraint_order(self):
        """Returns a list of the groups in the candidate in ascending order of range.

        This method returns a list of the groups (keys) in the candidate,
        sorted by the flexibility of their constraints. Variables that
        appear in many constraints should be higher in the order than
        those in fewer constraints. Likewise, variables with small ranges
        should appear higher in the order than those with large ranges.
        """
        # Count how many constraints mention each group.
        constraint_count = {}
        for g in sorted(self.variables):
            constraint_count[g] = 0
        for group in sorted(self.variables):
            variable = self.get_variable(group)
            if variable.constraint is not None:
                for cvar in variable.constraint.variables:
                    if cvar in constraint_count:
                        constraint_count[cvar] += 1
        order = []
        for group in sorted(self.variables):
            variable = self.get_variable(group)
            range = variable.maximum - variable.minimum  # NOTE: shadows the builtin 'range'
            order.append((group, constraint_count[group], range))
        # Two stable sorts: secondary key first (smallest range), then the
        # primary key (most constraint mentions, descending), so heavily
        # constrained, narrow-range groups come first.
        order.sort(key=lambda x: x[2])
        order.sort(key=lambda x: x[1], reverse=True)
        return [x[0] for x in order]
    def get_constrained_bounds(self, group):
        """Returns the current bounds for a given variable group's constraints.

        This method returns a (min, max) tuple representing the bounds for
        a given variable group. The reason this method is included is
        because, during constraint evaluation, the setting of one
        variable's value will likely impact the actual range of another
        constrained variable. This method, when paired with the
        get_constraint_order method, can be called before each variable
        group is set.
        """
        variable = self.get_variable(group)
        constraint = variable.constraint
        if constraint.is_valid():
            # Evaluate lhs-rhs at the variable's min and max to determine
            # the sign of the free variable in the constraint expression.
            var_dict = {}
            for varname in constraint.variables:
                var_dict[varname] = self.get_value(varname)
            var_dict[variable.group] = variable.minimum
            remainder = self._evaluate(constraint.lhs, var_dict) - self._evaluate(constraint.rhs, var_dict)
            var_dict[variable.group] = variable.maximum
            sign_check = self._evaluate(constraint.lhs, var_dict) - self._evaluate(constraint.rhs, var_dict)
            # So we now have our free variable X along with a vector of
            # other variables V, and we know that Xmin + REMAINDER + V = 0.
            # So if we require that Xmin + REMAINDER + V < 0, then X can
            # at most be Xmin + REMAINDER.
            # But what if it's -Xmin + REMAINDER + V = 0?
            # In that case, if we require that -Xmin + REMAINDER + V < 0,
            # then X must be at least Xmin + REMAINDER.
            if sign_check < remainder:
                # We have a negative sign on the free variable.
                new_bounds = (remainder + variable.minimum, variable.maximum)
            else:
                # We have a positive sign on the free variable.
                new_bounds = (variable.minimum, variable.minimum - remainder)
            if '<=' in constraint.comparator:
                bounds = (max(variable.minimum, new_bounds[0]), min(variable.maximum, new_bounds[1]))
                return (min(bounds[0], variable.maximum), max(bounds[1], variable.minimum))
            elif '<' in constraint.comparator:
                # Strict inequality: shrink by EPSILON so the boundary
                # itself is excluded.
                bounds = (max(variable.minimum, new_bounds[0] + constraint.EPSILON), min(variable.maximum, new_bounds[1] - constraint.EPSILON))
                return (min(bounds[0], variable.maximum), max(bounds[1], variable.minimum))
            else:
                raise NotImplementedError('{0} constraints are not currently supported.'.format(constraint.comparator))
        else:
            # Unconstrained: the static range applies.
            return (variable.minimum, variable.maximum)
    def evaluate_constraint(self, group):
        """Returns True if the constraint is currently satisfied for the specified group.
        """
        variable = self.variables[group][0]
        constraint = variable.constraint
        if constraint.is_valid():
            var_dict = {}
            for varname in constraint.variables:
                var_dict[varname] = self.get_value(varname)
            return self._evaluate(constraint.constraint, var_dict)
        else:
            # No (valid) constraint means trivially satisfied.
            return True
    def get_values(self):
        """Returns a group-sorted list of the variable values in the candidate.
        """
        vals = []
        for group in sorted(self.variables):
            vals.append(self.get_value(group))
        return vals
    def set_values(self, values):
        """Sets the variable values in the candidate according to the group-sorted list.
        """
        for val, group in zip(values, sorted(self.variables)):
            self.set_value(group, val)
    def permutation(self, prng=None):
        """Creates a new candidate that is a random permutation of the candidate.
        """
        if prng is None:
            prng = random
        perm = copy.deepcopy(self)
        groups = perm.get_constraint_order()
        # First pass: center every group so the constrained-bound
        # computation starts from a feasible-ish configuration.
        for group in groups:
            variable = perm.get_variable(group)
            perm.set_value(group, variable.get_center())
        # Second pass: sample each group within its constrained bounds.
        for group in groups:
            variable = perm.get_variable(group)
            lo, hi = perm.get_constrained_bounds(group)
            if variable.type == 'integer':
                perm.set_value(group, prng.randint(lo, hi))
            elif variable.type == 'float':
                perm.set_value(group, prng.uniform(lo, hi))
        return perm
    def values_from_idf(self, idffile):
        """Initializes the candidate with values from the IDFFile object.

        This method takes an IDFFile object representing an IDF file and
        initializes the values of the candidate to those pulled from the
        IDFFile. The candidate must already exist before this method can
        be used. Remember that the actual candidate elements must come
        from a parameter file. Otherwise, this method would not know which
        values to pull from the IDFFile object.
        """
        for group in self.variables:
            for i, v in enumerate(self.variables[group]):
                val = idffile.find(v.idfclass, v.idfobject, v.idffield)
                self.variables[group][i].value = int(val) if v.type == 'integer' else float(val)
    def values_to_idf(self, idffile):
        """Loads the IDFFile object with the candidate's values and returns the modified IDFFile.

        This method takes an IDFFile object, makes a deep copy and replaces
        its values with those from the candidate. The method returns the
        full, modified IDFFile copy.
        """
        idffile = copy.deepcopy(idffile)
        for group in self.variables:
            for i, v in enumerate(self.variables[group]):
                idffile.update(v.idfclass, v.idfobject, v.idffield, str(v.value))
        return idffile
    def __str__(self):
        return str(self.get_values())
    def __repr__(self):
        return str(self)
class EPlusVariableSet(object):
    """An ordered, indexable collection of EPlusVariable objects.

    The set is built from an iterable of parameter dicts (typically rows
    from a csv.DictReader over a parameter file) and can render itself
    back to CSV text via str().
    """
    def __init__(self, param_iterable=None):
        self.load(param_iterable)
    def __iter__(self):
        return iter(self.variables)
    def __getitem__(self, i):
        return self.variables[i]
    def __setitem__(self, i, value):
        self.variables[i] = value
    def __len__(self):
        return len(self.variables)
    def load(self, param_iterable=None):
        """Rebuild the variable list from an iterable of parameter dicts.

        Rows without an explicit 'Group' receive an auto-generated
        EnergyPlusGroupNNN name; rows without a 'Constraint' get an
        empty (invalid) EPlusConstraint. Returns the new variable list.
        """
        self.variables = []
        if param_iterable is not None:
            auto_group = 1
            for row in param_iterable:
                var = EPlusVariable(row['Class'], row['Object'], row['Field'],
                                    row['Default'], row['Minimum'], row['Maximum'],
                                    row['Distribution'], row['Type'])
                group_name = row['Group'].strip() if 'Group' in row else ''
                if group_name:
                    var.group = group_name
                else:
                    var.group = 'EnergyPlusGroup{0:03d}'.format(auto_group)
                    auto_group += 1
                constraint_text = row['Constraint'].strip() if 'Constraint' in row else ''
                var.constraint = EPlusConstraint(constraint_text)
                self.variables.append(var)
        return self.variables
    def __str__(self):
        """Render the set as CSV text matching the parameter-file format.

        Auto-generated group names are blanked so a round-trip through
        load() regenerates them.
        """
        fieldnames = ['Class', 'Object', 'Field', 'Default',
                      'Minimum', 'Maximum', 'Distribution',
                      'Type', 'Group', 'Constraint']
        rows = []
        for var in self.variables:
            rows.append({'Class': var.idfclass,
                         'Object': var.idfobject,
                         'Field': var.idffield,
                         'Default': var.default,
                         'Minimum': var.minimum,
                         'Maximum': var.maximum,
                         'Distribution': var.distribution,
                         'Type': var.type,
                         'Group': '' if 'EnergyPlusGroup' in var.group else var.group,
                         'Constraint': var.constraint})
        buffer = StringIO.StringIO()
        writer = csv.DictWriter(buffer, fieldnames)
        writer.writeheader()
        writer.writerows(rows)
        return buffer.getvalue()
class EPlusResults(object):
    """Tabular EnergyPlus simulation results, one dict per output row.

    Parses the eplusout.csv text produced by the ReadVarsESO
    post-processor. The 'Date/Time' column is kept as a stripped string;
    every other cell is converted to float, with unparseable or missing
    cells stored as None.
    """
    def __init__(self, resultsfile):
        # resultsfile is any open file-like object containing CSV text.
        self.results = []
        self.load(resultsfile)
    def __iter__(self):
        return iter(self.results)
    def __getitem__(self, i):
        return self.results[i]
    def __setitem__(self, i, value):
        self.results[i] = value
    def __len__(self):
        return len(self.results)
    def load(self, resultsfile):
        """Replace self.results with the rows parsed from resultsfile."""
        self.results = []
        resultstr = resultsfile.read()
        resultcsv = csv.DictReader(resultstr.splitlines())
        for row in resultcsv:
            r = {}
            for key in row:
                if 'Date/Time' in key:
                    r[key.strip()] = row[key].strip()
                else:
                    try:
                        r[key.strip()] = float(row[key])
                    except (ValueError, TypeError):
                        # Non-numeric cell, or a short row where DictReader
                        # filled the missing field with None (previously an
                        # uncaught TypeError).
                        r[key.strip()] = None
            self.results.append(r)
    def __str__(self):
        """Render the results back to CSV text, 'Date/Time' column first.

        NOTE(review): raises IndexError when there are no rows, since the
        header is derived from results[0]; callers appear to only print
        non-empty result sets -- confirm before hardening.
        """
        strfile = StringIO.StringIO()
        header = ['Date/Time'] + [k for k in sorted(self.results[0]) if k != 'Date/Time']
        writer = csv.DictWriter(strfile, header)
        # Use the documented API rather than reaching into the private
        # underlying writer (was: writer.writer.writerow(header)).
        writer.writeheader()
        writer.writerows(self.results)
        return strfile.getvalue()
| aarongarrett/idf2xml | idf2xml/eplus.py | Python | apache-2.0 | 25,729 | [
"EPW"
] | 3f1634c7a8fad4adf8ccf43a25e9489b8be3a1022756b12c01564c89e9682026 |
#
#
# setup.py
#
# Installation script to get setuptools to install pgradd into
# a Python environment.
#
import sys
import setuptools
# Import the lengthy rich-text README as the package's long
# description:
# Use the rich-text README as the package's long description on PyPI.
with open('README.rst', 'r') as fh:
    long_description = fh.read()
# Keyword arguments for setuptools.setup(), collected in a dict so that
# Python-3-only keys can be added conditionally below.
setuptools_info = {
    'name': 'pgradd',
    'version': '2.2',
    'author': 'Vlachos Research Group',
    'author_email': 'vlachos@udel.edu',
    'description': 'Python package implements the Group Additivity (GA) method for estimating thermodynamic properties',
    'long_description': long_description,
    'zip_safe': True,
    'url': 'https://github.com/VlachosGroup/PythonGroupAdditivity',
    'packages': setuptools.find_packages(),
    'include_package_data': True,
    'exclude_package_data': {
        '': [ 'README.rst', 'docs', 'example', 'tests', 'PKG-INFO', 'LICENSE.md' ]
    },
    'install_requires': [
        'pmutt>=1.2.14',
        'scipy>=1.1.0',
        'numpy>=1.15.1',
        'IPython>=7.0.0',
        'PyYAML>=3.0',
    ],
    # NOTE(review): dependency_links is deprecated in modern pip/setuptools
    # and ignored by default -- the rdkit dependency may not be installed
    # from here.
    'dependency_links': [
        'git+https://github.com/rdkit/rdkit/',
    ],
    'classifiers': [
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Intended Audience :: Science/Research",
        "Topic :: Scientific/Engineering :: Chemistry",
    ],
}
if sys.version_info[0] >= 3:
    #
    # Augment for Python 3 setuptools:
    #
    setuptools_info['long_description_content_type'] = 'text/x-rst'
setuptools.setup(**setuptools_info)
| VlachosGroup/VlachosGroupAdditivity | setup.py | Python | mit | 1,456 | [
"RDKit"
] | 04f4f20eef87996368f4dc1da356bb36a280746884730861bf5a93a866cbebc3 |
# module pyparsing.py
#
# Copyright (c) 2003-2021 Paul T. McGuire
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__doc__ = """
pyparsing module - Classes and methods to define and execute parsing grammars
=============================================================================
The pyparsing module is an alternative approach to creating and
executing simple grammars, vs. the traditional lex/yacc approach, or the
use of regular expressions. With pyparsing, you don't need to learn
a new syntax for defining grammars or matching expressions - the parsing
module provides a library of classes that you use to construct the
grammar directly in Python.
Here is a program to parse "Hello, World!" (or any greeting of the form
``"<salutation>, <addressee>!"``), built up using :class:`Word`,
:class:`Literal`, and :class:`And` elements
(the :meth:`'+'<ParserElement.__add__>` operators create :class:`And` expressions,
and the strings are auto-converted to :class:`Literal` expressions)::
from pip._vendor.pyparsing import Word, alphas
# define grammar of a greeting
greet = Word(alphas) + "," + Word(alphas) + "!"
hello = "Hello, World!"
print(hello, "->", greet.parse_string(hello))
The program outputs the following::
Hello, World! -> ['Hello', ',', 'World', '!']
The Python representation of the grammar is quite readable, owing to the
self-explanatory class names, and the use of :class:`'+'<And>`,
:class:`'|'<MatchFirst>`, :class:`'^'<Or>` and :class:`'&'<Each>` operators.
The :class:`ParseResults` object returned from
:class:`ParserElement.parseString` can be
accessed as a nested list, a dictionary, or an object with named
attributes.
The pyparsing module handles some of the problems that are typically
vexing when writing text parsers:
- extra or missing whitespace (the above program will also handle
"Hello,World!", "Hello , World !", etc.)
- quoted strings
- embedded comments
Getting Started -
-----------------
Visit the classes :class:`ParserElement` and :class:`ParseResults` to
see the base classes that most other pyparsing
classes inherit from. Use the docstrings for examples of how to:
- construct literal match expressions from :class:`Literal` and
:class:`CaselessLiteral` classes
- construct character word-group expressions using the :class:`Word`
class
- see how to create repetitive expressions using :class:`ZeroOrMore`
and :class:`OneOrMore` classes
- use :class:`'+'<And>`, :class:`'|'<MatchFirst>`, :class:`'^'<Or>`,
and :class:`'&'<Each>` operators to combine simple expressions into
more complex ones
- associate names with your parsed results using
:class:`ParserElement.setResultsName`
- access the parsed data, which is returned as a :class:`ParseResults`
object
- find some helpful expression short-cuts like :class:`delimitedList`
and :class:`oneOf`
- find more useful common expressions in the :class:`pyparsing_common`
namespace class
"""
from typing import NamedTuple
class version_info(NamedTuple):
    """Structured pyparsing version number with PEP 440-style rendering."""

    major: int
    minor: int
    micro: int
    releaselevel: str
    serial: int

    @property
    def __version__(self):
        # Base "major.minor.micro"; pre-releases append an abbreviated
        # level tag plus the serial (e.g. "candidate" -> "rc1", "alpha" -> "a0").
        base = "{}.{}.{}".format(self.major, self.minor, self.micro)
        if self.releaselevel == "final":
            return base
        level = self.releaselevel[0]
        prefix = "r" if level == "c" else ""
        return base + "{}{}{}".format(prefix, level, self.serial)

    def __str__(self):
        return "{} {} / {}".format(__name__, self.__version__, __version_time__)

    def __repr__(self):
        field_reprs = ", ".join(
            "{}={!r}".format(name, value) for name, value in zip(self._fields, self)
        )
        return "{}.{}({})".format(__name__, type(self).__name__, field_reprs)
# Concrete version of this release; __version__/__versionTime__ are the
# flat string forms (and pre-PEP8 synonym) that importers consume.
__version_info__ = version_info(3, 0, 7, "final", 0)
__version_time__ = "15 Jan 2022 04:10 UTC"
__version__ = __version_info__.__version__
__versionTime__ = __version_time__
__author__ = "Paul McGuire <ptmcg.gm+pyparsing@gmail.com>"
from .util import *
from .exceptions import *
from .actions import *
from .core import __diag__, __compat__
from .results import *
from .core import *
from .core import _builtin_exprs as core_builtin_exprs
from .helpers import *
from .helpers import _builtin_exprs as helper_builtin_exprs
from .unicode import unicode_set, UnicodeRangeList, pyparsing_unicode as unicode
from .testing import pyparsing_test as testing
from .common import (
pyparsing_common as common,
_builtin_exprs as common_builtin_exprs,
)
# define backward compat synonyms
# (guarded so they are only created if the star-imports above did not
# already bring them in under these names)
if "pyparsing_unicode" not in globals():
    pyparsing_unicode = unicode
if "pyparsing_common" not in globals():
    pyparsing_common = common
if "pyparsing_test" not in globals():
    pyparsing_test = testing
# Combine the per-module builtin expression lists into one registry.
core_builtin_exprs += common_builtin_exprs + helper_builtin_exprs
# Explicit public API for "from pyparsing import *": current PEP8
# snake_case names first, followed by the pre-PEP8 camelCase synonyms.
# NOTE(review): "indentedBlock" and "locatedExpr" appear in both sections
# and are therefore listed twice -- harmless but redundant.
__all__ = [
    "__version__",
    "__version_time__",
    "__author__",
    "__compat__",
    "__diag__",
    "And",
    "AtLineStart",
    "AtStringStart",
    "CaselessKeyword",
    "CaselessLiteral",
    "CharsNotIn",
    "Combine",
    "Dict",
    "Each",
    "Empty",
    "FollowedBy",
    "Forward",
    "GoToColumn",
    "Group",
    "IndentedBlock",
    "Keyword",
    "LineEnd",
    "LineStart",
    "Literal",
    "Located",
    "PrecededBy",
    "MatchFirst",
    "NoMatch",
    "NotAny",
    "OneOrMore",
    "OnlyOnce",
    "OpAssoc",
    "Opt",
    "Optional",
    "Or",
    "ParseBaseException",
    "ParseElementEnhance",
    "ParseException",
    "ParseExpression",
    "ParseFatalException",
    "ParseResults",
    "ParseSyntaxException",
    "ParserElement",
    "PositionToken",
    "QuotedString",
    "RecursiveGrammarException",
    "Regex",
    "SkipTo",
    "StringEnd",
    "StringStart",
    "Suppress",
    "Token",
    "TokenConverter",
    "White",
    "Word",
    "WordEnd",
    "WordStart",
    "ZeroOrMore",
    "Char",
    "alphanums",
    "alphas",
    "alphas8bit",
    "any_close_tag",
    "any_open_tag",
    "c_style_comment",
    "col",
    "common_html_entity",
    "counted_array",
    "cpp_style_comment",
    "dbl_quoted_string",
    "dbl_slash_comment",
    "delimited_list",
    "dict_of",
    "empty",
    "hexnums",
    "html_comment",
    "identchars",
    "identbodychars",
    "java_style_comment",
    "line",
    "line_end",
    "line_start",
    "lineno",
    "make_html_tags",
    "make_xml_tags",
    "match_only_at_col",
    "match_previous_expr",
    "match_previous_literal",
    "nested_expr",
    "null_debug_action",
    "nums",
    "one_of",
    "printables",
    "punc8bit",
    "python_style_comment",
    "quoted_string",
    "remove_quotes",
    "replace_with",
    "replace_html_entity",
    "rest_of_line",
    "sgl_quoted_string",
    "srange",
    "string_end",
    "string_start",
    "trace_parse_action",
    "unicode_string",
    "with_attribute",
    "indentedBlock",
    "original_text_for",
    "ungroup",
    "infix_notation",
    "locatedExpr",
    "with_class",
    "CloseMatch",
    "token_map",
    "pyparsing_common",
    "pyparsing_unicode",
    "unicode_set",
    "condition_as_parse_action",
    "pyparsing_test",
    # pre-PEP8 compatibility names
    "__versionTime__",
    "anyCloseTag",
    "anyOpenTag",
    "cStyleComment",
    "commonHTMLEntity",
    "countedArray",
    "cppStyleComment",
    "dblQuotedString",
    "dblSlashComment",
    "delimitedList",
    "dictOf",
    "htmlComment",
    "javaStyleComment",
    "lineEnd",
    "lineStart",
    "makeHTMLTags",
    "makeXMLTags",
    "matchOnlyAtCol",
    "matchPreviousExpr",
    "matchPreviousLiteral",
    "nestedExpr",
    "nullDebugAction",
    "oneOf",
    "opAssoc",
    "pythonStyleComment",
    "quotedString",
    "removeQuotes",
    "replaceHTMLEntity",
    "replaceWith",
    "restOfLine",
    "sglQuotedString",
    "stringEnd",
    "stringStart",
    "traceParseAction",
    "unicodeString",
    "withAttribute",
    "indentedBlock",
    "originalTextFor",
    "infixNotation",
    "locatedExpr",
    "withClass",
    "tokenMap",
    "conditionAsParseAction",
    "autoname_elements",
]
| pradyunsg/pip | src/pip/_vendor/pyparsing/__init__.py | Python | mit | 9,107 | [
"VisIt"
] | 8d785e193153d5beabff85b1b8e134b9552a8a8b8b277587cce49ca4b89e4604 |
#***********************************************************************
# This code is part of CmplServer
#
# Copyright (C) 2013, 2014
# Mike Steglich - Technical University of Applied Sciences
# Wildau, Germany
#
# CmplServer is a project of the Technical University of
# Applied Sciences Wildau and the Institute for Operations Research
# and Business Management at the Martin Luther University
# Halle-Wittenberg.
# Please visit the project homepage <www.coliop.org>
#
# CmplServer is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# CmplServer is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
#**********************************************************************
#!/usr/bin/python
import clr
clr.AddReference('System.Windows.Forms')
clr.AddReference('System.Drawing')
from System.Windows.Forms import Form, Label, TabAlignment, TabControl, TabPage, DockStyle, Panel, ToolStripMenuItem, Button, TextBox, FormBorderStyle,FolderBrowserDialog,DialogResult,MessageBox,MessageBoxButtons, ScrollBars
from System.Drawing import Point, Font
from System.IO import FileStream, FileMode
import sys
import os
import thread
import xmlrpclib
import socket
import threading
#************* pyCmpl and cmplserver includes ****************
# Resolve the CMPL installation from the CMPLPATH environment variable and
# publish the binary/server locations to child processes via the environment.
try:
    cmplPath=os.environ['CMPLPATH']
except:
    raise Exception("Internal error - cannot find CMPL path")
# Location of the cmpl executable inside the installation.
cmplbin=cmplPath+os.sep+"bin"+os.sep+"cmpl.exe"
os.chdir(cmplPath)
os.environ.update({'CMPLBINARY':cmplbin })
cmplServerPath=cmplPath+os.sep+"cmplServer"
os.environ.update({'CMPLSERVERPATH':cmplServerPath })
# Make the CMPL python packages (cmplServer, pyCmpl) importable.
sys.path.append(cmplPath)
#************* CmplServerHandler ********************************
class CmplServerHandler():
    """Controller around the CmplServer / CmplGridScheduler processes.

    Reads the port and mode from the cmplServer.opt file and starts or
    stops the server by spawning the cmplServerHandler.py script through
    the bundled IronPython interpreter (ipyw.exe).
    """
    #*********** constructor ************
    def __init__(self):
        self.__serverMode = STANDALONE_SERVER  # from pyCmpl.CmplDefs
        self.__port=8008   # default; overwritten from cmplServer.opt by __getPort()
        self.__grid=False  # True when the option file configures a grid scheduler
        self.__getPort()
        # ipyw.exe two directories up from this script; used to spawn the
        # handler script without a console window.
        self.__ipyPath=os.path.dirname(os.path.abspath(sys.argv[0])) + os.sep + ".." + os.sep + ".." + os.sep+ "IronPython"+os.sep+"ipyw.exe"
        self.__serverScript=os.path.dirname(os.path.abspath(sys.argv[0])) +os.sep +"cmplServerHandler.py"
    #*********** end constructor *********
    #*********** gridMode ****************
    @property
    def gridMode(self):
        # Re-read the option file so mode changes are picked up live.
        self.__getPort()
        return self.__grid
    #*********** end gridMode ************
    #*********** port ********************
    @property
    def port(self):
        return self.__port
    #*********** port ********************
    #*********** getport *****************
    def __getPort(self):
        # Locate cmplServer.opt via CMPLSERVERPATH, falling back to the
        # directory above this script.
        try:
            self.__optFileName=os.environ['CMPLSERVERPATH']+ os.sep + "cmplServer.opt"
        except:
            self.__optFileName=os.path.dirname(os.path.abspath(sys.argv[0])) + os.sep + ".." + os.sep + "cmplServer.opt"
        self.__grid=False
        try:
            f = open(self.__optFileName, "r")
            # Options are simple "key = value" lines.
            for line in f:
                ret=line.split("=")
                if ret[0].strip().lower() == "cmplserverport":
                    if CmplTools.strIsNumber(ret[1].strip()):
                        self.__port = int(ret[1].strip())
                    else:
                        MessageBox.Show("Wrong option maxProblems in CmplServer option file <"+self.__optFileName+"> default value is used" )
                        self.__port=8008
                    continue
                if ret[0].strip().lower() == "cmplgridscheduler":
                    self.__grid=True
            f.close()
        except IOError, e:
            MessageBox.Show("Cannot read CmplServer option file <"+self.__optFileName+"> " )
    #*********** end getport *************
    #*********** startServer *************
    def startCmplServer(self):
        # Spawn the handler script in standalone or grid mode.
        self.__getPort()
        if not self.__grid:
            os.popen(self.__ipyPath+" "+self.__serverScript+ " -start " + str(self.__port) )
        else:
            os.popen(self.__ipyPath+" "+self.__serverScript+ " -startInGrid " + str(self.__port) )
    #*********** end startServer *********
    #*********** startScheduler *************
    def startScheduler(self):
        self.__getPort()
        os.popen(self.__ipyPath+" "+self.__serverScript+ " -startScheduler " + str(self.__port))
    #*********** end startScheduler *********
    #*********** stopServer ***************
    def stopServer(self):
        self.__getPort()
        os.popen(self.__ipyPath+" "+self.__serverScript+ " -stop " + str(self.__port))
    #*********** end stopServer ***********
    #*********** status ****************
    def status(self):
        """Query the local server via XML-RPC; CMPL_UNKNOWN when unreachable."""
        self.__getPort()
        url = "http://127.0.0.1:" +str(self.__port)
        ret=None
        try:
            cmplServer = xmlrpclib.ServerProxy(url)
            ret = cmplServer.status()[0]
            if ret==None:
                ret=CMPL_UNKNOWN
        except:
            ret=CMPL_UNKNOWN
        return ret
    #*********** end status ************
#************* end CmplServerHandler ****************************
#************* CMPLServerUI *************************************
class CMPLServerUI(Form):
    """Windows-Forms front end for starting/stopping a local CmplServer.

    Shows three tabs - a live tail of the server log, a live tail of the
    scheduler log, and an editable view of cmplServer.opt - plus buttons
    to start the server, start the scheduler, or stop whichever runs.

    NOTE(review): written for IronPython 2 / .NET WinForms; indentation
    reconstructed from a whitespace-mangled source.
    """

    #*************** constructor *****************************
    def __init__(self):
        self.__serverHandler=None
        # per-user folder that holds both log files
        self.__cmplServerPath = os.path.expanduser("~") + os.sep+ "CmplServer" +os.sep
        self.__logFileName = self.__cmplServerPath + "cmplServer.log"
        self.__schedulerLogFileName = self.__cmplServerPath + "cmplGridScheduler.log"

        if os.path.exists(self.__cmplServerPath) == False:
            try:
                os.mkdir(self.__cmplServerPath)
            except OSError, e:
                MessageBox.Show("Cannot create CmplServer path <"+self.__cmplServerPath+">" )
                sys.exit()

        try:
            # touch both log files so they exist, then open them for
            # tailing and seek to the end (only new lines are displayed)
            f1 = open(self.__logFileName, "a")
            f2 = open(self.__schedulerLogFileName, "a")
            f1.close()
            f2.close()
            self.__logFile=open(self.__logFileName, "r")
            self.__logFile.seek(0, 2)
            self.__schedulerLogFile=open(self.__schedulerLogFileName, "r")
            self.__schedulerLogFile.seek(0, 2)
        except IOError, e:
            # NOTE(review): concatenates the file object self.__logFile
            # (possibly not even assigned yet) instead of
            # self.__logFileName - this would itself raise inside the
            # error handler
            MessageBox.Show( "Cannot read CmplServer log file <"+self.__logFile+"> " + str(e) )
            sys.exit()

        # when True the tailer threads stop appending to the tabs
        self.__stopLogging=False

        self.Text = "CMPLServer"
        self.Height=500
        self.Width = 950

        self.infoLabel=Label()
        self.infoLabel.Location = Point(10,10)
        self.infoLabel.Width = 600
        self.infoLabel.Font=Font("Monaco",10)

        self.bStart=Button()
        self.bStart.Text = "Start Server"
        self.bStart.Location = Point(10,50)
        self.bStart.Width = 150
        self.bStart.Height = 35
        self.bStart.Click += self.startServer

        self.bStartScheduler=Button()
        self.bStartScheduler.Text = "Start Scheduler"
        self.bStartScheduler.Location = Point(170,50)
        self.bStartScheduler.Width = 150
        self.bStartScheduler.Height = 35
        self.bStartScheduler.Click += self.startScheduler

        self.bStop=Button()
        self.bStop.Text = "Stop"
        self.bStop.Location = Point(330,50)
        self.bStop.Width = 150
        self.bStop.Height = 35
        self.bStop.Click += self.stopServer
        self.bStop.Enabled = False

        self.__serverHandler=CmplServerHandler()

        # reflect the current server state in the buttons and label
        if self.__serverHandler.status()!=CMPL_UNKNOWN and self.__serverHandler.status()!=CMPLSERVER_ERROR and self.__serverHandler.status()!=CMPLGRID_SCHEDULER_ERROR:
            self.bStop.Enabled = True
            self.bStartScheduler.Enabled = False
            self.bStart.Enabled = False
            if not self.__serverHandler.gridMode:
                self.infoLabel.Text="CmplServer is running under http://"+socket.gethostbyname(socket.gethostname())+":"+str(self.__serverHandler.port)
            else:
                self.infoLabel.Text="CmplServer is running CMPLGrid"
        else:
            self.bStop.Enabled = False
            self.bStartScheduler.Enabled = True
            self.bStart.Enabled = True
            self.infoLabel.Text="CmplServer is not running"

        # NOTE(review): AcceptButton is assigned three times in a row;
        # only the last assignment (bStop) can take effect
        self.AcceptButton = self.bStart
        self.AcceptButton = self.bStartScheduler
        self.AcceptButton = self.bStop

        self.buttonPanel=Panel()
        self.buttonPanel.Dock=DockStyle.Bottom
        self.buttonPanel.Height=100
        self.buttonPanel.Controls.Add(self.infoLabel)
        self.buttonPanel.Controls.Add(self.bStart)
        self.buttonPanel.Controls.Add(self.bStartScheduler)
        self.buttonPanel.Controls.Add(self.bStop)

        self.tabControl = TabControl()
        self.tabControl.Location = Point(10,60)
        self.tabControl.Dock = DockStyle.Fill

        self.Controls.Add(self.tabControl)
        self.addTabPage("Server log")
        self.addTabPage("Scheduler log")
        self.addTabPage("Options")
        self.Controls.Add(self.buttonPanel)

        self.readOptionFile()

        self.CenterToScreen()
        self.Closing += self.onExit

        self.tabControl.TabPages[0].Controls[0].Font=Font("Courier New",8)
        self.tabControl.TabPages[1].Controls[0].Font=Font("Courier New",8)

        # background tailer threads for server log (0) and scheduler log (1)
        thread.start_new_thread(self.storeOutput, (0,) )
        thread.start_new_thread(self.storeOutput, (1,) )
    #*************** end constructor **************************

    #*************** addTabPage *******************************
    def addTabPage(self, label, text=""):
        """Append a tab page containing a single multiline text box."""
        textBox = TextBox()
        textBox.Multiline = True
        textBox.Dock = DockStyle.Fill
        textBox.ScrollBars = ScrollBars.Vertical
        textBox.AcceptsReturn = True
        textBox.AcceptsTab = True
        textBox.WordWrap = True
        textBox.Font=Font("Courier New",10)
        textBox.Text = text
        tPage = TabPage()
        tPage.Text = label
        tPage.Controls.Add(textBox)
        self.tabControl.TabPages.Add(tPage)
    #*************** end addTabPage ****************************

    #*************** startServer *******************************
    def startServer(self,key,e):
        """Click handler: save options, start the server and poll (up to
        ~90 s) until it answers, then update the UI accordingly."""
        self.infoLabel.Text="starting CmplServer ..."
        self.tabControl.TabPages[2].Controls[0].Enabled=False
        self.writeOptionFile()
        self.bStartScheduler.Enabled = False
        self.bStart.Enabled = False
        self.tabControl.SelectTab(0)
        self.__serverHandler.startCmplServer()
        tries=0
        while self.__serverHandler.status()==CMPL_UNKNOWN:
            time.sleep(1)
            tries+=1
            if tries>90:
                break
        if self.__serverHandler.status()!=CMPL_UNKNOWN and self.__serverHandler.status()!=CMPLSERVER_ERROR and self.__serverHandler.status()!=CMPLGRID_SCHEDULER_ERROR:
            if not self.__serverHandler.gridMode:
                self.infoLabel.Text="CmplServer is running under http://"+socket.gethostbyname(socket.gethostname())+":"+str(self.__serverHandler.port)
            else:
                self.infoLabel.Text="CmplServer is running CMPLGrid"
            self.bStop.Enabled = True
            self.bStartScheduler.Enabled = False
            self.bStart.Enabled = False
            self.tabControl.TabPages[2].Controls[0].Enabled=False
            self.__stopLogging=False
        else:
            self.infoLabel.Text+="failed ... "
            self.tabControl.TabPages[2].Controls[0].Enabled=True
            self.bStartScheduler.Enabled = True
            self.bStart.Enabled = True
            self.infoLabel.Text+="CmplServer is not running"
    #*************** startServer *******************************

    #*************** startScheduler ****************************
    def startScheduler(self, key,e):
        """Click handler: save options, start the grid scheduler and poll
        (up to ~90 s) for CMPLGRID_SCHEDULER_OK, then update the UI."""
        self.infoLabel.Text="starting CmplGridScheduler ..."
        self.tabControl.TabPages[2].Controls[0].Enabled=False
        self.writeOptionFile()
        self.bStartScheduler.Enabled = False
        self.bStart.Enabled = False
        self.tabControl.SelectTab(1)
        self.__serverHandler.startScheduler()
        tries=0
        while self.__serverHandler.status()!=CMPLGRID_SCHEDULER_OK:
            time.sleep(1)
            tries+=1
            if tries>90:
                break
        if self.__serverHandler.status()!=CMPL_UNKNOWN and self.__serverHandler.status()!=CMPLSERVER_ERROR and self.__serverHandler.status()!=CMPLGRID_SCHEDULER_ERROR:
            self.infoLabel.Text="CmplGridScheduler is running under http://"+socket.gethostbyname(socket.gethostname())+":"+str(self.__serverHandler.port)
            self.bStop.Enabled = True
            self.bStartScheduler.Enabled = False
            self.bStart.Enabled = False
            self.tabControl.TabPages[2].Controls[0].Enabled=False
            self.__stopLogging=False
        else:
            self.infoLabel.Text+="failed ... "
            self.tabControl.TabPages[2].Controls[0].Enabled=True
            self.bStartScheduler.Enabled = True
            self.bStart.Enabled = True
            self.infoLabel.Text+="CmplServer is not running"
    #*************** end startScheduler ************************

    #*************** stopServerAction **************************
    def stopServerAction(self):
        """Stop the server if one is reachable, then reset the UI.

        NOTE(review): indentation reconstructed - the UI-reset lines are
        assumed to run unconditionally, with only stopServer/sleep guarded
        by the status check.
        """
        if self.__serverHandler.status()!=CMPL_UNKNOWN :
            self.__serverHandler.stopServer()
            time.sleep(2)
        #self.__stopLogging=True
        self.infoLabel.Text="CmplServer is not running"
        self.bStartScheduler.Enabled = True
        self.bStart.Enabled = True
        self.bStop.Enabled = False
        self.tabControl.TabPages[2].Controls[0].Enabled=True
    #*************** end stopServerAction **********************

    #*************** stopServer ********************************
    def stopServer(self,key,e):
        # click handler for the Stop button
        self.stopServerAction()
    #*************** end stopServer ****************************

    #*************** readOptionFile ****************************
    def readOptionFile(self):
        """Load cmplServer.opt into the Options tab text box."""
        fContent=""
        try:
            self.__optFileName=os.environ['CMPLSERVERPATH']+ os.sep + "cmplServer.opt"
        except:
            self.__optFileName=os.path.dirname(os.path.abspath(sys.argv[0])) + os.sep + ".." + os.sep + "cmplServer.opt"
        try:
            f = open(self.__optFileName, 'r')
            fContent = f.read()
            f.close()
            self.tabControl.TabPages[2].Controls[0].Text=fContent
        except Exception,e :
            MessageBox.Show("Cannot read CmplServer option file <"+self.__optFileName+"> " + str(e) )
            sys.exit()
    #*************** end readOptionFile ************************

    #*************** writeOptionFile ***************************
    def writeOptionFile(self):
        """Write the Options tab content back to cmplServer.opt."""
        fContent=""
        try:
            self.__optFileName=os.environ['CMPLSERVERPATH']+ os.sep + "cmplServer.opt"
        except:
            self.__optFileName=os.path.dirname(os.path.abspath(sys.argv[0])) + os.sep + ".." + os.sep + "cmplServer.opt"
        try:
            f = open(self.__optFileName, 'w')
            fContent=self.tabControl.TabPages[2].Controls[0].Text
            f.write(fContent)
            f.close()
        except Exception,e :
            MessageBox.Show("Cannot write CmplServer option file <"+self.__optFileName+"> " + str(e) )
            sys.exit()
    #*************** end writeOptionFile ***********************

    #*********** storeOutput ***********************************
    def storeOutput(self, tabId):
        """Background thread: tail the server (tabId 0) or scheduler
        (tabId 1) log file into the matching tab.

        NOTE(review): the loop spins without sleeping while readline()
        returns '' at EOF - a busy wait; and SelectionStart always reads
        TabPages[0] (probably intended to use tabId).
        """
        try:
            #logFile = open(fName, "r")
            #logFile.seek(-250, 2)
            while True:
                if tabId==0:
                    line = self.__logFile.readline()
                else:
                    line = self.__schedulerLogFile.readline()
                if len(line)>0 and not self.__stopLogging:
                    self.tabControl.TabPages[tabId].Controls[0].Text+=line+"\r\n"
                    self.tabControl.TabPages[tabId].Controls[0].SelectionStart=self.tabControl.TabPages[0].Controls[0].Text.Length
                    self.tabControl.TabPages[tabId].Controls[0].ScrollToCaret()
            #logFile.close()
        except:
            self.tabControl.TabPages[tabId].Controls[0].Text="Error - Cannot read log file" +str(sys.exc_info()[1])
    #*********** end __storeOutput ****************************

    def onExit(self, key,e ):
        # Form.Closing handler - make sure a running server is stopped
        self.stopServerAction()
#************* end CMPLServerUI **********************************
#************* main *********************************************
# A file is locked to ensure that the UI is only opened one time.
try:
    # lock AUTHORS.txt as a single-instance guard: a second copy of the
    # console fails to acquire the .NET file lock and exits immediately
    f = FileStream( cmplPath+os.sep+'AUTHORS.txt', FileMode.Open)
    f.Lock(0, f.Length)
except:
    MessageBox.Show("The CmplServer console can be opened only once." )
    sys.exit()

ui=CMPLServerUI()
ui.ShowDialog()   # blocks until the window is closed

# release the single-instance lock on shutdown
f.Unlock(0, f.Length)
f.Close()
#************* main ********************************************* | Mangara/ArboralExplorer | lib/Cmpl/cmplServer/cmplServer/cmplServerUi.py | Python | apache-2.0 | 15,730 | [
"VisIt"
] | 8fba1487bc35b19751a7ef9b132f26930270b41a8c35989f3e0bd70947ea313e |
"""
provide a generic structure to support window functions,
similar to how we have a Groupby object
"""
from __future__ import division
import warnings
import numpy as np
from collections import defaultdict
from datetime import timedelta
from pandas.core.dtypes.generic import (
ABCSeries,
ABCDataFrame,
ABCDatetimeIndex,
ABCTimedeltaIndex,
ABCPeriodIndex,
ABCDateOffset)
from pandas.core.dtypes.common import (
is_integer,
is_bool,
is_float_dtype,
is_integer_dtype,
needs_i8_conversion,
is_timedelta64_dtype,
is_list_like,
_ensure_float64,
is_scalar)
from pandas.core.base import (PandasObject, SelectionMixin,
GroupByMixin)
import pandas.core.common as com
import pandas._libs.window as _window
from pandas import compat
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (Substitution, Appender,
cache_readonly)
from pandas.core.generic import _shared_docs
from textwrap import dedent
_shared_docs = dict(**_shared_docs)
_doc_template = """
Returns
-------
same type as input
See also
--------
pandas.Series.%(name)s
pandas.DataFrame.%(name)s
"""
class _Window(PandasObject, SelectionMixin):
    """Shared base class for Window / Rolling / Expanding objects.

    Holds the window parameters, splits the selected data into homogeneous
    dtype blocks, and wraps raw ndarray results back into pandas objects.
    """

    # attributes echoed by __unicode__ and propagated on shallow copies
    _attributes = ['window', 'min_periods', 'freq', 'center', 'win_type',
                   'axis', 'on', 'closed']
    exclusions = set()

    def __init__(self, obj, window=None, min_periods=None, freq=None,
                 center=False, win_type=None, axis=0, on=None, closed=None,
                 **kwargs):

        if freq is not None:
            warnings.warn("The freq kw is deprecated and will be removed in a "
                          "future version. You can resample prior to passing "
                          "to a window function", FutureWarning, stacklevel=3)

        self.__dict__.update(kwargs)
        self.blocks = []
        self.obj = obj
        self.on = on
        self.closed = closed
        self.window = window
        self.min_periods = min_periods
        self.freq = freq
        self.center = center
        self.win_type = win_type
        self.win_freq = None
        self.axis = obj._get_axis_number(axis) if axis is not None else None
        self.validate()

    @property
    def _constructor(self):
        # class used to rebuild a window object over a new selection
        return Window

    @property
    def is_datetimelike(self):
        # overridden by subclasses that support offset-based windows
        return None

    @property
    def _on(self):
        # the index/column the window is computed over; see subclasses
        return None

    @property
    def is_freq_type(self):
        return self.win_type == 'freq'

    def validate(self):
        """Validate the common constructor arguments; subclasses extend."""
        if self.center is not None and not is_bool(self.center):
            raise ValueError("center must be a boolean")
        if self.min_periods is not None and not \
                is_integer(self.min_periods):
            raise ValueError("min_periods must be an integer")
        if self.closed is not None and self.closed not in \
                ['right', 'both', 'left', 'neither']:
            raise ValueError("closed must be 'right', 'left', 'both' or "
                             "'neither'")

    def _convert_freq(self, how=None):
        """ resample according to the how, return a new object """
        obj = self._selected_obj
        index = None
        if (self.freq is not None and
                isinstance(obj, (ABCSeries, ABCDataFrame))):
            if how is not None:
                warnings.warn("The how kw argument is deprecated and removed "
                              "in a future version. You can resample prior "
                              "to passing to a window function", FutureWarning,
                              stacklevel=6)

            obj = obj.resample(self.freq).aggregate(how or 'asfreq')

        return obj, index

    def _create_blocks(self, how):
        """ split data into blocks & return conformed data """

        obj, index = self._convert_freq(how)
        if index is not None:
            index = self._on

        # filter out the on from the object
        if self.on is not None:
            if obj.ndim == 2:
                obj = obj.reindex(columns=obj.columns.difference([self.on]),
                                  copy=False)
        blocks = obj._to_dict_of_blocks(copy=False).values()

        return blocks, obj, index

    def _gotitem(self, key, ndim, subset=None):
        """
        sub-classes to define
        return a sliced object

        Parameters
        ----------
        key : string / list of selections
        ndim : 1,2
            requested ndim of result
        subset : object, default None
            subset to act on
        """

        # create a new object to prevent aliasing
        if subset is None:
            subset = self.obj
        self = self._shallow_copy(subset)
        self._reset_cache()
        if subset.ndim == 2:
            if is_scalar(key) and key in subset or is_list_like(key):
                self._selection = key
        return self

    def __getattr__(self, attr):
        # fall back to column access: r.colname selects that column
        if attr in self._internal_names_set:
            return object.__getattribute__(self, attr)
        if attr in self.obj:
            return self[attr]

        raise AttributeError("%r object has no attribute %r" %
                             (type(self).__name__, attr))

    def _dir_additions(self):
        return self.obj._dir_additions()

    def _get_window(self, other=None):
        return self.window

    @property
    def _window_type(self):
        return self.__class__.__name__

    def __unicode__(self):
        """ provide a nice str repr of our rolling object """

        attrs = ["{k}={v}".format(k=k, v=getattr(self, k))
                 for k in self._attributes
                 if getattr(self, k, None) is not None]
        return "{klass} [{attrs}]".format(klass=self._window_type,
                                          attrs=','.join(attrs))

    def _get_index(self, index=None):
        """
        Return index as ndarrays

        Returns
        -------
        tuple of (index, index_as_ndarray)
        """

        if self.is_freq_type:
            if index is None:
                index = self._on
            # asi8: the index as int64 nanosecond values, consumed by the
            # cython window routines for variable-width windows
            return index, index.asi8
        return index, index

    def _prep_values(self, values=None, kill_inf=True, how=None):
        """Coerce block values to float64; optionally blank infs to NaN."""

        if values is None:
            values = getattr(self._selected_obj, 'values', self._selected_obj)

        # GH #12373 : rolling functions error on float32 data
        # make sure the data is coerced to float64
        if is_float_dtype(values.dtype):
            values = _ensure_float64(values)
        elif is_integer_dtype(values.dtype):
            values = _ensure_float64(values)
        elif needs_i8_conversion(values.dtype):
            raise NotImplementedError("ops for {action} for this "
                                      "dtype {dtype} are not "
                                      "implemented".format(
                                          action=self._window_type,
                                          dtype=values.dtype))
        else:
            try:
                values = _ensure_float64(values)
            except (ValueError, TypeError):
                raise TypeError("cannot handle this type -> {0}"
                                "".format(values.dtype))

        if kill_inf:
            values = values.copy()
            values[np.isinf(values)] = np.NaN

        return values

    def _wrap_result(self, result, block=None, obj=None):
        """ wrap a single result """

        if obj is None:
            obj = self._selected_obj
        index = obj.index

        if isinstance(result, np.ndarray):

            # coerce if necessary
            if block is not None:
                if is_timedelta64_dtype(block.values.dtype):
                    from pandas import to_timedelta
                    result = to_timedelta(
                        result.ravel(), unit='ns').values.reshape(result.shape)

            if result.ndim == 1:
                from pandas import Series
                return Series(result, index, name=obj.name)

            # NOTE(review): relies on block being non-None for 2d results
            return type(obj)(result, index=index, columns=block.columns)
        return result

    def _wrap_results(self, results, blocks, obj):
        """
        wrap the results

        Parameters
        ----------
        results : list of ndarrays
        blocks : list of blocks
        obj : conformed data (may be resampled)
        """

        from pandas import Series, concat
        from pandas.core.index import _ensure_index

        final = []
        for result, block in zip(results, blocks):

            result = self._wrap_result(result, block=block, obj=obj)
            if result.ndim == 1:
                return result
            final.append(result)

        # if we have an 'on' column
        # we want to put it back into the results
        # in the same location
        columns = self._selected_obj.columns
        if self.on is not None and not self._on.equals(obj.index):

            name = self._on.name
            final.append(Series(self._on, index=obj.index, name=name))

            if self._selection is not None:

                selection = _ensure_index(self._selection)

                # need to reorder to include original location of
                # the on column (if its not already there)
                if name not in selection:
                    columns = self.obj.columns
                    indexer = columns.get_indexer(selection.tolist() + [name])
                    columns = columns.take(sorted(indexer))

        if not len(final):
            return obj.astype('float64')
        return concat(final, axis=1).reindex(columns=columns, copy=False)

    def _center_window(self, result, window):
        """ center the result in the window """
        if self.axis > result.ndim - 1:
            raise ValueError("Requested axis is larger then no. of argument "
                             "dimensions")

        offset = _offset(window, True)
        if offset > 0:
            if isinstance(result, (ABCSeries, ABCDataFrame)):
                result = result.slice_shift(-offset, axis=self.axis)
            else:
                lead_indexer = [slice(None)] * result.ndim
                lead_indexer[self.axis] = slice(offset, None)
                result = np.copy(result[tuple(lead_indexer)])
        return result

    def aggregate(self, arg, *args, **kwargs):
        # delegate to SelectionMixin._aggregate; fall back to apply for
        # plain callables
        result, how = self._aggregate(arg, *args, **kwargs)
        if result is None:
            return self.apply(arg, args=args, kwargs=kwargs)
        return result

    agg = aggregate
_shared_docs['sum'] = dedent("""
%(name)s sum
Parameters
----------
how : string, default None
.. deprecated:: 0.18.0
Method for down- or re-sampling""")
_shared_docs['mean'] = dedent("""
%(name)s mean
Parameters
----------
how : string, default None
.. deprecated:: 0.18.0
Method for down- or re-sampling""")
class Window(_Window):
    """
    Provides rolling window calculations.

    .. versionadded:: 0.18.0

    Parameters
    ----------
    window : int, or offset
        Size of the moving window. This is the number of observations used for
        calculating the statistic. Each window will be a fixed size.

        If its an offset then this will be the time period of each window. Each
        window will be a variable sized based on the observations included in
        the time-period. This is only valid for datetimelike indexes. This is
        new in 0.19.0
    min_periods : int, default None
        Minimum number of observations in window required to have a value
        (otherwise result is NA). For a window that is specified by an offset,
        this will default to 1.
    freq : string or DateOffset object, optional (default None)
        .. deprecated:: 0.18.0
           Frequency to conform the data to before computing the statistic.
           Specified as a frequency string or DateOffset object.
    center : boolean, default False
        Set the labels at the center of the window.
    win_type : string, default None
        Provide a window type. See the notes below.
    on : string, optional
        For a DataFrame, column on which to calculate
        the rolling window, rather than the index
    closed : string, default None
        Make the interval closed on the 'right', 'left', 'both' or
        'neither' endpoints.
        For offset-based windows, it defaults to 'right'.
        For fixed windows, defaults to 'both'. Remaining cases not implemented
        for fixed windows.

        .. versionadded:: 0.20.0

    axis : int or string, default 0

    Returns
    -------
    a Window or Rolling sub-classed for the particular operation

    Examples
    --------

    >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
    >>> df
         B
    0  0.0
    1  1.0
    2  2.0
    3  NaN
    4  4.0

    Rolling sum with a window length of 2, using the 'triang'
    window type.

    >>> df.rolling(2, win_type='triang').sum()
         B
    0  NaN
    1  1.0
    2  2.5
    3  NaN
    4  NaN

    Rolling sum with a window length of 2, min_periods defaults
    to the window length.

    >>> df.rolling(2).sum()
         B
    0  NaN
    1  1.0
    2  3.0
    3  NaN
    4  NaN

    Same as above, but explicitly set the min_periods

    >>> df.rolling(2, min_periods=1).sum()
         B
    0  0.0
    1  1.0
    2  3.0
    3  2.0
    4  4.0

    A ragged (meaning not-a-regular frequency), time-indexed DataFrame

    >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]},
    ....:                 index = [pd.Timestamp('20130101 09:00:00'),
    ....:                          pd.Timestamp('20130101 09:00:02'),
    ....:                          pd.Timestamp('20130101 09:00:03'),
    ....:                          pd.Timestamp('20130101 09:00:05'),
    ....:                          pd.Timestamp('20130101 09:00:06')])

    >>> df
                           B
    2013-01-01 09:00:00  0.0
    2013-01-01 09:00:02  1.0
    2013-01-01 09:00:03  2.0
    2013-01-01 09:00:05  NaN
    2013-01-01 09:00:06  4.0

    Contrasting to an integer rolling window, this will roll a variable
    length window corresponding to the time period.
    The default for min_periods is 1.

    >>> df.rolling('2s').sum()
                           B
    2013-01-01 09:00:00  0.0
    2013-01-01 09:00:02  1.0
    2013-01-01 09:00:03  3.0
    2013-01-01 09:00:05  NaN
    2013-01-01 09:00:06  4.0

    Notes
    -----
    By default, the result is set to the right edge of the window. This can be
    changed to the center of the window by setting ``center=True``.

    The `freq` keyword is used to conform time series data to a specified
    frequency by resampling the data. This is done with the default parameters
    of :meth:`~pandas.Series.resample` (i.e. using the `mean`).

    To learn more about the offsets & frequency strings, please see `this link
    <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.

    The recognized win_types are:

    * ``boxcar``
    * ``triang``
    * ``blackman``
    * ``hamming``
    * ``bartlett``
    * ``parzen``
    * ``bohman``
    * ``blackmanharris``
    * ``nuttall``
    * ``barthann``
    * ``kaiser`` (needs beta)
    * ``gaussian`` (needs std)
    * ``general_gaussian`` (needs power, width)
    * ``slepian`` (needs width).
    """

    def validate(self):
        """Validate window/win_type; scipy supplies the weight functions."""
        super(Window, self).validate()

        window = self.window
        if isinstance(window, (list, tuple, np.ndarray)):
            pass
        elif is_integer(window):
            if window < 0:
                raise ValueError("window must be non-negative")
            try:
                import scipy.signal as sig
            except ImportError:
                raise ImportError('Please install scipy to generate window '
                                  'weight')

            if not isinstance(self.win_type, compat.string_types):
                raise ValueError('Invalid win_type {0}'.format(self.win_type))
            if getattr(sig, self.win_type, None) is None:
                raise ValueError('Invalid win_type {0}'.format(self.win_type))
        else:
            raise ValueError('Invalid window {0}'.format(window))

    def _prep_window(self, **kwargs):
        """
        provide validation for our window type, return the window
        we have already been validated
        """

        window = self._get_window()
        if isinstance(window, (list, tuple, np.ndarray)):
            return com._asarray_tuplesafe(window).astype(float)
        elif is_integer(window):
            import scipy.signal as sig

            # the below may pop from kwargs
            def _validate_win_type(win_type, kwargs):
                # map win_type -> names of its required extra parameters
                arg_map = {'kaiser': ['beta'],
                           'gaussian': ['std'],
                           'general_gaussian': ['power', 'width'],
                           'slepian': ['width']}
                if win_type in arg_map:
                    return tuple([win_type] + _pop_args(win_type,
                                                        arg_map[win_type],
                                                        kwargs))
                return win_type

            def _pop_args(win_type, arg_names, kwargs):
                msg = '%s window requires %%s' % win_type
                all_args = []
                for n in arg_names:
                    if n not in kwargs:
                        raise ValueError(msg % n)
                    all_args.append(kwargs.pop(n))
                return all_args

            win_type = _validate_win_type(self.win_type, kwargs)
            # GH #15662. `False` makes symmetric window, rather than periodic.
            return sig.get_window(win_type, window, False).astype(float)

    def _apply_window(self, mean=True, how=None, **kwargs):
        """
        Applies a moving window of type ``window_type`` on the data.

        Parameters
        ----------
        mean : boolean, default True
            If True computes weighted mean, else weighted sum
        how : string, default to None
            .. deprecated:: 0.18.0
               how to resample

        Returns
        -------
        y : type of input argument
        """
        window = self._prep_window(**kwargs)
        center = self.center

        blocks, obj, index = self._create_blocks(how=how)
        results = []
        for b in blocks:
            try:
                values = self._prep_values(b.values)
            except TypeError:
                # non-numeric block: pass through unchanged
                results.append(b.values.copy())
                continue

            if values.size == 0:
                results.append(values.copy())
                continue

            # pad with NaNs when centering so the cython routine sees a
            # full trailing window
            offset = _offset(window, center)
            additional_nans = np.array([np.NaN] * offset)

            def f(arg, *args, **kwargs):
                minp = _use_window(self.min_periods, len(window))
                return _window.roll_window(np.concatenate((arg,
                                                           additional_nans))
                                           if center else arg, window, minp,
                                           avg=mean)

            result = np.apply_along_axis(f, self.axis, values)

            if center:
                result = self._center_window(result, window)
            results.append(result)

        return self._wrap_results(results, blocks, obj)

    _agg_doc = dedent("""
    Examples
    --------

    >>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
    >>> df
              A         B         C
    0 -2.385977 -0.102758  0.438822
    1 -1.004295  0.905829 -0.954544
    2  0.735167 -0.165272 -1.619346
    3 -0.702657 -1.340923 -0.706334
    4 -0.246845  0.211596 -0.901819
    5  2.463718  3.157577 -1.380906
    6 -1.142255  2.340594 -0.039875
    7  1.396598 -1.647453  1.677227
    8 -0.543425  1.761277 -0.220481
    9 -0.640505  0.289374 -1.550670

    >>> df.rolling(3, win_type='boxcar').agg('mean')
              A         B         C
    0       NaN       NaN       NaN
    1       NaN       NaN       NaN
    2 -0.885035  0.212600 -0.711689
    3 -0.323928 -0.200122 -1.093408
    4 -0.071445 -0.431533 -1.075833
    5  0.504739  0.676083 -0.996353
    6  0.358206  1.903256 -0.774200
    7  0.906020  1.283573  0.085482
    8 -0.096361  0.818139  0.472290
    9  0.070889  0.134399 -0.031308

    See also
    --------
    pandas.DataFrame.rolling.aggregate
    pandas.DataFrame.aggregate

    """)

    @Appender(_agg_doc)
    @Appender(_shared_docs['aggregate'] % dict(
        versionadded='',
        klass='Series/DataFrame'))
    def aggregate(self, arg, *args, **kwargs):
        result, how = self._aggregate(arg, *args, **kwargs)
        if result is None:

            # these must apply directly
            result = arg(self)

        return result

    agg = aggregate

    @Substitution(name='window')
    @Appender(_doc_template)
    @Appender(_shared_docs['sum'])
    def sum(self, *args, **kwargs):
        nv.validate_window_func('sum', args, kwargs)
        return self._apply_window(mean=False, **kwargs)

    @Substitution(name='window')
    @Appender(_doc_template)
    @Appender(_shared_docs['mean'])
    def mean(self, *args, **kwargs):
        nv.validate_window_func('mean', args, kwargs)
        return self._apply_window(mean=True, **kwargs)
class _GroupByMixin(GroupByMixin):
    """ provide the groupby facilities """

    def __init__(self, obj, *args, **kwargs):
        parent = kwargs.pop('parent', None)  # noqa
        groupby = kwargs.pop('groupby', None)
        if groupby is None:
            # obj is itself a groupby-window wrapper: unwrap it
            groupby, obj = obj, obj.obj
        self._groupby = groupby
        self._groupby.mutated = True
        self._groupby.grouper.mutated = True
        # NOTE(review): starts the MRO walk after GroupByMixin (the base of
        # this mixin), i.e. directly at the window class's __init__ - looks
        # deliberate, but worth confirming before changing
        super(GroupByMixin, self).__init__(obj, *args, **kwargs)

    # methods dispatched straight to the grouped window object
    count = GroupByMixin._dispatch('count')
    corr = GroupByMixin._dispatch('corr', other=None, pairwise=None)
    cov = GroupByMixin._dispatch('cov', other=None, pairwise=None)

    def _apply(self, func, name, window=None, center=None,
               check_minp=None, how=None, **kwargs):
        """
        dispatch to apply; we are stripping all of the _apply kwargs and
        performing the original function call on the grouped object
        """

        def f(x, name=name, *args):
            x = self._shallow_copy(x)

            if isinstance(name, compat.string_types):
                return getattr(x, name)(*args, **kwargs)

            return x.apply(name, *args, **kwargs)

        return self._groupby.apply(f)
class _Rolling(_Window):
    """Base for Rolling objects: drives the per-block cython apply loop."""

    @property
    def _constructor(self):
        return Rolling

    def _apply(self, func, name=None, window=None, center=None,
               check_minp=None, how=None, **kwargs):
        """
        Rolling statistical measure using supplied function. Designed to be
        used with passed-in Cython array-based functions.

        Parameters
        ----------
        func : string/callable to apply
        name : string, optional
            name of this function
        window : int/array, default to _get_window()
        center : boolean, default to self.center
        check_minp : function, default to _use_window
        how : string, default to None
            .. deprecated:: 0.18.0
               how to resample

        Returns
        -------
        y : type of input
        """
        if center is None:
            center = self.center
        if window is None:
            window = self._get_window()

        if check_minp is None:
            check_minp = _use_window

        blocks, obj, index = self._create_blocks(how=how)
        index, indexi = self._get_index(index=index)
        results = []
        for b in blocks:
            try:
                values = self._prep_values(b.values)
            except TypeError:
                # non-numeric block: pass through unchanged
                results.append(b.values.copy())
                continue

            if values.size == 0:
                results.append(values.copy())
                continue

            # if we have a string function name, wrap it
            if isinstance(func, compat.string_types):
                cfunc = getattr(_window, func, None)
                if cfunc is None:
                    raise ValueError("we do not support this function "
                                     "in _window.{0}".format(func))

                # rebind func to a closure over the resolved cython routine
                def func(arg, window, min_periods=None, closed=None):
                    minp = check_minp(min_periods, window)
                    # ensure we are only rolling on floats
                    arg = _ensure_float64(arg)
                    return cfunc(arg,
                                 window, minp, indexi, closed, **kwargs)

            # calculation function
            if center:
                # pad with NaNs so the trailing window is complete, then
                # shift results back in _center_window
                offset = _offset(window, center)
                additional_nans = np.array([np.NaN] * offset)

                def calc(x):
                    return func(np.concatenate((x, additional_nans)),
                                window, min_periods=self.min_periods,
                                closed=self.closed)
            else:

                def calc(x):
                    return func(x, window, min_periods=self.min_periods,
                                closed=self.closed)

            with np.errstate(all='ignore'):
                if values.ndim > 1:
                    result = np.apply_along_axis(calc, self.axis, values)
                else:
                    result = calc(values)

            if center:
                result = self._center_window(result, window)

            results.append(result)

        return self._wrap_results(results, blocks, obj)
class _Rolling_and_Expanding(_Rolling):
_shared_docs['count'] = """%(name)s count of number of non-NaN
observations inside provided window."""

def count(self):
    """Count non-NaN observations per window by rolling a sum over a
    0/1 indicator of each block."""
    blocks, obj, index = self._create_blocks(how=None)
    index, indexi = self._get_index(index=index)

    win = self._get_window()
    # for trailing windows never use a window longer than the data
    if not self.center:
        win = min(win, len(obj))

    tallies = []
    for blk in blocks:
        indicator = blk.notna().astype(int)
        rolled = self._constructor(indicator, window=win, min_periods=0,
                                   center=self.center,
                                   closed=self.closed).sum()
        tallies.append(rolled)

    return self._wrap_results(tallies, blocks, obj)
_shared_docs['apply'] = dedent(r"""
%(name)s function apply

Parameters
----------
func : function
    Must produce a single value from an ndarray input
\*args and \*\*kwargs are passed to the function""")

def apply(self, func, args=(), kwargs=None):
    """Apply ``func`` over each window.

    Parameters
    ----------
    func : callable
        Must produce a single value from an ndarray input.
    args : tuple, default ()
        Positional arguments forwarded to ``func``.
    kwargs : dict, optional
        Keyword arguments forwarded to ``func``. ``None`` means no
        keyword arguments (replaces the shared-mutable ``kwargs={}``
        default of the previous signature; passing a dict still works).

    Returns
    -------
    Same type as the calling object.
    """
    # Copy before popping so '_level' is never removed from the caller's
    # dict in place (the old code popped from the shared {} default or
    # the caller-supplied mapping).
    kwargs = {} if kwargs is None else dict(kwargs)
    # TODO: _level is unused?
    _level = kwargs.pop('_level', None)  # noqa
    window = self._get_window()
    offset = _offset(window, self.center)
    index, indexi = self._get_index()

    def f(arg, window, min_periods, closed):
        minp = _use_window(min_periods, window)
        return _window.roll_generic(arg, window, minp, indexi, closed,
                                    offset, func, args, kwargs)

    return self._apply(f, func, args=args, kwargs=kwargs,
                       center=False)
def sum(self, *args, **kwargs):
    """Rolling/expanding sum of the values in each window; extra args are
    only validated for numpy compatibility."""
    nv.validate_window_func('sum', args, kwargs)
    result = self._apply('roll_sum', 'sum', **kwargs)
    return result
_shared_docs['max'] = dedent("""
%(name)s maximum

Parameters
----------
how : string, default 'max'
    .. deprecated:: 0.18.0
       Method for down- or re-sampling""")

def max(self, how=None, *args, **kwargs):
    """Rolling/expanding maximum; `how` only matters with the deprecated
    freq resampling."""
    nv.validate_window_func('max', args, kwargs)
    if how is None and self.freq is not None:
        how = 'max'
    return self._apply('roll_max', 'max', how=how, **kwargs)
_shared_docs['min'] = dedent("""
%(name)s minimum

Parameters
----------
how : string, default 'min'
    .. deprecated:: 0.18.0
       Method for down- or re-sampling""")

def min(self, how=None, *args, **kwargs):
    """Rolling/expanding minimum; `how` only matters with the deprecated
    freq resampling."""
    nv.validate_window_func('min', args, kwargs)
    if how is None and self.freq is not None:
        how = 'min'
    return self._apply('roll_min', 'min', how=how, **kwargs)
def mean(self, *args, **kwargs):
nv.validate_window_func('mean', args, kwargs)
return self._apply('roll_mean', 'mean', **kwargs)
_shared_docs['median'] = dedent("""
%(name)s median
Parameters
----------
how : string, default 'median'
.. deprecated:: 0.18.0
Method for down- or re-sampling""")
def median(self, how=None, **kwargs):
if self.freq is not None and how is None:
how = 'median'
return self._apply('roll_median_c', 'median', how=how, **kwargs)
_shared_docs['std'] = dedent("""
%(name)s standard deviation
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.""")
def std(self, ddof=1, *args, **kwargs):
nv.validate_window_func('std', args, kwargs)
window = self._get_window()
index, indexi = self._get_index()
def f(arg, *args, **kwargs):
minp = _require_min_periods(1)(self.min_periods, window)
return _zsqrt(_window.roll_var(arg, window, minp, indexi,
self.closed, ddof))
return self._apply(f, 'std', check_minp=_require_min_periods(1),
ddof=ddof, **kwargs)
_shared_docs['var'] = dedent("""
%(name)s variance
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.""")
def var(self, ddof=1, *args, **kwargs):
nv.validate_window_func('var', args, kwargs)
return self._apply('roll_var', 'var',
check_minp=_require_min_periods(1), ddof=ddof,
**kwargs)
_shared_docs['skew'] = """Unbiased %(name)s skewness"""
def skew(self, **kwargs):
return self._apply('roll_skew', 'skew',
check_minp=_require_min_periods(3), **kwargs)
_shared_docs['kurt'] = """Unbiased %(name)s kurtosis"""
def kurt(self, **kwargs):
return self._apply('roll_kurt', 'kurt',
check_minp=_require_min_periods(4), **kwargs)
_shared_docs['quantile'] = dedent("""
%(name)s quantile
Parameters
----------
quantile : float
0 <= quantile <= 1""")
def quantile(self, quantile, **kwargs):
window = self._get_window()
index, indexi = self._get_index()
def f(arg, *args, **kwargs):
minp = _use_window(self.min_periods, window)
if quantile == 1.0:
return _window.roll_max(arg, window, minp, indexi,
self.closed)
elif quantile == 0.0:
return _window.roll_min(arg, window, minp, indexi,
self.closed)
else:
return _window.roll_quantile(arg, window, minp, indexi,
self.closed, quantile)
return self._apply(f, 'quantile', quantile=quantile,
**kwargs)
_shared_docs['cov'] = dedent("""
%(name)s sample covariance
Parameters
----------
other : Series, DataFrame, or ndarray, optional
if not supplied then will default to self and produce pairwise output
pairwise : bool, default None
If False then only matching columns between self and other will be used
and the output will be a DataFrame.
If True then all pairwise combinations will be calculated and the
output will be a MultiIndexed DataFrame in the case of DataFrame
inputs. In the case of missing elements, only complete pairwise
observations will be used.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.""")
def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
if other is None:
other = self._selected_obj
# only default unset
pairwise = True if pairwise is None else pairwise
other = self._shallow_copy(other)
# GH 16058: offset window
if self.is_freq_type:
window = self.win_freq
else:
window = self._get_window(other)
def _get_cov(X, Y):
# GH #12373 : rolling functions error on float32 data
# to avoid potential overflow, cast the data to float64
X = X.astype('float64')
Y = Y.astype('float64')
mean = lambda x: x.rolling(window, self.min_periods,
center=self.center).mean(**kwargs)
count = (X + Y).rolling(window=window,
center=self.center).count(**kwargs)
bias_adj = count / (count - ddof)
return (mean(X * Y) - mean(X) * mean(Y)) * bias_adj
return _flex_binary_moment(self._selected_obj, other._selected_obj,
_get_cov, pairwise=bool(pairwise))
_shared_docs['corr'] = dedent("""
%(name)s sample correlation
Parameters
----------
other : Series, DataFrame, or ndarray, optional
if not supplied then will default to self and produce pairwise output
pairwise : bool, default None
If False then only matching columns between self and other will be
used and the output will be a DataFrame.
If True then all pairwise combinations will be calculated and the
output will be a MultiIndex DataFrame in the case of DataFrame inputs.
In the case of missing elements, only complete pairwise observations
will be used.""")
def corr(self, other=None, pairwise=None, **kwargs):
if other is None:
other = self._selected_obj
# only default unset
pairwise = True if pairwise is None else pairwise
other = self._shallow_copy(other)
window = self._get_window(other)
def _get_corr(a, b):
a = a.rolling(window=window, min_periods=self.min_periods,
freq=self.freq, center=self.center)
b = b.rolling(window=window, min_periods=self.min_periods,
freq=self.freq, center=self.center)
return a.cov(b, **kwargs) / (a.std(**kwargs) * b.std(**kwargs))
return _flex_binary_moment(self._selected_obj, other._selected_obj,
_get_corr, pairwise=bool(pairwise))
class Rolling(_Rolling_and_Expanding):
    # Concrete implementation behind ``.rolling(...)`` for fixed-size and
    # offset ("freq") based windows.

    @cache_readonly
    def is_datetimelike(self):
        # True when the axis being rolled over is datetime-like; this is
        # what enables offset-based (variable-width) windows.
        return isinstance(self._on,
                          (ABCDatetimeIndex,
                           ABCTimedeltaIndex,
                           ABCPeriodIndex))

    @cache_readonly
    def _on(self):
        # Resolve the axis we roll over: the object's index by default,
        # or the DataFrame column named by ``self.on``.
        if self.on is None:
            return self.obj.index
        elif (isinstance(self.obj, ABCDataFrame) and
                self.on in self.obj.columns):
            from pandas import Index
            return Index(self.obj[self.on])
        else:
            raise ValueError("invalid on specified as {0}, "
                             "must be a column (if DataFrame) "
                             "or None".format(self.on))

    def validate(self):
        super(Rolling, self).validate()

        # we allow rolling on a datetimelike index
        if ((self.obj.empty or self.is_datetimelike) and
                isinstance(self.window, (compat.string_types, ABCDateOffset,
                                         timedelta))):

            self._validate_monotonic()
            freq = self._validate_freq()

            # we don't allow center
            if self.center:
                raise NotImplementedError("center is not implemented "
                                          "for datetimelike and offset "
                                          "based windows")

            # this will raise ValueError on non-fixed freqs
            self.win_freq = self.window
            self.window = freq.nanos
            self.win_type = 'freq'

            # min_periods must be an integer
            if self.min_periods is None:
                self.min_periods = 1

        elif not is_integer(self.window):
            raise ValueError("window must be an integer")
        elif self.window < 0:
            raise ValueError("window must be non-negative")

        if not self.is_datetimelike and self.closed is not None:
            raise ValueError("closed only implemented for datetimelike "
                             "and offset based windows")

    def _validate_monotonic(self):
        """ validate on is monotonic """
        if not self._on.is_monotonic:
            formatted = self.on or 'index'
            raise ValueError("{0} must be "
                             "monotonic".format(formatted))

    def _validate_freq(self):
        """ validate & return our freq """
        from pandas.tseries.frequencies import to_offset
        try:
            return to_offset(self.window)
        except (TypeError, ValueError):
            # BUGFIX: error message previously read "in not compat"
            raise ValueError("passed window {0} is not "
                             "compat with a datetimelike "
                             "index".format(self.window))

    _agg_doc = dedent("""
    Examples
    --------

    >>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
    >>> df
              A         B         C
    0 -2.385977 -0.102758  0.438822
    1 -1.004295  0.905829 -0.954544
    2  0.735167 -0.165272 -1.619346
    3 -0.702657 -1.340923 -0.706334
    4 -0.246845  0.211596 -0.901819
    5  2.463718  3.157577 -1.380906
    6 -1.142255  2.340594 -0.039875
    7  1.396598 -1.647453  1.677227
    8 -0.543425  1.761277 -0.220481
    9 -0.640505  0.289374 -1.550670

    >>> df.rolling(3).sum()
              A         B         C
    0       NaN       NaN       NaN
    1       NaN       NaN       NaN
    2 -2.655105  0.637799 -2.135068
    3 -0.971785 -0.600366 -3.280224
    4 -0.214334 -1.294599 -3.227500
    5  1.514216  2.028250 -2.989060
    6  1.074618  5.709767 -2.322600
    7  2.718061  3.850718  0.256446
    8 -0.289082  2.454418  1.416871
    9  0.212668  0.403198 -0.093924

    >>> df.rolling(3).agg({'A':'sum', 'B':'min'})
              A         B
    0       NaN       NaN
    1       NaN       NaN
    2 -2.655105 -0.165272
    3 -0.971785 -1.340923
    4 -0.214334 -1.340923
    5  1.514216 -1.340923
    6  1.074618  0.211596
    7  2.718061 -1.647453
    8 -0.289082 -1.647453
    9  0.212668 -1.647453

    See also
    --------
    pandas.Series.rolling
    pandas.DataFrame.rolling
    """)

    @Appender(_agg_doc)
    @Appender(_shared_docs['aggregate'] % dict(
        versionadded='',
        klass='Series/DataFrame'))
    def aggregate(self, arg, *args, **kwargs):
        return super(Rolling, self).aggregate(arg, *args, **kwargs)

    agg = aggregate

    @Substitution(name='rolling')
    @Appender(_doc_template)
    @Appender(_shared_docs['count'])
    def count(self):

        # different impl for freq counting
        if self.is_freq_type:
            return self._apply('roll_count', 'count')

        return super(Rolling, self).count()

    @Substitution(name='rolling')
    @Appender(_doc_template)
    @Appender(_shared_docs['apply'])
    def apply(self, func, args=(), kwargs=None):
        # BUGFIX: default was a shared mutable ``{}``; ``None`` is
        # backward compatible and avoids the shared-dict pitfall.
        if kwargs is None:
            kwargs = {}
        return super(Rolling, self).apply(func, args=args, kwargs=kwargs)

    @Substitution(name='rolling')
    @Appender(_doc_template)
    @Appender(_shared_docs['sum'])
    def sum(self, *args, **kwargs):
        nv.validate_rolling_func('sum', args, kwargs)
        return super(Rolling, self).sum(*args, **kwargs)

    @Substitution(name='rolling')
    @Appender(_doc_template)
    @Appender(_shared_docs['max'])
    def max(self, *args, **kwargs):
        nv.validate_rolling_func('max', args, kwargs)
        return super(Rolling, self).max(*args, **kwargs)

    @Substitution(name='rolling')
    @Appender(_doc_template)
    @Appender(_shared_docs['min'])
    def min(self, *args, **kwargs):
        nv.validate_rolling_func('min', args, kwargs)
        return super(Rolling, self).min(*args, **kwargs)

    @Substitution(name='rolling')
    @Appender(_doc_template)
    @Appender(_shared_docs['mean'])
    def mean(self, *args, **kwargs):
        nv.validate_rolling_func('mean', args, kwargs)
        return super(Rolling, self).mean(*args, **kwargs)

    @Substitution(name='rolling')
    @Appender(_doc_template)
    @Appender(_shared_docs['median'])
    def median(self, **kwargs):
        return super(Rolling, self).median(**kwargs)

    @Substitution(name='rolling')
    @Appender(_doc_template)
    @Appender(_shared_docs['std'])
    def std(self, ddof=1, *args, **kwargs):
        nv.validate_rolling_func('std', args, kwargs)
        return super(Rolling, self).std(ddof=ddof, **kwargs)

    @Substitution(name='rolling')
    @Appender(_doc_template)
    @Appender(_shared_docs['var'])
    def var(self, ddof=1, *args, **kwargs):
        nv.validate_rolling_func('var', args, kwargs)
        return super(Rolling, self).var(ddof=ddof, **kwargs)

    @Substitution(name='rolling')
    @Appender(_doc_template)
    @Appender(_shared_docs['skew'])
    def skew(self, **kwargs):
        return super(Rolling, self).skew(**kwargs)

    @Substitution(name='rolling')
    @Appender(_doc_template)
    @Appender(_shared_docs['kurt'])
    def kurt(self, **kwargs):
        return super(Rolling, self).kurt(**kwargs)

    @Substitution(name='rolling')
    @Appender(_doc_template)
    @Appender(_shared_docs['quantile'])
    def quantile(self, quantile, **kwargs):
        return super(Rolling, self).quantile(quantile=quantile, **kwargs)

    @Substitution(name='rolling')
    @Appender(_doc_template)
    @Appender(_shared_docs['cov'])
    def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
        return super(Rolling, self).cov(other=other, pairwise=pairwise,
                                        ddof=ddof, **kwargs)

    @Substitution(name='rolling')
    @Appender(_doc_template)
    @Appender(_shared_docs['corr'])
    def corr(self, other=None, pairwise=None, **kwargs):
        return super(Rolling, self).corr(other=other, pairwise=pairwise,
                                         **kwargs)
class RollingGroupby(_GroupByMixin, Rolling):
    """
    Provides a rolling groupby implementation

    .. versionadded:: 0.18.1

    """

    @property
    def _constructor(self):
        # per-group windows are built as plain Rolling objects
        return Rolling

    def _gotitem(self, key, ndim, subset=None):
        # Set the 'on' column as the index before the groupby splits the
        # frame, so that it is carried through to each selected object.
        if self.on is not None:
            self._groupby.obj = self._groupby.obj.set_index(self._on)
            self.on = None
        return super(RollingGroupby, self)._gotitem(key, ndim, subset=subset)

    def _validate_monotonic(self):
        """Skip the monotonic check: already validated at the groupby level."""
        pass
class Expanding(_Rolling_and_Expanding):
    """
    Provides expanding transformations.

    .. versionadded:: 0.18.0

    Parameters
    ----------
    min_periods : int, default None
        Minimum number of observations in window required to have a value
        (otherwise result is NA).
    freq : string or DateOffset object, optional (default None)
        .. deprecated:: 0.18.0
           Frequency to conform the data to before computing the statistic.
           Specified as a frequency string or DateOffset object.
    center : boolean, default False
        Set the labels at the center of the window.
    axis : int or string, default 0

    Returns
    -------
    a Window sub-classed for the particular operation

    Examples
    --------

    >>> df = DataFrame({'B': [0, 1, 2, np.nan, 4]})
         B
    0  0.0
    1  1.0
    2  2.0
    3  NaN
    4  4.0

    >>> df.expanding(2).sum()
         B
    0  NaN
    1  1.0
    2  3.0
    3  3.0
    4  7.0

    Notes
    -----
    By default, the result is set to the right edge of the window. This can be
    changed to the center of the window by setting ``center=True``.

    The `freq` keyword is used to conform time series data to a specified
    frequency by resampling the data. This is done with the default parameters
    of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
    """

    _attributes = ['min_periods', 'freq', 'center', 'axis']

    def __init__(self, obj, min_periods=1, freq=None, center=False, axis=0,
                 **kwargs):
        super(Expanding, self).__init__(obj=obj, min_periods=min_periods,
                                        freq=freq, center=center, axis=axis)

    @property
    def _constructor(self):
        return Expanding

    def _get_window(self, other=None):
        # An expanding "window" is just the full length of the data,
        # floored by min_periods; for binary (pairwise) ops the combined
        # length of both aligned objects is used.
        obj = self._selected_obj
        if other is None:
            return (max(len(obj), self.min_periods) if self.min_periods
                    else len(obj))
        return (max((len(obj) + len(obj)), self.min_periods)
                if self.min_periods else (len(obj) + len(obj)))

    _agg_doc = dedent("""
    Examples
    --------

    >>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
    >>> df
              A         B         C
    0 -2.385977 -0.102758  0.438822
    1 -1.004295  0.905829 -0.954544
    2  0.735167 -0.165272 -1.619346
    3 -0.702657 -1.340923 -0.706334
    4 -0.246845  0.211596 -0.901819
    5  2.463718  3.157577 -1.380906
    6 -1.142255  2.340594 -0.039875
    7  1.396598 -1.647453  1.677227
    8 -0.543425  1.761277 -0.220481
    9 -0.640505  0.289374 -1.550670

    >>> df.ewm(alpha=0.5).mean()
              A         B         C
    0 -2.385977 -0.102758  0.438822
    1 -1.464856  0.569633 -0.490089
    2 -0.207700  0.149687 -1.135379
    3 -0.471677 -0.645305 -0.906555
    4 -0.355635 -0.203033 -0.904111
    5  1.076417  1.503943 -1.146293
    6 -0.041654  1.925562 -0.588728
    7  0.680292  0.132049  0.548693
    8  0.067236  0.948257  0.163353
    9 -0.286980  0.618493 -0.694496

    See also
    --------
    pandas.DataFrame.expanding.aggregate
    pandas.DataFrame.rolling.aggregate
    pandas.DataFrame.aggregate
    """)

    @Appender(_agg_doc)
    @Appender(_shared_docs['aggregate'] % dict(
        versionadded='',
        klass='Series/DataFrame'))
    def aggregate(self, arg, *args, **kwargs):
        return super(Expanding, self).aggregate(arg, *args, **kwargs)

    agg = aggregate

    @Substitution(name='expanding')
    @Appender(_doc_template)
    @Appender(_shared_docs['count'])
    def count(self, **kwargs):
        return super(Expanding, self).count(**kwargs)

    @Substitution(name='expanding')
    @Appender(_doc_template)
    @Appender(_shared_docs['apply'])
    def apply(self, func, args=(), kwargs=None):
        # BUGFIX: default was a shared mutable ``{}``; ``None`` is
        # backward compatible and avoids the shared-dict pitfall.
        if kwargs is None:
            kwargs = {}
        return super(Expanding, self).apply(func, args=args, kwargs=kwargs)

    @Substitution(name='expanding')
    @Appender(_doc_template)
    @Appender(_shared_docs['sum'])
    def sum(self, *args, **kwargs):
        nv.validate_expanding_func('sum', args, kwargs)
        return super(Expanding, self).sum(*args, **kwargs)

    @Substitution(name='expanding')
    @Appender(_doc_template)
    @Appender(_shared_docs['max'])
    def max(self, *args, **kwargs):
        nv.validate_expanding_func('max', args, kwargs)
        return super(Expanding, self).max(*args, **kwargs)

    @Substitution(name='expanding')
    @Appender(_doc_template)
    @Appender(_shared_docs['min'])
    def min(self, *args, **kwargs):
        nv.validate_expanding_func('min', args, kwargs)
        return super(Expanding, self).min(*args, **kwargs)

    @Substitution(name='expanding')
    @Appender(_doc_template)
    @Appender(_shared_docs['mean'])
    def mean(self, *args, **kwargs):
        nv.validate_expanding_func('mean', args, kwargs)
        return super(Expanding, self).mean(*args, **kwargs)

    @Substitution(name='expanding')
    @Appender(_doc_template)
    @Appender(_shared_docs['median'])
    def median(self, **kwargs):
        return super(Expanding, self).median(**kwargs)

    @Substitution(name='expanding')
    @Appender(_doc_template)
    @Appender(_shared_docs['std'])
    def std(self, ddof=1, *args, **kwargs):
        nv.validate_expanding_func('std', args, kwargs)
        return super(Expanding, self).std(ddof=ddof, **kwargs)

    @Substitution(name='expanding')
    @Appender(_doc_template)
    @Appender(_shared_docs['var'])
    def var(self, ddof=1, *args, **kwargs):
        nv.validate_expanding_func('var', args, kwargs)
        return super(Expanding, self).var(ddof=ddof, **kwargs)

    @Substitution(name='expanding')
    @Appender(_doc_template)
    @Appender(_shared_docs['skew'])
    def skew(self, **kwargs):
        return super(Expanding, self).skew(**kwargs)

    @Substitution(name='expanding')
    @Appender(_doc_template)
    @Appender(_shared_docs['kurt'])
    def kurt(self, **kwargs):
        return super(Expanding, self).kurt(**kwargs)

    @Substitution(name='expanding')
    @Appender(_doc_template)
    @Appender(_shared_docs['quantile'])
    def quantile(self, quantile, **kwargs):
        return super(Expanding, self).quantile(quantile=quantile, **kwargs)

    @Substitution(name='expanding')
    @Appender(_doc_template)
    @Appender(_shared_docs['cov'])
    def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
        return super(Expanding, self).cov(other=other, pairwise=pairwise,
                                          ddof=ddof, **kwargs)

    @Substitution(name='expanding')
    @Appender(_doc_template)
    @Appender(_shared_docs['corr'])
    def corr(self, other=None, pairwise=None, **kwargs):
        return super(Expanding, self).corr(other=other, pairwise=pairwise,
                                           **kwargs)
class ExpandingGroupby(_GroupByMixin, Expanding):
    """
    Provides a expanding groupby implementation

    .. versionadded:: 0.18.1

    """
    @property
    def _constructor(self):
        # per-group windows are built as plain Expanding objects
        return Expanding
_bias_template = """
Parameters
----------
bias : boolean, default False
Use a standard estimation bias correction
"""
_pairwise_template = """
Parameters
----------
other : Series, DataFrame, or ndarray, optional
if not supplied then will default to self and produce pairwise output
pairwise : bool, default None
If False then only matching columns between self and other will be used and
the output will be a DataFrame.
If True then all pairwise combinations will be calculated and the output
will be a MultiIndex DataFrame in the case of DataFrame inputs.
In the case of missing elements, only complete pairwise observations will
be used.
bias : boolean, default False
Use a standard estimation bias correction
"""
class EWM(_Rolling):
    r"""
    Provides exponential weighted functions

    .. versionadded:: 0.18.0

    Parameters
    ----------
    com : float, optional
        Specify decay in terms of center of mass,
        :math:`\alpha = 1 / (1 + com),\text{ for } com \geq 0`
    span : float, optional
        Specify decay in terms of span,
        :math:`\alpha = 2 / (span + 1),\text{ for } span \geq 1`
    halflife : float, optional
        Specify decay in terms of half-life,
        :math:`\alpha = 1 - exp(log(0.5) / halflife),\text{ for } halflife > 0`
    alpha : float, optional
        Specify smoothing factor :math:`\alpha` directly,
        :math:`0 < \alpha \leq 1`

        .. versionadded:: 0.18.0

    min_periods : int, default 0
        Minimum number of observations in window required to have a value
        (otherwise result is NA).
    freq : None or string alias / date offset object, default=None
        .. deprecated:: 0.18.0
           Frequency to conform to before computing statistic
    adjust : boolean, default True
        Divide by decaying adjustment factor in beginning periods to account
        for imbalance in relative weightings (viewing EWMA as a moving average)
    ignore_na : boolean, default False
        Ignore missing values when calculating weights;
        specify True to reproduce pre-0.15.0 behavior

    Returns
    -------
    a Window sub-classed for the particular operation

    Examples
    --------

    >>> df = DataFrame({'B': [0, 1, 2, np.nan, 4]})
         B
    0  0.0
    1  1.0
    2  2.0
    3  NaN
    4  4.0

    >>> df.ewm(com=0.5).mean()
              B
    0  0.000000
    1  0.750000
    2  1.615385
    3  1.615385
    4  3.670213

    Notes
    -----
    Exactly one of center of mass, span, half-life, and alpha must be provided.
    Allowed values and relationship between the parameters are specified in the
    parameter descriptions above; see the link at the end of this section for
    a detailed explanation.

    The `freq` keyword is used to conform time series data to a specified
    frequency by resampling the data. This is done with the default parameters
    of :meth:`~pandas.Series.resample` (i.e. using the `mean`).

    When adjust is True (default), weighted averages are calculated using
    weights (1-alpha)**(n-1), (1-alpha)**(n-2), ..., 1-alpha, 1.

    When adjust is False, weighted averages are calculated recursively as:
       weighted_average[0] = arg[0];
       weighted_average[i] = (1-alpha)*weighted_average[i-1] + alpha*arg[i].

    When ignore_na is False (default), weights are based on absolute positions.
    For example, the weights of x and y used in calculating the final weighted
    average of [x, None, y] are (1-alpha)**2 and 1 (if adjust is True), and
    (1-alpha)**2 and alpha (if adjust is False).

    When ignore_na is True (reproducing pre-0.15.0 behavior), weights are based
    on relative positions. For example, the weights of x and y used in
    calculating the final weighted average of [x, None, y] are 1-alpha and 1
    (if adjust is True), and 1-alpha and alpha (if adjust is False).

    More details can be found at
    http://pandas.pydata.org/pandas-docs/stable/computation.html#exponentially-weighted-windows
    """
    _attributes = ['com', 'min_periods', 'freq', 'adjust', 'ignore_na', 'axis']

    def __init__(self, obj, com=None, span=None, halflife=None, alpha=None,
                 min_periods=0, freq=None, adjust=True, ignore_na=False,
                 axis=0):
        # Exactly one of com/span/halflife/alpha may be given; all are
        # normalized to a single center-of-mass value here.
        self.obj = obj
        self.com = _get_center_of_mass(com, span, halflife, alpha)
        self.min_periods = min_periods
        self.freq = freq
        self.adjust = adjust
        self.ignore_na = ignore_na
        self.axis = axis
        self.on = None

    @property
    def _constructor(self):
        return EWM

    _agg_doc = dedent("""
    Examples
    --------

    >>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
    >>> df
              A         B         C
    0 -2.385977 -0.102758  0.438822
    1 -1.004295  0.905829 -0.954544
    2  0.735167 -0.165272 -1.619346
    3 -0.702657 -1.340923 -0.706334
    4 -0.246845  0.211596 -0.901819
    5  2.463718  3.157577 -1.380906
    6 -1.142255  2.340594 -0.039875
    7  1.396598 -1.647453  1.677227
    8 -0.543425  1.761277 -0.220481
    9 -0.640505  0.289374 -1.550670

    >>> df.ewm(alpha=0.5).mean()
              A         B         C
    0 -2.385977 -0.102758  0.438822
    1 -1.464856  0.569633 -0.490089
    2 -0.207700  0.149687 -1.135379
    3 -0.471677 -0.645305 -0.906555
    4 -0.355635 -0.203033 -0.904111
    5  1.076417  1.503943 -1.146293
    6 -0.041654  1.925562 -0.588728
    7  0.680292  0.132049  0.548693
    8  0.067236  0.948257  0.163353
    9 -0.286980  0.618493 -0.694496

    See also
    --------
    pandas.DataFrame.rolling.aggregate
    """)

    @Appender(_agg_doc)
    @Appender(_shared_docs['aggregate'] % dict(
        versionadded='',
        klass='Series/DataFrame'))
    def aggregate(self, arg, *args, **kwargs):
        return super(EWM, self).aggregate(arg, *args, **kwargs)

    agg = aggregate

    def _apply(self, func, how=None, **kwargs):
        """Rolling statistical measure using supplied function. Designed to be
        used with passed-in Cython array-based functions.

        Parameters
        ----------
        func : string/callable to apply
        how : string, default to None
            .. deprecated:: 0.18.0
               how to resample

        Returns
        -------
        y : type of input argument

        """
        blocks, obj, index = self._create_blocks(how=how)
        results = []
        for b in blocks:
            try:
                values = self._prep_values(b.values)
            except TypeError:
                # non-numeric blocks are passed through untouched
                results.append(b.values.copy())
                continue

            if values.size == 0:
                results.append(values.copy())
                continue

            # if we have a string function name, wrap it
            if isinstance(func, compat.string_types):
                cfunc = getattr(_window, func, None)
                if cfunc is None:
                    raise ValueError("we do not support this function "
                                     "in _window.{0}".format(func))

                # NOTE: deliberately rebinds the ``func`` parameter to a
                # closure around the resolved Cython kernel
                def func(arg):
                    return cfunc(arg, self.com, int(self.adjust),
                                 int(self.ignore_na), int(self.min_periods))

            results.append(np.apply_along_axis(func, self.axis, values))

        return self._wrap_results(results, blocks, obj)

    @Substitution(name='ewm')
    @Appender(_doc_template)
    def mean(self, *args, **kwargs):
        """exponential weighted moving average"""
        nv.validate_window_func('mean', args, kwargs)
        return self._apply('ewma', **kwargs)

    @Substitution(name='ewm')
    @Appender(_doc_template)
    @Appender(_bias_template)
    def std(self, bias=False, *args, **kwargs):
        """exponential weighted moving stddev"""
        nv.validate_window_func('std', args, kwargs)
        return _zsqrt(self.var(bias=bias, **kwargs))

    vol = std

    @Substitution(name='ewm')
    @Appender(_doc_template)
    @Appender(_bias_template)
    def var(self, bias=False, *args, **kwargs):
        """exponential weighted moving variance"""
        nv.validate_window_func('var', args, kwargs)

        def f(arg):
            # variance as the self-covariance of the series
            return _window.ewmcov(arg, arg, self.com, int(self.adjust),
                                  int(self.ignore_na), int(self.min_periods),
                                  int(bias))

        return self._apply(f, **kwargs)

    @Substitution(name='ewm')
    @Appender(_doc_template)
    @Appender(_pairwise_template)
    def cov(self, other=None, pairwise=None, bias=False, **kwargs):
        """exponential weighted sample covariance"""
        if other is None:
            other = self._selected_obj
            # only default unset
            pairwise = True if pairwise is None else pairwise
        other = self._shallow_copy(other)

        def _get_cov(X, Y):
            X = self._shallow_copy(X)
            Y = self._shallow_copy(Y)
            cov = _window.ewmcov(X._prep_values(), Y._prep_values(), self.com,
                                 int(self.adjust), int(self.ignore_na),
                                 int(self.min_periods), int(bias))
            return X._wrap_result(cov)

        return _flex_binary_moment(self._selected_obj, other._selected_obj,
                                   _get_cov, pairwise=bool(pairwise))

    @Substitution(name='ewm')
    @Appender(_doc_template)
    @Appender(_pairwise_template)
    def corr(self, other=None, pairwise=None, **kwargs):
        """exponential weighted sample correlation"""
        if other is None:
            other = self._selected_obj
            # only default unset
            pairwise = True if pairwise is None else pairwise
        other = self._shallow_copy(other)

        def _get_corr(X, Y):
            X = self._shallow_copy(X)
            Y = self._shallow_copy(Y)

            def _cov(x, y):
                # bias=1: corr is scale-free, so the biased covariance
                # cancels out in the ratio below
                return _window.ewmcov(x, y, self.com, int(self.adjust),
                                      int(self.ignore_na),
                                      int(self.min_periods),
                                      1)

            x_values = X._prep_values()
            y_values = Y._prep_values()
            with np.errstate(all='ignore'):
                cov = _cov(x_values, y_values)
                x_var = _cov(x_values, x_values)
                y_var = _cov(y_values, y_values)
                corr = cov / _zsqrt(x_var * y_var)
            return X._wrap_result(corr)

        return _flex_binary_moment(self._selected_obj, other._selected_obj,
                                   _get_corr, pairwise=bool(pairwise))
# Helper Funcs


def _flex_binary_moment(arg1, arg2, f, pairwise=False):
    """
    Dispatch the binary moment kernel ``f`` (a cov/corr computation) over
    the Series/DataFrame combinations of ``arg1`` and ``arg2``.

    Series vs Series applies ``f`` directly. DataFrame inputs are expanded
    column by column: matched by position/name when ``pairwise=False``, or
    as the full cross product (MultiIndexed result) when ``pairwise=True``.
    """
    if not (isinstance(arg1, (np.ndarray, ABCSeries, ABCDataFrame)) and
            isinstance(arg2, (np.ndarray, ABCSeries, ABCDataFrame))):
        raise TypeError("arguments to moment function must be of type "
                        "np.ndarray/Series/DataFrame")

    if (isinstance(arg1, (np.ndarray, ABCSeries)) and
            isinstance(arg2, (np.ndarray, ABCSeries))):
        # simple case: align the two series-likes and apply directly
        X, Y = _prep_binary(arg1, arg2)
        return f(X, Y)

    elif isinstance(arg1, ABCDataFrame):
        from pandas import DataFrame

        def dataframe_from_int_dict(data, frame_template):
            # keys of ``data`` are positional column indices into the
            # template frame; map them back to the real column labels
            result = DataFrame(data, index=frame_template.index)
            if len(result.columns) > 0:
                result.columns = frame_template.columns[result.columns]
            return result

        results = {}
        if isinstance(arg2, ABCDataFrame):
            if pairwise is False:
                if arg1 is arg2:
                    # special case in order to handle duplicate column names
                    for i, col in enumerate(arg1.columns):
                        results[i] = f(arg1.iloc[:, i], arg2.iloc[:, i])
                    return dataframe_from_int_dict(results, arg1)
                else:
                    if not arg1.columns.is_unique:
                        raise ValueError("'arg1' columns are not unique")
                    if not arg2.columns.is_unique:
                        raise ValueError("'arg2' columns are not unique")
                    with warnings.catch_warnings(record=True):
                        X, Y = arg1.align(arg2, join='outer')
                    # mask each frame with the other so only shared
                    # observations contribute
                    X = X + 0 * Y
                    Y = Y + 0 * X

                    with warnings.catch_warnings(record=True):
                        res_columns = arg1.columns.union(arg2.columns)
                    for col in res_columns:
                        if col in X and col in Y:
                            results[col] = f(X[col], Y[col])
                    return DataFrame(results, index=X.index,
                                     columns=res_columns)
            elif pairwise is True:
                results = defaultdict(dict)
                for i, k1 in enumerate(arg1.columns):
                    for j, k2 in enumerate(arg2.columns):
                        if j < i and arg2 is arg1:
                            # Symmetric case: reuse the mirrored result
                            results[i][j] = results[j][i]
                        else:
                            results[i][j] = f(*_prep_binary(arg1.iloc[:, i],
                                                            arg2.iloc[:, j]))

                # TODO: not the most efficient (perf-wise)
                # though not bad code-wise
                from pandas import Panel, MultiIndex, concat

                with warnings.catch_warnings(record=True):
                    p = Panel.from_dict(results).swapaxes('items', 'major')
                    if len(p.major_axis) > 0:
                        p.major_axis = arg1.columns[p.major_axis]
                    if len(p.minor_axis) > 0:
                        p.minor_axis = arg2.columns[p.minor_axis]

                if len(p.items):
                    result = concat(
                        [p.iloc[i].T for i in range(len(p.items))],
                        keys=p.items)
                else:
                    # no columns at all: build an empty, correctly-typed
                    # MultiIndexed frame
                    result = DataFrame(
                        index=MultiIndex(levels=[arg1.index, arg1.columns],
                                         labels=[[], []]),
                        columns=arg2.columns,
                        dtype='float64')

                # reset our index names to arg1 names
                # reset our column names to arg2 names
                # careful not to mutate the original names
                result.columns = result.columns.set_names(
                    arg2.columns.names)
                result.index = result.index.set_names(
                    arg1.index.names + arg1.columns.names)

                return result

            else:
                raise ValueError("'pairwise' is not True/False")
        else:
            # DataFrame vs Series/ndarray: apply f column-wise
            results = {}
            for i, col in enumerate(arg1.columns):
                results[i] = f(*_prep_binary(arg1.iloc[:, i], arg2))
            return dataframe_from_int_dict(results, arg1)

    else:
        # arg1 is a Series/ndarray and arg2 is a DataFrame: swap and recurse
        return _flex_binary_moment(arg2, arg1, f)
def _get_center_of_mass(com, span, halflife, alpha):
valid_count = len([x for x in [com, span, halflife, alpha]
if x is not None])
if valid_count > 1:
raise ValueError("com, span, halflife, and alpha "
"are mutually exclusive")
# Convert to center of mass; domain checks ensure 0 < alpha <= 1
if com is not None:
if com < 0:
raise ValueError("com must satisfy: com >= 0")
elif span is not None:
if span < 1:
raise ValueError("span must satisfy: span >= 1")
com = (span - 1) / 2.
elif halflife is not None:
if halflife <= 0:
raise ValueError("halflife must satisfy: halflife > 0")
decay = 1 - np.exp(np.log(0.5) / halflife)
com = 1 / decay - 1
elif alpha is not None:
if alpha <= 0 or alpha > 1:
raise ValueError("alpha must satisfy: 0 < alpha <= 1")
com = (1.0 - alpha) / alpha
else:
raise ValueError("Must pass one of com, span, halflife, or alpha")
return float(com)
def _offset(window, center):
if not is_integer(window):
window = len(window)
offset = (window - 1) / 2. if center else 0
try:
return int(offset)
except:
return offset.astype(int)
def _require_min_periods(p):
def _check_func(minp, window):
if minp is None:
return window
else:
return max(p, minp)
return _check_func
def _use_window(minp, window):
if minp is None:
return window
else:
return minp
def _zsqrt(x):
with np.errstate(all='ignore'):
result = np.sqrt(x)
mask = x < 0
if isinstance(x, ABCDataFrame):
if mask.values.any():
result[mask] = 0
else:
if mask.any():
result[mask] = 0
return result
def _prep_binary(arg1, arg2):
if not isinstance(arg2, type(arg1)):
raise Exception('Input arrays must be of the same type!')
# mask out values, this also makes a common index...
X = arg1 + 0 * arg2
Y = arg2 + 0 * arg1
return X, Y
# Top-level exports


def rolling(obj, win_type=None, **kwds):
    # Module-level factory mirroring ``obj.rolling(...)``: a weighted
    # Window when a win_type is given, a plain Rolling otherwise.
    if not isinstance(obj, (ABCSeries, ABCDataFrame)):
        raise TypeError('invalid type: %s' % type(obj))

    if win_type is None:
        return Rolling(obj, **kwds)
    return Window(obj, win_type=win_type, **kwds)


rolling.__doc__ = Window.__doc__
def expanding(obj, **kwds):
    # Module-level factory mirroring ``obj.expanding(...)``.
    if isinstance(obj, (ABCSeries, ABCDataFrame)):
        return Expanding(obj, **kwds)
    raise TypeError('invalid type: %s' % type(obj))


expanding.__doc__ = Expanding.__doc__
def ewm(obj, **kwds):
    # Module-level factory mirroring ``obj.ewm(...)``.
    if isinstance(obj, (ABCSeries, ABCDataFrame)):
        return EWM(obj, **kwds)
    raise TypeError('invalid type: %s' % type(obj))


ewm.__doc__ = EWM.__doc__
| Winand/pandas | pandas/core/window.py | Python | bsd-3-clause | 68,740 | [
"Gaussian"
] | d4e0ef6c531af6c4326db8b3d9e0385e495ef8c4ce07d34f5d07adcbd35d8f73 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from django.core.management import call_command
class Migration(SchemaMigration):
    """South schema migration: adds the ``profiles.Setting`` key/value model.

    The ``models`` attribute below is South's frozen snapshot of the ORM at
    the time this migration was generated; it is auto-generated and must not
    be edited by hand.
    """

    def forwards(self, orm):
        """Create the ``profiles_setting`` table and load its fixture."""
        # Adding model 'Setting'
        db.create_table(u'profiles_setting', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('value', self.gf('django.db.models.fields.CharField')(max_length=255)),
        ))
        db.send_create_signal(u'profiles', ['Setting'])
        # NOTE(review): loaddata uses the *current* model definitions, not the
        # frozen ORM above - this can break if Setting changes in a later
        # migration; confirm this coupling is intentional.
        call_command("loaddata", "0007_auto__add_setting.json")

    def backwards(self, orm):
        """Drop the ``profiles_setting`` table (fixture rows go with it)."""
        # Deleting model 'Setting'
        db.delete_table(u'profiles_setting')

    # Frozen ORM state (auto-generated by South; do not edit by hand).
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'maps.shapefile': {
            'Meta': {'object_name': 'ShapeFile'},
            'color': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'geo_key_column': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'geo_meta_key_column': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'geom_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'label_column': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'shape_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'zoom_threshold': ('django.db.models.fields.IntegerField', [], {'default': '5'})
        },
        u'profiles.customvalue': {
            'Meta': {'object_name': 'CustomValue'},
            'data_type': ('django.db.models.fields.CharField', [], {'default': "'COUNT'", 'max_length': '30'}),
            'display_value': ('django.db.models.fields.CharField', [], {'max_length': "'255'"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
            'supress': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'value_operator': ('django.db.models.fields.CharField', [], {'max_length': "'255'"}),
            'value_operator_range': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'null': 'True', 'blank': 'True'})
        },
        u'profiles.datadomain': {
            'Meta': {'ordering': "['weight']", 'object_name': 'DataDomain'},
            'group': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.Group']", 'through': u"orm['profiles.DataDomainIndex']", 'symmetrical': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'indicators': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.Indicator']", 'through': u"orm['profiles.IndicatorDomain']", 'symmetrical': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'order': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'publish': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
            'subdomain_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'subdomains': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.DataDomain']", 'symmetrical': 'False', 'blank': 'True'}),
            'weight': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
        },
        u'profiles.datadomainindex': {
            'Meta': {'ordering': "['order']", 'object_name': 'DataDomainIndex'},
            'dataDomain': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataDomain']"}),
            'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
        },
        u'profiles.datafile': {
            'Meta': {'object_name': 'DataFile'},
            'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'profiles.datapoint': {
            'Meta': {'unique_together': "(('indicator', 'record', 'time'),)", 'object_name': 'DataPoint'},
            'change_from_time': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'datapoint_as_change_from'", 'null': 'True', 'to': u"orm['profiles.Time']"}),
            'change_to_time': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'datapoint_as_change_to'", 'null': 'True', 'to': u"orm['profiles.Time']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
            'record': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoRecord']"}),
            'time': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Time']", 'null': 'True'})
        },
        u'profiles.datasource': {
            'Meta': {'object_name': 'DataSource'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'implementation': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
        },
        u'profiles.denominator': {
            'Meta': {'object_name': 'Denominator'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
            'label': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'multiplier': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '100', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
            'sort': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
            'table_label': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
        },
        u'profiles.denominatorpart': {
            'Meta': {'ordering': "['order']", 'object_name': 'DenominatorPart'},
            'data': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataFile']", 'null': 'True', 'blank': 'True'}),
            'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataSource']"}),
            'denominator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Denominator']"}),
            'formula': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
            'levels': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.GeoLevel']", 'symmetrical': 'False'}),
            'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
            'part': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.IndicatorPart']"}),
            'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
        },
        u'profiles.flatvalue': {
            'Meta': {'object_name': 'FlatValue'},
            'display_title': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'db_index': 'True'}),
            'f_moe': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'null': 'True', 'blank': 'True'}),
            'f_number': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'null': 'True', 'blank': 'True'}),
            'f_numerator': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'null': 'True', 'blank': 'True'}),
            'f_numerator_moe': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'null': 'True', 'blank': 'True'}),
            'f_percent': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'null': 'True', 'blank': 'True'}),
            'geography': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoRecord']"}),
            'geography_geo_key': ('django.db.models.fields.CharField', [], {'default': '0', 'max_length': "'255'", 'db_index': 'True'}),
            'geography_name': ('django.db.models.fields.CharField', [], {'max_length': "'255'"}),
            'geography_slug': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'db_index': 'True'}),
            'geometry_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
            'indicator_slug': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'db_index': 'True'}),
            'moe': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
            'number': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
            'numerator': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
            'numerator_moe': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
            'percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
            'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'time_key': ('django.db.models.fields.CharField', [], {'max_length': "'255'"}),
            'value_type': ('django.db.models.fields.CharField', [], {'max_length': "'100'"})
        },
        u'profiles.geolevel': {
            'Meta': {'ordering': "['summary_level']", 'object_name': 'GeoLevel'},
            'data_sources': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.DataSource']", 'symmetrical': 'False', 'blank': 'True'}),
            'display_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': u"orm['profiles.GeoLevel']"}),
            'related_within': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'related_levels_within'", 'null': 'True', 'to': u"orm['profiles.GeoLevel']"}),
            'shapefile': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maps.ShapeFile']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200'}),
            'summary_level': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'year': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
        },
        u'profiles.georecord': {
            'Meta': {'unique_together': "(('slug', 'level'), ('level', 'geo_id', 'custom_name', 'owner'))", 'object_name': 'GeoRecord'},
            'components': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'components_rel_+'", 'blank': 'True', 'to': u"orm['profiles.GeoRecord']"}),
            'custom_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'geo_id': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'geo_id_segments': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'geo_searchable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'level': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoLevel']"}),
            'mappings': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'mappings_rel_+'", 'blank': 'True', 'to': u"orm['profiles.GeoRecord']"}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
            'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoRecord']", 'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '100', 'blank': 'True'})
        },
        u'profiles.group': {
            'Meta': {'ordering': "['name']", 'object_name': 'Group'},
            'domain': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'domain_index'", 'symmetrical': 'False', 'through': u"orm['profiles.DataDomainIndex']", 'to': u"orm['profiles.DataDomain']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'indicators': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.Indicator']", 'through': u"orm['profiles.GroupIndex']", 'symmetrical': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
            'order': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'})
        },
        u'profiles.groupindex': {
            'Meta': {'ordering': "['name']", 'object_name': 'GroupIndex'},
            'groups': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'indicators': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'groups'", 'to': u"orm['profiles.Indicator']"}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
        },
        u'profiles.indicator': {
            'Meta': {'ordering': "['name']", 'object_name': 'Indicator'},
            'data_as_of': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'data_domains': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.Group']", 'through': u"orm['profiles.GroupIndex']", 'symmetrical': 'False'}),
            'data_type': ('django.db.models.fields.CharField', [], {'default': "'COUNT'", 'max_length': '30'}),
            'display_change': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'display_distribution': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'display_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'display_percent': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'indicator_tasks': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'ind_tasks'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['profiles.IndicatorTask']"}),
            'last_generated_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'last_modified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'limitations': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'long_definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
            'next_update_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'order': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'purpose': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'routine_use': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'short_definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
            'source': ('django.db.models.fields.CharField', [], {'default': "'U.S. Census Bureau'", 'max_length': '300', 'blank': 'True'}),
            'universe': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'})
        },
        u'profiles.indicatordomain': {
            'Meta': {'object_name': 'IndicatorDomain'},
            'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataDomain']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"})
        },
        u'profiles.indicatorpart': {
            'Meta': {'ordering': "['order']", 'object_name': 'IndicatorPart'},
            'data': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataFile']", 'null': 'True', 'blank': 'True'}),
            'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataSource']"}),
            'formula': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
            'levels': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.GeoLevel']", 'symmetrical': 'False'}),
            'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
            'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'time': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Time']"})
        },
        u'profiles.indicatortask': {
            'Meta': {'object_name': 'IndicatorTask'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']", 'null': 'True', 'blank': 'True'}),
            'task_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
        },
        u'profiles.legendoption': {
            'Meta': {'object_name': 'LegendOption'},
            'bin_options': ('django.db.models.fields.TextField', [], {'default': "''"}),
            'bin_type': ('django.db.models.fields.CharField', [], {'default': "'jenks'", 'max_length': '255'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"})
        },
        u'profiles.precalculatedvalue': {
            'Meta': {'object_name': 'PrecalculatedValue'},
            'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataSource']"}),
            'geo_record': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoRecord']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'table': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'value': ('django.db.models.fields.TextField', [], {'blank': 'True'})
        },
        u'profiles.setting': {
            'Meta': {'object_name': 'Setting'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'profiles.taskstatus': {
            'Meta': {'object_name': 'TaskStatus'},
            'error': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'first_seen': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            't_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'task': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'traceback': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'unicode_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
        },
        u'profiles.time': {
            'Meta': {'ordering': "['name']", 'object_name': 'Time'},
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'sort': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '1'})
        },
        u'profiles.value': {
            'Meta': {'object_name': 'Value'},
            'datapoint': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataPoint']"}),
            'denominator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Denominator']", 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'moe': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
            'number': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
            'percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'})
        }
    }

    complete_apps = ['profiles']
| 216software/Profiles | communityprofiles/profiles/migrations/0007_auto__add_setting.py | Python | mit | 28,556 | [
"MOE"
] | 7b20dd128d818ef595f907b52a3b80f5dab3939f50fc452e763943dcbbcb2d28 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# createdevaccount - create a MiG server development account
# Copyright (C) 2003-2014 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
"""Add a unprivileged user with access to a personal MiG server.
Still needs some semi-automated setup of apache, sudo and iptables
afterwards...
This is very much bound to the exact setup used on the main MiG servers
where things like remote login, firewalling, home dirs and sudo are set up
for separated developer accounts. Some paths like for apache and vgrid helpers
are similarly hard coded to the Debian defaults on those servers.
"""
import getopt
import os
import socket
import sys
from shared.install import create_user
def usage(options):
"""Usage help"""
lines = ["--%s=%s" % pair for pair in zip(options,
[i.upper() for i in options])]
print '''Usage:
%s [OPTIONS] LOGIN [LOGIN ...]
Create developer account with username LOGIN using OPTIONS.
Where supported options include -h/--help for this help or the conf settings:
%s
IMPORTANT: needs to run with privileges to create system user!
''' % (sys.argv[0], '\n'.join(lines))
if __name__ == '__main__':
    # Default conf values: every FQDN defaults to this host's fully
    # qualified domain name, and debug mode is enabled.
    settings = {
        'public_fqdn': socket.getfqdn(),
        'cert_fqdn': socket.getfqdn(),
        'oid_fqdn': socket.getfqdn(),
        'sid_fqdn': socket.getfqdn(),
        'debug_mode': True,
    }
    # Short option -h plus long options: --help and one --KEY=VAL per setting.
    flag_str = 'h'
    opts_str = ["%s=" % key for key in settings.keys()] + ["help"]
    try:
        (opts, args) = getopt.getopt(sys.argv[1:], flag_str, opts_str)
    except getopt.GetoptError, exc:
        print 'Error: ', exc.msg
        usage(settings)
        sys.exit(1)
    # Apply recognized options to settings; bail out on unknown ones.
    for (opt, val) in opts:
        opt_name = opt.lstrip('-')
        if opt in ('-h', '--help'):
            usage(settings)
            sys.exit(0)
        elif opt_name in settings.keys():
            settings[opt_name] = val
        else:
            print 'Error: %s not supported!' % opt
            usage(settings)
            sys.exit(1)
    # Remaining positional arguments are the login names - at least one.
    if not args:
        usage(settings)
        sys.exit(1)
    # Non-root users only get a warning here; create_user itself will fail
    # where system privileges are actually required.
    if os.getuid() > 0:
        print "WARNING: needs to run with user management privileges!"
    print '# Creating dev account with:'
    for (key, val) in settings.items():
        print '%s: %s' % (key, val)
    # Create one unprivileged developer account per requested login.
    for login in args:
        print '# Creating a unprivileged account for %s' % login
        create_user(login, login, debug=settings["debug_mode"],
                    public_fqdn=settings["public_fqdn"],
                    cert_fqdn=settings["cert_fqdn"],
                    oid_fqdn=settings["oid_fqdn"],
                    sid_fqdn=settings["sid_fqdn"])
    sys.exit(0)
| heromod/migrid | mig/install/createdevaccount.py | Python | gpl-2.0 | 3,468 | [
"Brian"
] | eddfaee1b1f6e856eb74d001e52fee9b1fba585d9700f4c743dd541e57a8c173 |
# coding=utf-8
# Copyright © 2016 Computational Molecular Biology Group,
# Freie Universität Berlin (GER)
#
# This file is part of ReaDDy.
#
# ReaDDy is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General
# Public License along with this program. If not, see
# <http://www.gnu.org/licenses/>.
"""
Created on 10.10.17
@author: clonker
"""
from . import ureg as _ureg
from . import Q_ as _Q_
class UnitConfiguration(object):
    """Holds the units used when exchanging values with ReaDDy.

    Base units (length, time, energy, temperature) are parsed into pint
    units; derived units such as force constants, diffusion constants and
    reaction rates are exposed as combinations of the base units.
    """

    def __init__(self, length_unit='nanometer', time_unit='nanosecond', energy_unit='kilojoule/mol',
                 temperature_unit='kelvin'):
        parse = _ureg.parse_units
        self._length_unit = parse(length_unit)
        self._time_unit = parse(time_unit)
        self._energy_unit = parse(energy_unit)
        self._temperature_unit = parse(temperature_unit)
        self._boltzmann = parse('boltzmann_constant')
        self._avogadro = parse('avogadro_number')

    @property
    def reg(self):
        """The pint unit registry backing this configuration."""
        return _ureg

    @property
    def boltzmann(self):
        """Boltzmann constant as a pint unit."""
        return self._boltzmann

    @property
    def avogadro(self):
        """Avogadro number as a pint unit."""
        return self._avogadro

    @property
    def temperature_unit(self):
        """Unit of temperature."""
        return self._temperature_unit

    @temperature_unit.setter
    def temperature_unit(self, value):
        assert isinstance(value, _ureg.Unit), "temperature unit can only be an instance of unit"
        self._temperature_unit = value

    @property
    def length_unit(self):
        """Unit of length."""
        return self._length_unit

    @length_unit.setter
    def length_unit(self, value):
        assert isinstance(value, _ureg.Unit), "length unit can only be an instance of unit"
        self._length_unit = value

    @property
    def time_unit(self):
        """Unit of time."""
        return self._time_unit

    @time_unit.setter
    def time_unit(self, value):
        assert isinstance(value, _ureg.Unit), "time unit can only be an instance of unit"
        self._time_unit = value

    @property
    def energy_unit(self):
        """Unit of energy."""
        return self._energy_unit

    @energy_unit.setter
    def energy_unit(self, value):
        assert isinstance(value, _ureg.Unit), "energy_unit unit can only be an instance of unit"
        self._energy_unit = value

    @property
    def force_constant_unit(self):
        """Unit of harmonic force constants: energy / length**2."""
        return self.energy_unit / (self.length_unit ** 2)

    @property
    def diffusion_constant_unit(self):
        """Unit of diffusion constants: length**2 / time."""
        return (self.length_unit ** 2) / self.time_unit

    @property
    def reaction_rate_unit(self):
        """Unit of reaction rates: 1 / time."""
        return 1 / self.time_unit

    def convert(self, value, target_units):
        """Convert a pint quantity to *target_units* and strip the units;
        non-quantity values pass through unchanged."""
        return value.to(target_units).magnitude if isinstance(value, _Q_) else value
class NoUnitConfiguration(UnitConfiguration):
    """Unit configuration for unitless mode: all base units are plain 1."""

    def __init__(self):
        super().__init__()
        # Overwrite the parsed pint base units with dimensionless scalars.
        for attr_name in ('_length_unit', '_time_unit', '_temperature_unit', '_energy_unit'):
            setattr(self, attr_name, 1.)

    def convert(self, value, target_units):
        """Strip units from a pint quantity (ignoring *target_units*);
        non-quantity values pass through unchanged."""
        return value.magnitude if isinstance(value, _Q_) else value
| chrisfroe/readdy | wrappers/python/src/python/readdy/api/conf/UnitConfiguration.py | Python | lgpl-3.0 | 3,542 | [
"Avogadro"
] | d474a79b2bae4f67faae72607041cb254702e5cc72812874188df6d32fc81be1 |
# -*- coding: utf-8 -*-
import itertools
import functools
import os
import re
import urllib
import logging
import pymongo
import datetime
import urlparse
from collections import OrderedDict
import warnings
import pytz
from flask import request
from django.core.urlresolvers import reverse
from modularodm import Q
from modularodm import fields
from modularodm.validators import MaxLengthValidator
from modularodm.exceptions import NoResultsFound
from modularodm.exceptions import ValidationTypeError
from modularodm.exceptions import ValidationValueError
from api.base.utils import absolute_reverse
from framework import status
from framework.mongo import ObjectId
from framework.mongo import StoredObject
from framework.addons import AddonModelMixin
from framework.auth import get_user, User, Auth
from framework.auth import signals as auth_signals
from framework.exceptions import PermissionsError
from framework.guid.model import GuidStoredObject
from framework.auth.utils import privacy_info_handle
from framework.analytics import tasks as piwik_tasks
from framework.mongo.utils import to_mongo, to_mongo_key, unique_on
from framework.analytics import (
get_basic_counters, increment_user_activity_counters
)
from framework.sentry import log_exception
from framework.transactions.context import TokuTransaction
from framework.utils import iso8601format
from website import language, mails, settings, tokens
from website.util import web_url_for
from website.util import api_url_for
from website.util import sanitize
from website.exceptions import (
NodeStateError,
InvalidSanctionApprovalToken, InvalidSanctionRejectionToken,
)
from website.citations.utils import datetime_to_csl
from website.identifiers.model import IdentifierMixin
from website.util.permissions import expand_permissions
from website.util.permissions import CREATOR_PERMISSIONS, DEFAULT_CONTRIBUTOR_PERMISSIONS, ADMIN
from website.project.metadata.schemas import OSF_META_SCHEMAS
from website.project import signals as project_signals
# Module-level logger for the project models.
logger = logging.getLogger(__name__)

# Absolute URL template for a node's project page, e.g. 'https://osf.io/abc12/'.
VIEW_PROJECT_URL_TEMPLATE = settings.DOMAIN + '{node_id}/'
def has_anonymous_link(node, auth):
    """check if the node is anonymous to the user

    :param Node node: Node which the user wants to visit
    :param str link: any view-only link in the current url
    :return bool anonymous: Whether the node is anonymous to the user or not
    """
    # Prefer the key carried on the Auth object; fall back to the query string.
    view_only_link = auth.private_key or request.args.get('view_only', '').strip('/')
    if not view_only_link:
        return False
    if node.is_public:
        # Public nodes are never anonymized.
        return False
    return any(
        link.anonymous and link.key == view_only_link
        for link in node.private_links_active
    )
class MetaSchema(StoredObject):
    """A stored metadata schema definition (seeded from OSF_META_SCHEMAS)."""

    _id = fields.StringField(default=lambda: str(ObjectId()))
    name = fields.StringField()
    schema = fields.DictionaryField()
    category = fields.StringField()

    # Version of the Knockout metadata renderer to use (e.g. if data binds
    # change)
    metadata_version = fields.IntegerField()
    # Version of the schema to use (e.g. if questions, responses change)
    schema_version = fields.IntegerField()
def ensure_schemas(clear=True):
    """Import meta-data schemas from JSON to database, optionally clearing
    database first.

    :param clear: Clear schema database before import
    """
    if clear:
        try:
            MetaSchema.remove()
        except AttributeError:
            # Some storage backends may not support remove(); tolerate this
            # only when running in debug mode.
            if not settings.DEBUG_MODE:
                raise
    for schema in OSF_META_SCHEMAS:
        try:
            MetaSchema.find_one(
                Q('name', 'eq', schema['name']) &
                Q('schema_version', 'eq', schema['schema_version'])
            )
        except NoResultsFound:
            # Fix: previously a bare ``except:`` which silently swallowed
            # *any* error (including programming errors) and re-created the
            # schema. Only "schema not found" should trigger creation.
            schema['name'] = schema['name'].replace(' ', '_')
            schema_obj = MetaSchema(**schema)
            schema_obj.save()
class MetaData(GuidStoredObject):
    """Free-form metadata blob attached to an arbitrary target object."""

    _id = fields.StringField(primary=True)
    # The object this metadata describes (any model, via abstract FK).
    target = fields.AbstractForeignField(backref='metadata')
    data = fields.DictionaryField()

    date_created = fields.DateTimeField(auto_now_add=datetime.datetime.utcnow)
    date_modified = fields.DateTimeField(auto_now=datetime.datetime.utcnow)
def validate_comment_reports(value, *args, **kwargs):
    """Validate the `Comment.reports` mapping: every key must be an existing
    user ID and every value a dict carrying `category` and `text`."""
    for user_id, report in value.iteritems():
        if not User.load(user_id):
            raise ValidationValueError('Keys must be user IDs')
        if not isinstance(report, dict):
            raise ValidationTypeError('Values must be dictionaries')
        if 'category' not in report or 'text' not in report:
            raise ValidationValueError(
                'Values must include `category` and `text` keys'
            )
class Comment(GuidStoredObject):
    """A user comment on a node or on another comment (threaded via `target`)."""

    _id = fields.StringField(primary=True)

    user = fields.ForeignField('user', required=True, backref='commented')
    node = fields.ForeignField('node', required=True, backref='comment_owner')
    # May point at the node itself or at another comment (reply threads).
    target = fields.AbstractForeignField(required=True, backref='commented')

    date_created = fields.DateTimeField(auto_now_add=datetime.datetime.utcnow)
    date_modified = fields.DateTimeField(auto_now=datetime.datetime.utcnow)
    modified = fields.BooleanField()

    is_deleted = fields.BooleanField(default=False)
    content = fields.StringField()

    # Dictionary field mapping user IDs to dictionaries of report details:
    # {
    #   'icpnw': {'category': 'hate', 'message': 'offensive'},
    #   'cdi38': {'category': 'spam', 'message': 'godwins law'},
    # }
    reports = fields.DictionaryField(validate=validate_comment_reports)

    def _log_event(self, action, auth):
        """Append a comment log entry to the parent node without saving it.

        Factored out of `create` / `edit` / `delete` / `undelete`, which all
        built the same params dict.
        """
        self.node.add_log(
            action,
            {
                'project': self.node.parent_id,
                'node': self.node._id,
                'user': self.user._id,
                'comment': self._id,
            },
            auth=auth,
            save=False,
        )

    @classmethod
    def create(cls, auth, **kwargs):
        """Create and save a comment, logging COMMENT_ADDED on its node."""
        comment = cls(**kwargs)
        comment.save()
        comment._log_event(NodeLog.COMMENT_ADDED, auth)
        comment.node.save()
        return comment

    def edit(self, content, auth, save=False):
        """Replace the comment text and log COMMENT_UPDATED."""
        self.content = content
        self.modified = True
        self._log_event(NodeLog.COMMENT_UPDATED, auth)
        if save:
            self.save()

    def delete(self, auth, save=False):
        """Soft-delete the comment and log COMMENT_REMOVED."""
        self.is_deleted = True
        self._log_event(NodeLog.COMMENT_REMOVED, auth)
        if save:
            self.save()

    def undelete(self, auth, save=False):
        """Restore a soft-deleted comment and log COMMENT_ADDED."""
        self.is_deleted = False
        self._log_event(NodeLog.COMMENT_ADDED, auth)
        if save:
            self.save()

    def report_abuse(self, user, save=False, **kwargs):
        """Report that a comment is abuse.

        :param User user: User submitting the report
        :param bool save: Save changes
        :param dict kwargs: Report details
        :raises: ValueError if the user submitting abuse is the same as the
            user who posted the comment
        """
        if user == self.user:
            # Fix: previously raised a bare ``ValueError`` with no message.
            raise ValueError('Users cannot report their own comments')
        self.reports[user._id] = kwargs
        if save:
            self.save()

    def unreport_abuse(self, user, save=False):
        """Revoke report of abuse.

        :param User user: User who submitted the report
        :param bool save: Save changes
        :raises: ValueError if user has not reported comment as abuse
        """
        try:
            self.reports.pop(user._id)
        except KeyError:
            raise ValueError('User has not reported comment as abuse')
        if save:
            self.save()
@unique_on(['params.node', '_id'])
class NodeLog(StoredObject):
    """An audit-log entry recording a single action taken on a node."""

    _id = fields.StringField(primary=True, default=lambda: str(ObjectId()))

    date = fields.DateTimeField(default=datetime.datetime.utcnow, index=True)
    action = fields.StringField(index=True)
    params = fields.DictionaryField()
    should_hide = fields.BooleanField(default=False)
    was_connected_to = fields.ForeignField('node', list=True)

    user = fields.ForeignField('user', index=True)
    # Identifier for actions not performed by a registered User.
    foreign_user = fields.StringField()

    DATE_FORMAT = '%m/%d/%Y %H:%M UTC'

    # Log action constants -- NOTE: templates stored in log_templates.mako
    CREATED_FROM = 'created_from'

    PROJECT_CREATED = 'project_created'
    PROJECT_REGISTERED = 'project_registered'
    PROJECT_DELETED = 'project_deleted'

    NODE_CREATED = 'node_created'
    NODE_FORKED = 'node_forked'
    NODE_REMOVED = 'node_removed'

    POINTER_CREATED = 'pointer_created'
    POINTER_FORKED = 'pointer_forked'
    POINTER_REMOVED = 'pointer_removed'

    WIKI_UPDATED = 'wiki_updated'
    WIKI_DELETED = 'wiki_deleted'
    WIKI_RENAMED = 'wiki_renamed'

    MADE_WIKI_PUBLIC = 'made_wiki_public'
    MADE_WIKI_PRIVATE = 'made_wiki_private'

    CONTRIB_ADDED = 'contributor_added'
    CONTRIB_REMOVED = 'contributor_removed'
    CONTRIB_REORDERED = 'contributors_reordered'

    PERMISSIONS_UPDATED = 'permissions_updated'

    MADE_PRIVATE = 'made_private'
    MADE_PUBLIC = 'made_public'

    TAG_ADDED = 'tag_added'
    TAG_REMOVED = 'tag_removed'

    EDITED_TITLE = 'edit_title'
    EDITED_DESCRIPTION = 'edit_description'

    UPDATED_FIELDS = 'updated_fields'

    FILE_MOVED = 'addon_file_moved'
    FILE_COPIED = 'addon_file_copied'
    FILE_RENAMED = 'addon_file_renamed'

    FOLDER_CREATED = 'folder_created'

    FILE_ADDED = 'file_added'
    FILE_UPDATED = 'file_updated'
    FILE_REMOVED = 'file_removed'
    FILE_RESTORED = 'file_restored'

    ADDON_ADDED = 'addon_added'
    ADDON_REMOVED = 'addon_removed'

    COMMENT_ADDED = 'comment_added'
    COMMENT_REMOVED = 'comment_removed'
    COMMENT_UPDATED = 'comment_updated'

    MADE_CONTRIBUTOR_VISIBLE = 'made_contributor_visible'
    MADE_CONTRIBUTOR_INVISIBLE = 'made_contributor_invisible'

    EXTERNAL_IDS_ADDED = 'external_ids_added'

    EMBARGO_APPROVED = 'embargo_approved'
    EMBARGO_CANCELLED = 'embargo_cancelled'
    EMBARGO_COMPLETED = 'embargo_completed'
    EMBARGO_INITIATED = 'embargo_initiated'
    RETRACTION_APPROVED = 'retraction_approved'
    RETRACTION_CANCELLED = 'retraction_cancelled'
    RETRACTION_INITIATED = 'retraction_initiated'
    REGISTRATION_APPROVAL_CANCELLED = 'registration_cancelled'
    REGISTRATION_APPROVAL_INITIATED = 'registration_initiated'
    REGISTRATION_APPROVAL_APPROVED = 'registration_approved'

    def __repr__(self):
        return ('<NodeLog({self.action!r}, params={self.params!r}) '
                'with id {self._id!r}>').format(self=self)

    @property
    def node(self):
        """Return the :class:`Node` associated with this log."""
        return (
            Node.load(self.params.get('node')) or
            Node.load(self.params.get('project'))
        )

    @property
    def tz_date(self):
        '''Return the timezone-aware date.
        '''
        # Date should always be defined, but a few logs in production are
        # missing dates; return None and log error if date missing
        if self.date:
            return self.date.replace(tzinfo=pytz.UTC)
        logger.error('Date missing on NodeLog {}'.format(self._primary_key))

    @property
    def formatted_date(self):
        '''Return the timezone-aware, ISO-formatted string representation of
        this log's date.
        '''
        if self.tz_date:
            return self.tz_date.isoformat()

    def resolve_node(self, node):
        """A single `NodeLog` record may be attached to multiple `Node` records
        (parents, forks, registrations, etc.), so the node that the log refers
        to may not be the same as the node the user is viewing. Use
        `resolve_node` to determine the relevant node to use for permission
        checks.

        :param Node node: Node being viewed
        """
        if self.node == node or self.node in node.nodes:
            return self.node
        if node.is_fork_of(self.node) or node.is_registration_of(self.node):
            return node
        for child in node.nodes:
            # Fix: the registration check previously tested ``node`` instead
            # of ``child`` (copy-paste from the branch above), so a child
            # registration of the logged node was never matched.
            if child.is_fork_of(self.node) or child.is_registration_of(self.node):
                return child
        return False

    def can_view(self, node, auth):
        # Permission is evaluated against whichever related node is relevant.
        node_to_check = self.resolve_node(node)
        if node_to_check:
            return node_to_check.can_view(auth)
        return False

    def _render_log_contributor(self, contributor, anonymous=False):
        """Serialize one contributor reference for log display, optionally
        masking identifying information when the view is anonymous."""
        user = User.load(contributor)
        if not user:
            # Handle legacy non-registered users, which were
            # represented as a dict
            if isinstance(contributor, dict):
                if 'nr_name' in contributor:
                    return {
                        'fullname': contributor['nr_name'],
                        'registered': False,
                    }
            return None
        if self.node:
            fullname = user.display_full_name(node=self.node)
        else:
            fullname = user.fullname
        return {
            'id': privacy_info_handle(user._primary_key, anonymous),
            'fullname': privacy_info_handle(fullname, anonymous, name=True),
            'registered': user.is_registered,
        }
class Tag(StoredObject):
    """A label attached to nodes; the tag text itself is the primary key."""

    _id = fields.StringField(primary=True, validate=MaxLengthValidator(128))

    def __repr__(self):
        return '<Tag() with id {0!r}>'.format(self._id)

    @property
    def url(self):
        """Search URL listing everything carrying this tag."""
        return '/search/?tags={0}'.format(self._id)
class Pointer(StoredObject):
    """A link to a Node. The Pointer delegates all but a few methods to its
    contained Node. Forking and registration are overridden such that the
    link is cloned, but its contained Node is not.
    """
    #: Whether this is a pointer or not
    primary = False

    _id = fields.StringField()
    node = fields.ForeignField('node', backref='_pointed')

    _meta = {'optimistic': True}

    def _clone(self):
        # Copy the link itself, keeping the same target node.
        # Returns None implicitly when there is no target node.
        if self.node:
            clone = self.clone()
            clone.node = self.node
            clone.save()
            return clone

    def fork_node(self, *args, **kwargs):
        # Forking a pointer clones only the link, never the pointed-to node.
        return self._clone()

    def register_node(self, *args, **kwargs):
        # Registration likewise copies only the link.
        return self._clone()

    def use_as_template(self, *args, **kwargs):
        return self._clone()

    def resolve(self):
        # The "real" node this pointer stands in for.
        return self.node

    def __getattr__(self, item):
        """Delegate attribute access to the node being pointed to."""
        # Prevent backref lookups from being overriden by proxied node
        try:
            return super(Pointer, self).__getattr__(item)
        except AttributeError:
            pass
        if self.node:
            return getattr(self.node, item)
        raise AttributeError(
            'Pointer object has no attribute {0}'.format(
                item
            )
        )
def get_pointer_parent(pointer):
    """Given a `Pointer` object, return its parent node."""
    # The `parent_node` property of the `Pointer` schema refers to the parents
    # of the pointed-at `Node`, not the parents of the `Pointer`; use the
    # back-reference syntax to find the parents of the `Pointer`.
    parents = pointer.node__parent
    assert len(parents) == 1, 'Pointer must have exactly one parent.'
    return parents[0]
def validate_category(value):
    """Validator for Node#category. Makes sure that the value is one of the
    categories defined in CATEGORY_MAP.

    :raises ValidationValueError: if `value` is not a known category key
    """
    # Idiom fix: test membership on the dict directly; the previous
    # ``.keys()`` call built an intermediate list (on Python 2) for nothing.
    if value not in Node.CATEGORY_MAP:
        raise ValidationValueError('Invalid value for category.')
    return True
def validate_title(value):
    """Validator for Node#title. Makes sure that the value exists and is not
    above 200 characters.
    """
    if value is None or not value.strip():
        raise ValidationValueError('Title cannot be blank.')

    # Validate against the HTML-stripped form as well.
    stripped = sanitize.strip_html(value)

    if stripped is None or not stripped.strip():
        raise ValidationValueError('Invalid title.')

    if len(stripped) > 200:
        raise ValidationValueError('Title cannot exceed 200 characters.')

    return True
def validate_user(value):
    """Validate a {user_id: bool} mapping: when non-empty, its key must
    reference exactly one existing User."""
    if value != {}:
        user_id = next(iter(value))
        if User.find(Q('_id', 'eq', user_id)).count() != 1:
            raise ValidationValueError('User does not exist.')
    return True
class NodeUpdateError(Exception):
    """Raised by `Node.update` when a field cannot be updated.

    :param str reason: Human-readable explanation of the failure.
    :param str key: Name of the offending field, if any.

    Fix: ``key`` now defaults to None. It was previously a required
    positional argument, but `Node.update` raises this error with only
    ``reason`` when rejecting updates to registrations, which crashed with
    a TypeError instead of raising the intended exception.
    """
    def __init__(self, reason, key=None, *args, **kwargs):
        super(NodeUpdateError, self).__init__(*args, **kwargs)
        self.key = key
        self.reason = reason
class Node(GuidStoredObject, AddonModelMixin, IdentifierMixin):
    """A project, component, folder, or registration.

    Central model tying together contributors, permissions, wiki pages,
    logs, tags, child nodes, and registration/embargo/retraction state.
    """

    #: Whether this is a pointer or not
    primary = True

    __indices__ = [{
        'unique': False,
        'key_or_list': [
            ('tags.$', pymongo.ASCENDING),
            ('is_public', pymongo.ASCENDING),
            ('is_deleted', pymongo.ASCENDING),
        ]
    }]

    # Node fields that trigger an update to Solr on save
    SOLR_UPDATE_FIELDS = {
        'title',
        'category',
        'description',
        'visible_contributor_ids',
        'tags',
        'is_fork',
        'is_registration',
        'retraction',
        'embargo',
        'is_public',
        'is_deleted',
        'wiki_pages_current',
        'is_retracted',
    }

    # Maps category identifier => Human-readable representation for use in
    # titles, menus, etc.
    # Use an OrderedDict so that menu items show in the correct order
    CATEGORY_MAP = OrderedDict([
        ('', 'Uncategorized'),
        ('project', 'Project'),
        ('hypothesis', 'Hypothesis'),
        ('methods and measures', 'Methods and Measures'),
        ('procedure', 'Procedure'),
        ('instrumentation', 'Instrumentation'),
        ('data', 'Data'),
        ('analysis', 'Analysis'),
        ('communication', 'Communication'),
        ('other', 'Other'),
    ])

    # Fields that are writable by Node.update
    WRITABLE_WHITELIST = [
        'title',
        'description',
        'category',
        'is_public',
    ]

    # Named constants
    PRIVATE = 'private'
    PUBLIC = 'public'

    _id = fields.StringField(primary=True)

    date_created = fields.DateTimeField(auto_now_add=datetime.datetime.utcnow, index=True)

    # Privacy
    is_public = fields.BooleanField(default=False, index=True)

    # User mappings
    # permissions maps user id -> list of permission strings.
    permissions = fields.DictionaryField()
    visible_contributor_ids = fields.StringField(list=True)

    # Project Organization
    is_dashboard = fields.BooleanField(default=False, index=True)
    is_folder = fields.BooleanField(default=False, index=True)

    # Expanded: Dictionary field mapping user IDs to expand state of this node:
    # {
    #   'icpnw': True,
    #   'cdi38': False,
    # }
    # NOTE(review): mutable default ``{}`` is shared unless the ODM copies
    # it per-instance — verify against modular-odm behavior.
    expanded = fields.DictionaryField(default={}, validate=validate_user)

    is_deleted = fields.BooleanField(default=False, index=True)
    deleted_date = fields.DateTimeField(index=True)

    is_registration = fields.BooleanField(default=False, index=True)
    registered_date = fields.DateTimeField(index=True)
    registered_user = fields.ForeignField('user', backref='registered')
    registered_schema = fields.ForeignField('metaschema', backref='registered')
    registered_meta = fields.DictionaryField()
    registration_approval = fields.ForeignField('registrationapproval')
    retraction = fields.ForeignField('retraction')
    embargo = fields.ForeignField('embargo')

    is_fork = fields.BooleanField(default=False, index=True)
    forked_date = fields.DateTimeField(index=True)

    title = fields.StringField(validate=validate_title)
    description = fields.StringField()
    category = fields.StringField(validate=validate_category, index=True)

    # One of 'public', 'private'
    # TODO: Add validator
    comment_level = fields.StringField(default='private')

    wiki_pages_current = fields.DictionaryField()
    wiki_pages_versions = fields.DictionaryField()
    # Dictionary field mapping node wiki page to sharejs private uuid.
    # {<page_name>: <sharejs_id>}
    wiki_private_uuids = fields.DictionaryField()
    file_guid_to_share_uuids = fields.DictionaryField()

    creator = fields.ForeignField('user', backref='created')
    contributors = fields.ForeignField('user', list=True, backref='contributed')
    users_watching_node = fields.ForeignField('user', list=True, backref='watched')

    logs = fields.ForeignField('nodelog', list=True, backref='logged')
    tags = fields.ForeignField('tag', list=True, backref='tagged')

    # Tags for internal use
    system_tags = fields.StringField(list=True)

    nodes = fields.AbstractForeignField(list=True, backref='parent')
    forked_from = fields.ForeignField('node', backref='forked', index=True)
    registered_from = fields.ForeignField('node', backref='registrations', index=True)

    # The node (if any) used as a template for this node's creation
    template_node = fields.ForeignField('node', backref='template_node', index=True)

    piwik_site_id = fields.StringField()

    # Dictionary field mapping user id to a list of nodes in node.nodes which the user has subscriptions for
    # {<User.id>: [<Node._id>, <Node2._id>, ...] }
    child_node_subscriptions = fields.DictionaryField(default=dict)

    _meta = {
        'optimistic': True,
    }
def __init__(self, *args, **kwargs):
    # Pop tags before the base constructor runs so they can be routed
    # through add_tag (which also creates Tag records).
    tags = kwargs.pop('tags', [])
    super(Node, self).__init__(*args, **kwargs)

    # Ensure when Node is created with tags through API, tags are added to Tag
    if tags:
        for tag in tags:
            self.add_tag(tag, Auth(self.creator), save=False, log=False)

    if kwargs.get('_is_loaded', False):
        # Loaded from storage: creator/permission setup already persisted.
        return

    if self.creator:
        self.contributors.append(self.creator)
        self.set_visible(self.creator, visible=True, log=False)

        # Add default creator permissions
        for permission in CREATOR_PERMISSIONS:
            self.add_permission(self.creator, permission, save=False)
def __repr__(self):
    return ('<Node(title={self.title!r}, category={self.category!r}) '
            'with _id {self._id!r}>').format(self=self)

# For Django compatibility
@property
def pk(self):
    return self._id

@property
def category_display(self):
    """The human-readable representation of this node's category."""
    return self.CATEGORY_MAP[self.category]

# We need the following 2 properties in order to serialize related links in NodeRegistrationSerializer
@property
def registered_user_id(self):
    """The ID of the user who registered this node if this is a registration, else None.
    """
    if self.registered_user:
        return self.registered_user._id
    return None

@property
def registered_from_id(self):
    """The ID of the node this registration was created from, else None.
    """
    if self.registered_from:
        return self.registered_from._id
    return None
@property
def sanction(self):
    """The first active sanction on this node — registration approval,
    embargo, or retraction — falling back to the parent's sanction."""
    for candidate in (self.registration_approval, self.embargo, self.retraction):
        if candidate:
            return candidate
    parent = self.parent_node
    if parent:
        return parent.sanction
    return None
@property
def is_pending_registration(self):
    # Only registrations can be pending registration approval; the state
    # is inherited from the parent when no approval is attached here.
    if not self.is_registration:
        return False
    if self.registration_approval is None:
        if self.parent_node:
            return self.parent_node.is_pending_registration
        return False
    return self.registration_approval.pending_approval

@property
def is_registration_approved(self):
    if self.registration_approval is None:
        if self.parent_node:
            return self.parent_node.is_registration_approved
        return False
    return self.registration_approval.is_approved

@property
def is_retracted(self):
    if self.retraction is None:
        if self.parent_node:
            return self.parent_node.is_retracted
        return False
    return self.retraction.is_approved

@property
def is_pending_retraction(self):
    if self.retraction is None:
        if self.parent_node:
            return self.parent_node.is_pending_retraction
        return False
    return self.retraction.pending_approval

@property
def embargo_end_date(self):
    # NOTE(review): returns False (not None) when there is no embargo
    # anywhere up the chain — callers appear to rely on truthiness.
    if self.embargo is None:
        if self.parent_node:
            return self.parent_node.embargo_end_date
        return False
    return self.embargo.embargo_end_date

@property
def is_pending_embargo(self):
    if self.embargo is None:
        if self.parent_node:
            return self.parent_node.is_pending_embargo
        return False
    return self.embargo.pending_approval

@property
def is_pending_embargo_for_existing_registration(self):
    """ Returns True if Node has an Embargo pending approval for an
    existing registrations. This is used specifically to ensure
    registrations pre-dating the Embargo feature do not get deleted if
    their respective Embargo request is rejected.
    """
    if self.embargo is None:
        if self.parent_node:
            return self.parent_node.is_pending_embargo_for_existing_registration
        return False
    return self.embargo.pending_registration
@property
def private_links(self):
    # Back-reference: all PrivateLink objects that include this node.
    return self.privatelink__shared

@property
def private_links_active(self):
    # Private links that have not been deleted.
    return [x for x in self.private_links if not x.is_deleted]

@property
def private_link_keys_active(self):
    # Keys of live private links (used for view-only access checks).
    return [x.key for x in self.private_links if not x.is_deleted]

@property
def private_link_keys_deleted(self):
    # Keys of revoked private links.
    return [x.key for x in self.private_links if x.is_deleted]

def path_above(self, auth):
    # Breadcrumb-style path of ancestor titles, masking titles the
    # viewer is not permitted to see.
    parents = self.parents
    return '/' + '/'.join([p.title if p.can_view(auth) else '-- private project --' for p in reversed(parents)])

@property
def ids_above(self):
    # Set of all ancestor node ids.
    parents = self.parents
    return {p._id for p in parents}

@property
def nodes_active(self):
    # Child nodes (primary children and pointers) that are not deleted.
    return [x for x in self.nodes if not x.is_deleted]
def can_edit(self, auth=None, user=None):
    """Return if a user is authorized to edit this node.
    Must specify one of (`auth`, `user`).

    :param Auth auth: Auth object to check
    :param User user: User object to check
    :returns: Whether user has permission to edit this node.
    """
    if not auth and not user:
        raise ValueError('Must pass either `auth` or `user`')
    if auth and user:
        raise ValueError('Cannot pass both `auth` and `user`')
    user = user or auth.user

    # API-key access counts as edit capability even without a user.
    is_api_node = auth.api_node == self if auth else False

    has_write = bool(user and self.has_permission(user, 'write'))
    return has_write or is_api_node
def active_contributors(self, include=lambda n: True):
    """Yield each active contributor for which `include(contributor)` is truthy."""
    for contributor in self.contributors:
        if not contributor.is_active:
            continue
        if include(contributor):
            yield contributor
def is_admin_parent(self, user):
    """True if `user` has admin directly on this node or on any ancestor."""
    if self.has_permission(user, 'admin', check_parent=False):
        return True
    parent = self.parent_node
    return parent.is_admin_parent(user) if parent else False
def can_view(self, auth):
    # Anonymous visitors (no auth) can only see public nodes.
    if not auth and not self.is_public:
        return False

    # Visible when public, when the user has read access, when a valid
    # view-only link key is presented, or when an ancestor grants admin.
    return (
        self.is_public or
        (auth.user and self.has_permission(auth.user, 'read')) or
        auth.private_key in self.private_link_keys_active or
        self.is_admin_parent(auth.user)
    )
def is_expanded(self, user=None):
    """Return whether `user` has expanded this folder in the dashboard view.

    :param User user: User object to check
    :returns: Boolean if the folder is expanded.
    """
    if user._id in self.expanded:
        return self.expanded[user._id]
    else:
        return False

def expand(self, user=None):
    # Record that `user` expanded this folder; persists immediately.
    self.expanded[user._id] = True
    self.save()

def collapse(self, user=None):
    # Record that `user` collapsed this folder; persists immediately.
    self.expanded[user._id] = False
    self.save()
def is_derived_from(self, other, attr):
    """Walk the chain referenced by `attr` (e.g. 'forked_from') and report
    whether `other` appears anywhere along it."""
    current = getattr(self, attr)
    while current is not None:
        if current == other:
            return True
        current = getattr(current, attr)
    return False

def is_fork_of(self, other):
    """Whether this node is a (possibly transitive) fork of `other`."""
    return self.is_derived_from(other, 'forked_from')

def is_registration_of(self, other):
    """Whether this node is a (possibly transitive) registration of `other`."""
    return self.is_derived_from(other, 'registered_from')
@property
def forks(self):
    """List of forks of this node"""
    # Excludes deleted forks and forks that have since become registrations.
    return list(self.node__forked.find(Q('is_deleted', 'eq', False) &
                                       Q('is_registration', 'ne', True)))
def add_permission(self, user, permission, save=False):
    """Grant permission to a user.

    :param str permission: Permission to grant
    :param bool save: Save changes
    :raises: ValueError if user already has permission
    """
    existing = self.permissions.get(user._id)
    if existing is None:
        self.permissions[user._id] = [permission]
    else:
        if permission in existing:
            raise ValueError('User already has permission {0}'.format(permission))
        existing.append(permission)
    if save:
        self.save()
def remove_permission(self, user, permission, save=False):
    """Revoke permission from a user.

    :param User user: User to revoke permission from
    :param str permission: Permission to revoke
    :param bool save: Save changes
    :raises: ValueError if user does not have permission
    """
    try:
        user_permissions = self.permissions[user._id]
        user_permissions.remove(permission)
    except (KeyError, ValueError):
        # KeyError: user has no entry; ValueError: permission not held.
        raise ValueError('User does not have permission {0}'.format(permission))
    if save:
        self.save()
def clear_permission(self, user, save=False):
    """Clear all permissions for a user.

    :param User user: User to revoke permission from
    :param bool save: Save changes
    :raises: ValueError if user not in permissions
    """
    if user._id not in self.permissions:
        raise ValueError(
            'User {0} not in permissions list for node {1}'.format(
                user._id, self._id,
            )
        )
    del self.permissions[user._id]
    if save:
        self.save()
def set_permissions(self, user, permissions, save=False):
    # Replace the user's full permission list wholesale.
    self.permissions[user._id] = permissions
    if save:
        self.save()
def has_permission(self, user, permission, check_parent=True):
    """Check whether user has permission.

    :param User user: User to test
    :param str permission: Required permission
    :param bool check_parent: For 'read', also accept admin rights on an
        ancestor node
    :returns: User has required permission
    """
    if user is None:
        # Fix: ``Logger.warn`` is a deprecated alias of ``warning``.
        logger.warning('User is ``None``.')
        return False
    if permission in self.permissions.get(user._id, []):
        return True
    if permission == 'read' and check_parent:
        return self.is_admin_parent(user)
    return False
def has_permission_on_children(self, user, permission):
    """Checks if the given user has a given permission on any child nodes
    that are not registrations or deleted
    """
    if self.has_permission(user, permission):
        return True
    return any(
        child.has_permission_on_children(user, permission)
        for child in self.nodes
        if child.primary and not child.is_deleted
    )
def has_addon_on_children(self, addon):
    """Checks if a given node has a specific addon on child nodes
    that are not registrations or deleted
    """
    if self.has_addon(addon):
        return True
    return any(
        child.has_addon_on_children(addon)
        for child in self.nodes
        if child.primary and not child.is_deleted
    )
def get_permissions(self, user):
    """Get list of permissions for user.

    :param User user: User to check
    :returns: List of permissions (empty when the user has none)
    """
    # Doc fix: the docstring previously claimed a ValueError was raised for
    # unknown users; the implementation has always returned an empty list.
    return self.permissions.get(user._id, [])

def adjust_permissions(self):
    """Drop permission entries for users no longer in `contributors`."""
    # Fix: snapshot the keys before mutating. On Python 3, ``dict.keys()``
    # is a live view and popping while iterating raises RuntimeError (the
    # original worked only because Python 2 returned a list).
    for key in list(self.permissions.keys()):
        if key not in self.contributors:
            self.permissions.pop(key)
@property
def visible_contributors(self):
    # Bibliographic contributors, in the stored visibility order.
    return [
        User.load(_id)
        for _id in self.visible_contributor_ids
    ]

@property
def parents(self):
    # All ancestors, nearest first; recursion ends at a parentless node.
    if self.parent_node:
        return [self.parent_node] + self.parent_node.parents
    return []
@property
def admin_contributor_ids(self):
    """IDs of users holding admin on an ancestor node but not listed as
    contributors here.

    Fix: the original declared an unused ``contributors=None`` parameter,
    which can never be passed through property access; it has been removed.
    """
    contributor_ids = self.contributors._to_primary_keys()
    admin_ids = set()
    for parent in self.parents:
        admins = [
            user for user, perms in parent.permissions.iteritems()
            if 'admin' in perms
        ]
        admin_ids.update(set(admins).difference(contributor_ids))
    return admin_ids
@property
def admin_contributors(self):
    # Materialize the admin-by-inheritance users, sorted by family name.
    return sorted(
        [User.load(_id) for _id in self.admin_contributor_ids],
        key=lambda user: user.family_name,
    )
def get_visible(self, user):
    # Whether `user` is listed among the node's bibliographic contributors.
    if not self.is_contributor(user):
        raise ValueError(u'User {0} not in contributors'.format(user))
    return user._id in self.visible_contributor_ids

def update_visible_ids(self, save=False):
    """Update the order of `visible_contributor_ids`. Updating on making
    a contributor visible is more efficient than recomputing order on
    accessing `visible_contributors`.
    """
    # Re-derive the list in contributor order, keeping only still-visible ids.
    self.visible_contributor_ids = [
        contributor._id
        for contributor in self.contributors
        if contributor._id in self.visible_contributor_ids
    ]
    if save:
        self.save()
def set_visible(self, user, visible, log=True, auth=None, save=False):
    """Mark a contributor as bibliographically visible or invisible.

    :param User user: Must already be a contributor.
    :param bool visible: Desired visibility state.
    :raises ValueError: if `user` is not a contributor, or hiding would
        leave the node with no visible contributor.
    """
    if not self.is_contributor(user):
        raise ValueError(u'User {0} not in contributors'.format(user))
    if visible and user._id not in self.visible_contributor_ids:
        self.visible_contributor_ids.append(user._id)
        self.update_visible_ids(save=False)
    elif not visible and user._id in self.visible_contributor_ids:
        if len(self.visible_contributor_ids) == 1:
            raise ValueError('Must have at least one visible contributor')
        self.visible_contributor_ids.remove(user._id)
    else:
        # No state change: skip logging and saving entirely.
        return
    message = (
        NodeLog.MADE_CONTRIBUTOR_VISIBLE
        if visible
        else NodeLog.MADE_CONTRIBUTOR_INVISIBLE
    )
    if log:
        self.add_log(
            message,
            params={
                'parent': self.parent_id,
                'node': self._id,
                'contributors': [user._id],
            },
            auth=auth,
            save=False,
        )
    if save:
        self.save()
def can_comment(self, auth):
    """Whether the authenticated user may comment here, honoring the
    node's `comment_level` ('public' vs contributors-only)."""
    if self.comment_level != 'public':
        # Non-public comment levels restrict commenting to contributors.
        return self.is_contributor(auth.user)
    return auth.logged_in and (
        self.is_public or
        (auth.user and self.has_permission(auth.user, 'read'))
    )
def update(self, fields, auth=None, save=True):
    """Update the node with the given fields.

    :param dict fields: Dictionary of field_name:value pairs.
    :param Auth auth: Auth object for the user making the update.
    :param bool save: Whether to save after updating the object.
    :raises NodeUpdateError: for registrations or invalid attribute values.
    """
    if self.is_registration:
        # NOTE(review): passes no ``key``; NodeUpdateError must tolerate a
        # missing key argument.
        raise NodeUpdateError(reason="Registered content cannot be updated")
    if not fields:  # Bail out early if there are no fields to update
        return False
    values = {}
    for key, value in fields.iteritems():
        # Silently skip anything outside the writable whitelist.
        if key not in self.WRITABLE_WHITELIST:
            continue
        # Title and description have special methods for logging purposes
        if key == 'title':
            self.set_title(title=value, auth=auth, save=False)
        elif key == 'description':
            self.set_description(description=value, auth=auth, save=False)
        elif key == 'is_public':
            self.set_privacy(
                Node.PUBLIC if value else Node.PRIVATE,
                auth=auth,
                log=True,
                save=False
            )
        else:
            with warnings.catch_warnings():
                try:
                    # This is in place because historically projects and components
                    # live on different ElasticSearch indexes, and at the time of Node.save
                    # there is no reliable way to check what the old Node.category
                    # value was. When the category changes it is possible to have duplicate/dead
                    # search entries, so always delete the ES doc on category change
                    # TODO: consolidate Node indexes into a single index, refactor search
                    if key == 'category':
                        self.delete_search_entry()
                    ###############
                    old_value = getattr(self, key)
                    if old_value != value:
                        values[key] = {
                            'old': old_value,
                            'new': value,
                        }
                        setattr(self, key, value)
                except AttributeError:
                    raise NodeUpdateError(reason="Invalid value for attribute '{0}'".format(key), key=key)
                except warnings.Warning:
                    raise NodeUpdateError(reason="Attribute '{0}' doesn't exist on the Node class".format(key), key=key)
    if save:
        updated = self.save()
    else:
        updated = []
    # Re-read new values: setters above may have normalized them.
    for key in values:
        values[key]['new'] = getattr(self, key)
    if values:
        self.add_log(
            NodeLog.UPDATED_FIELDS,
            params={
                'node': self._id,
                'updated_fields': {
                    key: {
                        'old': values[key]['old'],
                        'new': values[key]['new']
                    }
                    for key in values
                }
            },
            auth=auth)
    return updated
def save(self, *args, **kwargs):
    """Persist the node, with project-creation side effects on first save.

    On the first save of an original (non-fork, non-registration) node this
    also attaches default addons, appends the node to its parent (if any),
    and writes a PROJECT_CREATED log. Search and Piwik updates are pushed
    only when relevant fields changed.

    :param bool update_piwik: Popped from kwargs; push the change to Piwik
        when True (default True).
    :param bool suppress_log: Popped from kwargs; skip the creation
        log/addon bootstrap when True.
    :return: Set of saved field names, as returned by StoredObject.save.
    :raises NodeStateError: When a second dashboard is created for a user.
    """
    update_piwik = kwargs.pop('update_piwik', True)
    self.adjust_permissions()
    # _is_loaded is False only before the object first hits the database.
    first_save = not self._is_loaded
    if first_save and self.is_dashboard:
        existing_dashboards = self.creator.node__contributed.find(
            Q('is_dashboard', 'eq', True)
        )
        if existing_dashboards.count() > 0:
            raise NodeStateError("Only one dashboard allowed per user.")
    is_original = not self.is_registration and not self.is_fork
    if 'suppress_log' in kwargs.keys():
        suppress_log = kwargs['suppress_log']
        del kwargs['suppress_log']
    else:
        suppress_log = False
    saved_fields = super(Node, self).save(*args, **kwargs)
    if first_save and is_original and not suppress_log:
        # TODO: This logic also exists in self.use_as_template()
        for addon in settings.ADDONS_AVAILABLE:
            if 'node' in addon.added_default:
                self.add_addon(addon.short_name, auth=None, log=False)
        # Define log fields for non-component project
        log_action = NodeLog.PROJECT_CREATED
        log_params = {
            'node': self._primary_key,
        }
        if getattr(self, 'parent', None):
            # Append log to parent
            self.parent.nodes.append(self)
            self.parent.save()
            log_params.update({'parent_node': self.parent._primary_key})
        # Add log with appropriate fields
        self.add_log(
            log_action,
            params=log_params,
            auth=Auth(user=self.creator),
            log_date=self.date_created,
            save=True,
        )
    # Only update Solr if at least one stored field has changed, and if
    # public or privacy setting has changed
    need_update = bool(self.SOLR_UPDATE_FIELDS.intersection(saved_fields))
    if not self.is_public:
        if first_save or 'is_public' not in saved_fields:
            need_update = False
    # Folders and nodes mid-archival are never pushed to search.
    if self.is_folder or self.archiving:
        need_update = False
    if need_update:
        self.update_search()
    # This method checks what has changed.
    if settings.PIWIK_HOST and update_piwik:
        piwik_tasks.update_node(self._id, saved_fields)
    # Return expected value for StoredObject::save
    return saved_fields
######################################
# Methods that return a new instance #
######################################
def use_as_template(self, auth, changes=None, top_level=True):
    """Create a new project, using an existing project as a template.

    :param auth: The user to be assigned as creator
    :param changes: A dictionary of changes, keyed by node id, which
        override the attributes of the template project or its children.
    :param bool top_level: True only at the root of the recursion; child
        clones do not receive the "templated from" title prefix.
    :return: The `Node` instance created.
    """
    changes = changes or dict()
    # build the dict of attributes to change for the new node
    try:
        attributes = changes[self._id]
        # TODO: explicitly define attributes which may be changed.
    except (AttributeError, KeyError):
        attributes = dict()
    new = self.clone()
    # clear permissions, which are not cleared by the clone method
    new.permissions = {}
    new.visible_contributor_ids = []
    # Clear quasi-foreign fields
    new.wiki_pages_current = {}
    new.wiki_pages_versions = {}
    new.wiki_private_uuids = {}
    new.file_guid_to_share_uuids = {}
    # set attributes which may be overridden by `changes`
    new.is_public = False
    new.description = None
    # apply `changes`
    for attr, val in attributes.iteritems():
        setattr(new, attr, val)
    # set attributes which may NOT be overridden by `changes`
    new.creator = auth.user
    new.template_node = self
    new.add_contributor(contributor=auth.user, permissions=CREATOR_PERMISSIONS, log=False, save=False)
    new.is_fork = False
    new.is_registration = False
    new.piwik_site_id = None
    # If that title hasn't been changed, apply the default prefix (once)
    if (new.title == self.title
            and top_level
            and language.TEMPLATED_FROM_PREFIX not in new.title):
        new.title = ''.join((language.TEMPLATED_FROM_PREFIX, new.title, ))
    # Slight hack - date_created is a read-only field.
    new._fields['date_created'].__set__(
        new,
        datetime.datetime.utcnow(),
        safe=True
    )
    # suppress_log: creation is logged below as CREATED_FROM instead of
    # PROJECT_CREATED (see Node.save).
    new.save(suppress_log=True)
    # Log the creation
    new.add_log(
        NodeLog.CREATED_FROM,
        params={
            'node': new._primary_key,
            'template_node': {
                'id': self._primary_key,
                'url': self.url,
                'title': self.title,
            },
        },
        auth=auth,
        log_date=new.date_created,
        save=False,
    )
    # add mandatory addons
    # TODO: This logic also exists in self.save()
    for addon in settings.ADDONS_AVAILABLE:
        if 'node' in addon.added_default:
            new.add_addon(addon.short_name, auth=None, log=False)
    # deal with the children of the node, if any
    new.nodes = [
        x.use_as_template(auth, changes, top_level=False)
        for x in self.nodes
        if x.can_view(auth)
    ]
    new.save()
    return new
############
# Pointers #
############
def add_pointer(self, node, auth, save=True):
    """Add a pointer to a node.

    :param Node node: Node to add
    :param Auth auth: Consolidated authorization
    :param bool save: Save changes
    :return: Created pointer
    :raises ValueError: When `node` is already present, is a folder that is
        already pointed at, or is the dashboard.
    :raises NodeStateError: When called on a registration.
    """
    # Fail if node already in nodes / pointers. Note: cast node and node
    # to primary keys to test for conflicts with both nodes and pointers
    # contained in `self.nodes`.
    if node._id in self.node_ids:
        raise ValueError(
            'Pointer to node {0} already in list'.format(node._id)
        )
    if self.is_registration:
        raise NodeStateError('Cannot add a pointer to a registration')
    # If a folder, prevent more than one pointer to that folder. This will prevent infinite loops on the Dashboard.
    # Also, no pointers to the dashboard project, which could cause loops as well.
    already_pointed = node.pointed
    if node.is_folder and len(already_pointed) > 0:
        raise ValueError(
            'Pointer to folder {0} already exists. Only one pointer to any given folder allowed'.format(node._id)
        )
    if node.is_dashboard:
        raise ValueError(
            'Pointer to dashboard ({0}) not allowed.'.format(node._id)
        )
    # Append pointer
    pointer = Pointer(node=node)
    pointer.save()
    self.nodes.append(pointer)
    # Add log
    self.add_log(
        action=NodeLog.POINTER_CREATED,
        params={
            'parent_node': self.parent_id,
            'node': self._primary_key,
            'pointer': {
                'id': pointer.node._id,
                'url': pointer.node.url,
                'title': pointer.node.title,
                'category': pointer.node.category,
            },
        },
        auth=auth,
        save=False,
    )
    # Optionally save changes
    if save:
        self.save()
    return pointer
def rm_pointer(self, pointer, auth):
    """Remove a pointer.

    :param Pointer pointer: Pointer to remove
    :param Auth auth: Consolidated authorization
    :raises ValueError: When `pointer` is not among this node's children.
    """
    if pointer not in self.nodes:
        raise ValueError('Node link does not belong to the requested node.')
    # Remove `Pointer` object; will also remove self from `nodes` list of
    # parent node
    Pointer.remove_one(pointer)
    # Add log
    # NOTE(review): the log is appended with save=False and no save follows
    # in this method; persistence appears to rely on a later save — confirm.
    self.add_log(
        action=NodeLog.POINTER_REMOVED,
        params={
            'parent_node': self.parent_id,
            'node': self._primary_key,
            'pointer': {
                'id': pointer.node._id,
                'url': pointer.node.url,
                'title': pointer.node.title,
                'category': pointer.node.category,
            },
        },
        auth=auth,
        save=False,
    )
@property
def node_ids(self):
return [
node._id if node.primary else node.node._id
for node in self.nodes
]
@property
def nodes_primary(self):
return [
node
for node in self.nodes
if node.primary
]
def node_and_primary_descendants(self):
"""Return an iterator for a node and all of its primary (non-pointer) descendants.
:param node Node: target Node
"""
return itertools.chain([self], self.get_descendants_recursive(lambda n: n.primary))
@property
def depth(self):
return len(self.parents)
def next_descendants(self, auth, condition=lambda auth, node: True):
"""
Recursively find the first set of descedants under a given node that meet a given condition
returns a list of [(node, [children]), ...]
"""
ret = []
for node in self.nodes:
if condition(auth, node):
# base case
ret.append((node, []))
else:
ret.append((node, node.next_descendants(auth, condition)))
ret = [item for item in ret if item[1] or condition(auth, item[0])] # prune empty branches
return ret
def get_descendants_recursive(self, include=lambda n: True):
for node in self.nodes:
if include(node):
yield node
if node.primary:
for descendant in node.get_descendants_recursive(include):
if include(descendant):
yield descendant
def get_aggregate_logs_queryset(self, auth):
    """Return logs of this node plus all viewable descendants, newest first.

    :param Auth auth: Used to filter descendants to those `auth` can view.
    :return: NodeLog queryset sorted by descending `_id`.
    """
    # Collect self plus every recursive descendant visible to `auth`.
    ids = [self._id] + [n._id
                        for n in self.get_descendants_recursive()
                        if n.can_view(auth)]
    # Hidden logs (should_hide) are excluded from the aggregate feed.
    query = Q('__backrefs.logged.node.logs', 'in', ids) & Q('should_hide', 'ne', True)
    return NodeLog.find(query).sort('-_id')
@property
def nodes_pointer(self):
return [
node
for node in self.nodes
if not node.primary
]
@property
def has_pointers_recursive(self):
"""Recursively checks whether the current node or any of its nodes
contains a pointer.
"""
if self.nodes_pointer:
return True
for node in self.nodes_primary:
if node.has_pointers_recursive:
return True
return False
@property
def pointed(self):
return getattr(self, '_pointed', [])
def pointing_at(self, pointed_node_id):
"""This node is pointed at another node.
:param Node pointed_node_id: The node id of the node being pointed at.
:return: pointer_id
"""
for pointer in self.nodes_pointer:
node_id = pointer.node._id
if node_id == pointed_node_id:
return pointer._id
return None
def get_points(self, folders=False, deleted=False, resolve=True):
    """Return the nodes (or raw pointers) that point at this node.

    :param bool folders: Include pointers living in folder nodes.
    :param bool deleted: Include pointers living in deleted nodes.
    :param bool resolve: Return the owning nodes instead of the pointers.
    """
    points = []
    for pointer in self.pointed:
        owner = get_pointer_parent(pointer)
        if owner.is_folder and not folders:
            continue
        if owner.is_deleted and not deleted:
            continue
        points.append(owner if resolve else pointer)
    return points
def resolve(self):
return self
def fork_pointer(self, pointer, auth, save=True):
    """Replace a pointer with a fork. If the pointer points to a project,
    fork the project and replace the pointer with a new pointer pointing
    to the fork. If the pointer points to a component, fork the component
    and add it to the current node.

    :param Pointer pointer:
    :param Auth auth:
    :param bool save:
    :return: Forked node
    :raises ValueError: When `pointer` is not a child of this node, or the
        fork could not be created.
    """
    # Fail if pointer not contained in `nodes`
    try:
        index = self.nodes.index(pointer)
    except ValueError:
        raise ValueError('Pointer {0} not in list'.format(pointer._id))
    # Get pointed node
    node = pointer.node
    # Fork into current node and replace pointer with forked component
    forked = node.fork_node(auth)
    if forked is None:
        raise ValueError('Could not fork node')
    self.nodes[index] = forked
    # Add log
    self.add_log(
        NodeLog.POINTER_FORKED,
        params={
            'parent_node': self.parent_id,
            'node': self._primary_key,
            'pointer': {
                'id': pointer.node._id,
                'url': pointer.node.url,
                'title': pointer.node.title,
                'category': pointer.node.category,
            },
        },
        auth=auth,
        save=False,
    )
    # Optionally save changes
    if save:
        self.save()
    # Garbage-collect pointer. Note: Must save current node before
    # removing pointer, else remove will fail when trying to remove
    # backref from self to pointer.
    Pointer.remove_one(pointer)
    # Return forked content
    return forked
def get_recent_logs(self, n=10):
"""Return a list of the n most recent logs, in reverse chronological
order.
:param int n: Number of logs to retrieve
"""
return list(reversed(self.logs)[:n])
@property
def date_modified(self):
'''The most recent datetime when this node was modified, based on
the logs.
'''
try:
return self.logs[-1].date
except IndexError:
return self.date_created
def set_title(self, title, auth, save=False):
    """Set the title of this Node and log the change.

    :param str title: The new title.
    :param auth: All the auth information including user, API key.
    :param bool save: Save the node after updating when True.
    :return: False when the sanitized title is unchanged, else None.
    """
    # Validate eagerly so bad titles fail before any mutation or save.
    validate_title(title)
    previous_title = self.title
    cleaned_title = sanitize.strip_html(title)
    if cleaned_title == previous_title:
        # Sanitization left the title unchanged; nothing to do.
        return False
    self.title = cleaned_title
    self.add_log(
        action=NodeLog.EDITED_TITLE,
        params={
            'parent_node': self.parent_id,
            'node': self._primary_key,
            'title_new': self.title,
            'title_original': previous_title,
        },
        auth=auth,
        save=False,
    )
    if save:
        self.save()
    return None
def set_description(self, description, auth, save=False):
    """Set the description and log the event.

    :param str description: The new description
    :param auth: All the auth information including user, API key.
    :param bool save: Save self after updating.
    :return: False when the sanitized description is unchanged, else None.
    """
    previous = self.description
    cleaned = sanitize.strip_html(description)
    if cleaned == previous:
        return False
    self.description = cleaned
    self.add_log(
        action=NodeLog.EDITED_DESCRIPTION,
        params={
            'parent_node': self.parent_id,
            'node': self._primary_key,
            'description_new': self.description,
            'description_original': previous
        },
        auth=auth,
        save=False,
    )
    if save:
        self.save()
    return None
def update_search(self):
    """Push this node's current state to the search index.

    Search-backend unavailability is logged and swallowed so indexing
    problems never break the save path.
    """
    from website import search
    try:
        search.search.update_node(self)
    except search.exceptions.SearchUnavailableError as e:
        logger.exception(e)
        log_exception()
@classmethod
def bulk_update_search(cls, nodes):
    """Re-index several nodes in one bulk search-backend call.

    :param nodes: Iterable of Node instances to update.
    """
    from website import search
    try:
        # bulk=True makes update_node emit bulk-format documents.
        serialize = functools.partial(search.search.update_node, bulk=True)
        search.search.bulk_update_nodes(serialize, nodes)
    except search.exceptions.SearchUnavailableError as e:
        logger.exception(e)
        log_exception()
def delete_search_entry(self):
    """Remove this node's document from the search index; failures are
    logged and swallowed.
    """
    from website import search
    try:
        search.search.delete_node(self)
    except search.exceptions.SearchUnavailableError as e:
        logger.exception(e)
        log_exception()
def delete_registration_tree(self, save=False):
    """Mark this registration and all primary descendants as deleted.

    :param bool save: Save (and re-index) each node as it is marked.
    """
    self.is_deleted = True
    # Keep the registered_from backref only for embargoes created on
    # pre-existing registrations.
    if not getattr(self.embargo, 'for_existing_registration', False):
        self.registered_from = None
    if save:
        self.save()
    self.update_search()
    for child in self.nodes_primary:
        child.delete_registration_tree(save=save)
def remove_node(self, auth, date=None):
    """Marks a node as deleted.

    TODO: Call a hook on addons
    Adds a log to the parent node if applicable

    :param auth: an instance of :class:`Auth`.
    :param date: Date node was removed
    :type date: `datetime.datetime` or `None`
    :return bool: True on success.
    :raises NodeStateError: For dashboards, or when live children remain.
    :raises PermissionsError: When `auth.user` cannot edit this node.
    """
    # TODO: rename "date" param - it's shadowing a global
    if self.is_dashboard:
        raise NodeStateError("Dashboards may not be deleted.")
    if not self.can_edit(auth):
        raise PermissionsError('{0!r} does not have permission to modify this {1}'.format(auth.user, self.category or 'node'))
    #if this is a folder, remove all the folders that this is pointing at.
    if self.is_folder:
        for pointed in self.nodes_pointer:
            if pointed.node.is_folder:
                pointed.node.remove_node(auth=auth)
    # Children must already be deleted before the parent may go.
    if [x for x in self.nodes_primary if not x.is_deleted]:
        raise NodeStateError("Any child components must be deleted prior to deleting this project.")
    # After delete callback
    for addon in self.get_addons():
        message = addon.after_delete(self, auth.user)
        if message:
            status.push_status_message(message, kind='info', trust=False)
    log_date = date or datetime.datetime.utcnow()
    # Add log to parent
    if self.node__parent:
        self.node__parent[0].add_log(
            NodeLog.NODE_REMOVED,
            params={
                'project': self._primary_key,
            },
            auth=auth,
            log_date=log_date,
            save=True,
        )
    else:
        self.add_log(
            NodeLog.PROJECT_DELETED,
            params={
                'project': self._primary_key,
            },
            auth=auth,
            log_date=log_date,
            save=True,
        )
    self.is_deleted = True
    # NOTE(review): deleted_date stays None when `date` is not supplied,
    # while the log above falls back to utcnow — confirm this is intended.
    self.deleted_date = date
    self.save()
    auth_signals.node_deleted.send(self)
    return True
def fork_node(self, auth, title='Fork of '):
    """Recursively fork a node.

    :param Auth auth: Consolidated authorization
    :param str title: Optional text to prepend to forked title
    :return: Forked node
    :raises PermissionsError: When `auth.user` cannot read this node.
    :raises NodeStateError: When forking a deleted node.
    """
    user = auth.user
    # Non-contributors can't fork private nodes
    if not (self.is_public or self.has_permission(user, 'read')):
        raise PermissionsError('{0!r} does not have permission to fork node {1!r}'.format(user, self._id))
    when = datetime.datetime.utcnow()
    # Reload from the database to fork the persisted state.
    original = self.load(self._primary_key)
    if original.is_deleted:
        raise NodeStateError('Cannot fork deleted node.')
    # Note: Cloning a node copies its `wiki_pages_current` and
    # `wiki_pages_versions` fields, but does not clone the underlying
    # database objects to which these dictionaries refer. This means that
    # the cloned node must pass itself to its wiki objects to build the
    # correct URLs to that content.
    forked = original.clone()
    forked.logs = self.logs
    forked.tags = self.tags
    # Recursively fork child nodes
    for node_contained in original.nodes:
        if not node_contained.is_deleted:
            forked_node = None
            try:  # Catch the potential PermissionsError above
                forked_node = node_contained.fork_node(auth=auth, title='')
            except PermissionsError:
                pass  # If this exception is thrown omit the node from the result set
            if forked_node is not None:
                forked.nodes.append(forked_node)
    forked.title = title + forked.title
    forked.is_fork = True
    forked.is_registration = False
    forked.forked_date = when
    forked.forked_from = original
    forked.creator = user
    forked.piwik_site_id = None
    # Forks default to private status
    forked.is_public = False
    # Clear permissions before adding users
    forked.permissions = {}
    forked.visible_contributor_ids = []
    forked.add_contributor(
        contributor=user,
        permissions=CREATOR_PERMISSIONS,
        log=False,
        save=False
    )
    forked.add_log(
        action=NodeLog.NODE_FORKED,
        params={
            'parent_node': original.parent_id,
            'node': original._primary_key,
            'registration': forked._primary_key,
        },
        auth=auth,
        log_date=when,
        save=False,
    )
    forked.save()
    # After fork callback
    for addon in original.get_addons():
        _, message = addon.after_fork(original, forked, user)
        if message:
            status.push_status_message(message, kind='info', trust=True)
    return forked
def register_node(self, schema, auth, template, data, parent=None):
    """Make a frozen copy of a node.

    :param schema: Schema object
    :param auth: All the auth information including user, API key.
    :param template: Template name
    :param data: Form data
    :param parent Node: parent registration of registration to be created
    :return: The created registration.
    :raises PermissionsError: When the user may not register this node.
    :raises NodeStateError: For folders and deleted nodes.
    """
    # NOTE: Admins can register child nodes even if they don't have write access them
    if not self.can_edit(auth=auth) and not self.is_admin_parent(user=auth.user):
        raise PermissionsError(
            'User {} does not have permission '
            'to register this node'.format(auth.user._id)
        )
    if self.is_folder:
        raise NodeStateError("Folders may not be registered")
    # Normalize the template name into a Mongo-safe key.
    template = urllib.unquote_plus(template)
    template = to_mongo(template)
    when = datetime.datetime.utcnow()
    original = self.load(self._primary_key)
    # Note: Cloning a node copies its `wiki_pages_current` and
    # `wiki_pages_versions` fields, but does not clone the underlying
    # database objects to which these dictionaries refer. This means that
    # the cloned node must pass itself to its wiki objects to build the
    # correct URLs to that content.
    if original.is_deleted:
        raise NodeStateError('Cannot register deleted node.')
    registered = original.clone()
    registered.is_registration = True
    registered.registered_date = when
    registered.registered_user = auth.user
    registered.registered_schema = schema
    registered.registered_from = original
    if not registered.registered_meta:
        registered.registered_meta = {}
    registered.registered_meta[template] = data
    registered.contributors = self.contributors
    registered.forked_from = self.forked_from
    registered.creator = self.creator
    registered.logs = self.logs
    registered.tags = self.tags
    registered.piwik_site_id = None
    registered.save()
    if parent:
        registered.parent_node = parent
    # After register callback
    for addon in original.get_addons():
        _, message = addon.after_register(original, registered, auth.user)
        if message:
            status.push_status_message(message, kind='info', trust=False)
    # Recursively register non-deleted children under this registration.
    for node_contained in original.nodes:
        if not node_contained.is_deleted:
            child_registration = node_contained.register_node(
                schema, auth, template, data, parent=registered
            )
            if child_registration and not child_registration.primary:
                registered.nodes.append(child_registration)
    registered.save()
    if settings.ENABLE_ARCHIVER:
        project_signals.after_create_registration.send(self, dst=registered, user=auth.user)
    return registered
def remove_tag(self, tag, auth, save=True):
    """Detach `tag` from this node and write a TAG_REMOVED log.

    No-op when the tag is not present.

    :param tag: Tag id to remove
    :param Auth auth: Authorization for the log entry
    :param bool save: Save the node after removal
    """
    if tag not in self.tags:
        return
    self.tags.remove(tag)
    self.add_log(
        action=NodeLog.TAG_REMOVED,
        params={
            'parent_node': self.parent_id,
            'node': self._primary_key,
            'tag': tag,
        },
        auth=auth,
        save=False,
    )
    if save:
        self.save()
def add_tag(self, tag, auth, save=True, log=True):
    """Attach `tag` to this node, creating the Tag record if needed.

    No-op when the tag is already present.

    :param tag: Tag id to add
    :param Auth auth: Authorization for the log entry
    :param bool save: Save the node after tagging
    :param bool log: Write a TAG_ADDED log entry
    """
    if tag in self.tags:
        return
    tag_record = Tag.load(tag)
    if not tag_record:
        # First use of this tag anywhere: create and persist it.
        tag_record = Tag(_id=tag)
        tag_record.save()
    self.tags.append(tag_record)
    if log:
        self.add_log(
            action=NodeLog.TAG_ADDED,
            params={
                'parent_node': self.parent_id,
                'node': self._primary_key,
                'tag': tag,
            },
            auth=auth,
            save=False,
        )
    if save:
        self.save()
def add_log(self, action, params, auth, foreign_user=None, log_date=None, save=True):
    """Create a NodeLog for `action`, append it to this node's logs, and
    bump the acting user's activity counters.

    :param str action: NodeLog action constant.
    :param dict params: Log payload; `params['node']` is defaulted from
        `params['project']` when absent.
    :param Auth auth: May be None (system actions); the log then has no user.
    :param foreign_user: Non-OSF actor name, if any.
    :param log_date: Explicit timestamp override for the log.
    :param bool save: Save this node after appending the log.
    :return: The created NodeLog.
    """
    user = auth.user if auth else None
    params['node'] = params.get('node') or params.get('project')
    log = NodeLog(
        action=action,
        user=user,
        foreign_user=foreign_user,
        params=params,
    )
    if log_date:
        log.date = log_date
    log.save()
    self.logs.append(log)
    if save:
        self.save()
    if user:
        increment_user_activity_counters(user._primary_key, action, log.date)
    return log
@property
def url(self):
return '/{}/'.format(self._primary_key)
def web_url_for(self, view_name, _absolute=False, _guid=False, *args, **kwargs):
    """Reverse a web (HTML) route for this node; thin wrapper around the
    module-level `web_url_for` with `pid` pre-filled.
    """
    return web_url_for(view_name, pid=self._primary_key, _absolute=_absolute, _guid=_guid, *args, **kwargs)
def api_url_for(self, view_name, _absolute=False, *args, **kwargs):
    """Reverse an API route for this node; thin wrapper around the
    module-level `api_url_for` with `pid` pre-filled.
    """
    return api_url_for(view_name, pid=self._primary_key, _absolute=_absolute, *args, **kwargs)
@property
def absolute_url(self):
    """Fully-qualified web URL (settings.DOMAIN + relative url), or None.

    NOTE(review): `self.url` as defined in this class is never falsy; the
    guard presumably protects subclasses/overrides — confirm.
    """
    if not self.url:
        logger.error('Node {0} has a parent that is not a project'.format(self._id))
        return None
    return urlparse.urljoin(settings.DOMAIN, self.url)
@property
def display_absolute_url(self):
    """Absolute URL stripped of scheme prefix and trailing slashes, for
    display; None when no absolute URL is available.
    """
    url = self.absolute_url
    if url is None:
        return None
    return re.sub(r'https?:', '', url).strip('/')
@property
def api_v2_url(self):
    """Relative APIv2 detail URL for this node."""
    return reverse('nodes:node-detail', kwargs={'node_id': self._id})
@property
def absolute_api_v2_url(self):
    """Absolute APIv2 detail URL; registrations use the registration route."""
    if self.is_registration:
        return absolute_reverse('registrations:registration-detail', kwargs={'registration_id': self._id})
    return absolute_reverse('nodes:node-detail', kwargs={'node_id': self._id})
# used by django and DRF
def get_absolute_url(self):
    """Canonical URL for Django/DRF: the absolute APIv2 URL."""
    return self.absolute_api_v2_url
@property
def api_url(self):
    """Relative APIv1 URL for this node, or None when `url` is falsy."""
    if not self.url:
        logger.error('Node {0} has a parent that is not a project'.format(self._id))
        return None
    return '/api/v1{0}'.format(self.deep_url)
@property
def deep_url(self):
return '/project/{}/'.format(self._primary_key)
@property
def csl(self):  # formats node information into CSL format for citation parsing
    """a dict in CSL-JSON schema

    For details on this schema, see:
        https://github.com/citation-style-language/schema#csl-json-schema

    Includes DOI when an identifier exists and an `issued` date taken from
    the most recent log.
    """
    csl = {
        'id': self._id,
        'title': sanitize.unescape_entities(self.title),
        'author': [
            contributor.csl_name  # method in auth/model.py which parses the names of authors
            for contributor in self.visible_contributors
        ],
        'publisher': 'Open Science Framework',
        'type': 'webpage',
        'URL': self.display_absolute_url,
    }
    doi = self.get_identifier_value('doi')
    if doi:
        csl['DOI'] = doi
    if self.logs:
        # Use the latest log date as the citation's issued date.
        csl['issued'] = datetime_to_csl(self.logs[-1].date)
    return csl
def author_list(self, and_delim='&'):
author_names = [
author.biblio_name
for author in self.visible_contributors
if author
]
if len(author_names) < 2:
return ' {0} '.format(and_delim).join(author_names)
if len(author_names) > 7:
author_names = author_names[:7]
author_names.append('et al.')
return ', '.join(author_names)
return u'{0}, {1} {2}'.format(
', '.join(author_names[:-1]),
and_delim,
author_names[-1]
)
@property
def templated_list(self):
return [
x
for x in self.node__template_node
if not x.is_deleted
]
@property
def parent_node(self):
"""The parent node, if it exists, otherwise ``None``. Note: this
property is named `parent_node` rather than `parent` to avoid a
conflict with the `parent` back-reference created by the `nodes`
field on this schema.
"""
try:
if not self.node__parent[0].is_deleted:
return self.node__parent[0]
except IndexError:
pass
return None
@parent_node.setter
def parent_node(self, parent):
parent.nodes.append(self)
parent.save()
@property
def root(self):
if self.parent_node:
return self.parent_node.root
else:
return self
@property
def archiving(self):
job = self.archive_job
return job and not job.done and not job.archive_tree_finished()
@property
def archive_job(self):
return self.archivejob__active[0] if self.archivejob__active else None
@property
def registrations(self):
    """Registrations of this node whose archival has completed."""
    return self.node__registrations.find(Q('archiving', 'eq', False))
@property
def watch_url(self):
return os.path.join(self.api_url, "watch/")
@property
def parent_id(self):
if self.node__parent:
return self.node__parent[0]._primary_key
return None
@property
def forked_from_id(self):
if self.forked_from:
return self.forked_from._id
return None
@property
def project_or_component(self):
return 'project' if self.category == 'project' else 'component'
def is_contributor(self, user):
return (
user is not None
and (
user._id in self.contributors
)
)
def add_addon(self, addon_name, auth, log=True, *args, **kwargs):
    """Add an add-on to the node. Do nothing if the addon is already
    enabled.

    :param str addon_name: Name of add-on
    :param Auth auth: Consolidated authorization object
    :param bool log: Add a log after adding the add-on
    :return: A boolean, whether the addon was added
    """
    ret = AddonModelMixin.add_addon(self, addon_name, auth=auth,
                                    *args, **kwargs)
    if ret and log:
        # Resolve display name of the addon for the log entry.
        config = settings.ADDONS_AVAILABLE_DICT[addon_name]
        self.add_log(
            action=NodeLog.ADDON_ADDED,
            params={
                'project': self.parent_id,
                'node': self._primary_key,
                'addon': config.full_name,
            },
            auth=auth,
            save=False,
        )
        self.save()  # TODO: here, or outside the conditional? @mambocab
    return ret
def delete_addon(self, addon_name, auth, _force=False):
    """Delete an add-on from the node.

    :param str addon_name: Name of add-on
    :param Auth auth: Consolidated authorization object
    :param bool _force: For migration testing ONLY. Do not set to True
        in the application, or else projects will be allowed to delete
        mandatory add-ons!
    :return bool: Add-on was deleted
    """
    ret = super(Node, self).delete_addon(addon_name, auth, _force)
    if ret:
        config = settings.ADDONS_AVAILABLE_DICT[addon_name]
        self.add_log(
            action=NodeLog.ADDON_REMOVED,
            params={
                'project': self.parent_id,
                'node': self._primary_key,
                'addon': config.full_name,
            },
            auth=auth,
            save=False,
        )
        self.save()
        # TODO: save here or outside the conditional? @mambocab
    return ret
def callback(self, callback, recursive=False, *args, **kwargs):
"""Invoke callbacks of attached add-ons and collect messages.
:param str callback: Name of callback method to invoke
:param bool recursive: Apply callback recursively over nodes
:return list: List of callback messages
"""
messages = []
for addon in self.get_addons():
method = getattr(addon, callback)
message = method(self, *args, **kwargs)
if message:
messages.append(message)
if recursive:
for child in self.nodes:
if not child.is_deleted:
messages.extend(
child.callback(
callback, recursive, *args, **kwargs
)
)
return messages
def replace_contributor(self, old, new):
    """Swap contributor `old` for `new` in place, carrying over
    permissions and visibility and clearing any unclaimed record `old`
    held for this project.

    :return bool: True when `old` was found and replaced, else False.
    """
    for i, contrib in enumerate(self.contributors):
        if contrib._primary_key == old._primary_key:
            self.contributors[i] = new
            # Remove unclaimed record for the project
            if self._primary_key in old.unclaimed_records:
                del old.unclaimed_records[self._primary_key]
                old.save()
            # Transfer each permission, then drop the old user's entry.
            for permission in self.get_permissions(old):
                self.add_permission(new, permission)
            self.permissions.pop(old._id)
            if old._id in self.visible_contributor_ids:
                self.visible_contributor_ids[self.visible_contributor_ids.index(old._id)] = new._id
            return True
    return False
def remove_contributor(self, contributor, auth, log=True):
    """Remove a contributor from this node.

    :param contributor: User object, the contributor to be removed
    :param auth: All the auth information including user, API key.
    :param bool log: Write a CONTRIB_REMOVED log entry.
    :return bool: False when removal would leave no visible contributor
        or no registered admin; True otherwise.

    NOTE(review): the early False returns occur after the contributor and
    permissions were already mutated in memory (unsaved) — confirm callers
    discard the instance in that case.
    """
    # remove unclaimed record if necessary
    if self._primary_key in contributor.unclaimed_records:
        del contributor.unclaimed_records[self._primary_key]
    self.contributors.remove(contributor._id)
    self.clear_permission(contributor)
    if contributor._id in self.visible_contributor_ids:
        self.visible_contributor_ids.remove(contributor._id)
    if not self.visible_contributor_ids:
        return False
    # Node must have at least one registered admin user
    # TODO: Move to validator or helper
    admins = [
        user for user in self.contributors
        if self.has_permission(user, 'admin')
        and user.is_registered
    ]
    if not admins:
        return False
    # Clear permissions for removed user
    self.permissions.pop(contributor._id, None)
    # After remove callback
    for addon in self.get_addons():
        message = addon.after_remove_contributor(self, contributor, auth)
        if message:
            status.push_status_message(message, kind='info', trust=True)
    if log:
        self.add_log(
            action=NodeLog.CONTRIB_REMOVED,
            params={
                'project': self.parent_id,
                'node': self._primary_key,
                'contributors': [contributor._id],
            },
            auth=auth,
            save=False,
        )
    self.save()
    #send signal to remove this user from project subscriptions
    auth_signals.contributor_removed.send(contributor, node=self)
    return True
def remove_contributors(self, contributors, auth=None, log=True, save=False):
    """Remove several contributors, writing a single aggregated
    CONTRIB_REMOVED log entry.

    :param contributors: Iterable of User objects to remove
    :param Auth auth: Authorization for the log entry
    :param bool log: Write the aggregated log entry
    :param bool save: Save after all removals
    :return bool: True only when every individual removal succeeded.
    """
    outcomes = []
    removed_ids = []
    for contributor in contributors:
        outcomes.append(
            self.remove_contributor(contributor=contributor, auth=auth, log=False)
        )
        removed_ids.append(contributor._id)
    if log:
        self.add_log(
            action=NodeLog.CONTRIB_REMOVED,
            params={
                'project': self.parent_id,
                'node': self._primary_key,
                'contributors': removed_ids,
            },
            auth=auth,
            save=False,
        )
    if save:
        self.save()
    return all(outcomes)
def update_contributor(self, user, permission, visible, auth, save=False):
    """ TODO: this method should be updated as a replacement for the main loop of
    Node#manage_contributors. Right now there are redundancies, but to avoid major
    feature creep this will not be included as this time.

    Also checks to make sure unique admin is not removing own admin privilege.

    :param user: Contributor whose permission/visibility is being changed.
    :param permission: Single permission name to expand; falsy skips the
        permission update.
    :param visible: New visibility, or None to leave visibility unchanged.
    :param Auth auth: Must belong to an admin of this node.
    :param bool save: Save after each mutation.
    :raises PermissionsError: When `auth.user` is not an admin.
    :raises NodeStateError: When demoting the only admin.
    :raises ValueError: When `user` is not a contributor.
    """
    if not self.has_permission(auth.user, ADMIN):
        raise PermissionsError("Only admins can modify contributor permissions")
    permissions = expand_permissions(permission) or DEFAULT_CONTRIBUTOR_PERMISSIONS
    admins = [contrib for contrib in self.contributors if self.has_permission(contrib, 'admin') and contrib.is_active]
    if not len(admins) > 1:
        # has only one admin
        # NOTE(review): admins[0] raises IndexError when there are zero
        # active admins — confirm that state is unreachable here.
        admin = admins[0]
        if admin == user and ADMIN not in permissions:
            raise NodeStateError('{} is the only admin.'.format(user.fullname))
    if user not in self.contributors:
        raise ValueError(
            'User {0} not in contributors'.format(user.fullname)
        )
    if permission:
        permissions = expand_permissions(permission)
        if set(permissions) != set(self.get_permissions(user)):
            self.set_permissions(user, permissions, save=save)
            permissions_changed = {
                user._id: permissions
            }
            self.add_log(
                action=NodeLog.PERMISSIONS_UPDATED,
                params={
                    'project': self.parent_id,
                    'node': self._id,
                    'contributors': permissions_changed,
                },
                auth=auth,
                save=save
            )
            with TokuTransaction():
                # Notify when a contributor is reduced to read-only.
                if ['read'] in permissions_changed.values():
                    project_signals.write_permissions_revoked.send(self)
    if visible is not None:
        self.set_visible(user, visible, auth=auth, save=save)
    self.update_visible_ids()
def manage_contributors(self, user_dicts, auth, save=False):
    """Reorder and remove contributors.

    :param list user_dicts: Ordered list of contributors represented as
        dictionaries of the form:
        {'id': <id>, 'permission': <One of 'read', 'write', 'admin'>, 'visible': bool}
    :param Auth auth: Consolidated authentication information
    :param bool save: Save changes
    :raises: ValueError if any users in `users` not in contributors or if
        no admin contributors remaining
    """
    with TokuTransaction():
        users = []
        user_ids = []
        permissions_changed = {}
        visibility_removed = []
        to_retain = []
        to_remove = []
        for user_dict in user_dicts:
            user = User.load(user_dict['id'])
            if user is None:
                raise ValueError('User not found')
            if user not in self.contributors:
                raise ValueError(
                    'User {0} not in contributors'.format(user.fullname)
                )
            permissions = expand_permissions(user_dict['permission'])
            if set(permissions) != set(self.get_permissions(user)):
                self.set_permissions(user, permissions, save=False)
                permissions_changed[user._id] = permissions
            # visible must be added before removed to ensure they are validated properly
            if user_dict['visible']:
                self.set_visible(user,
                                 visible=True,
                                 auth=auth)
            else:
                visibility_removed.append(user)
            users.append(user)
            user_ids.append(user_dict['id'])
        for user in visibility_removed:
            self.set_visible(user,
                             visible=False,
                             auth=auth)
        # Partition current contributors into kept vs. removed.
        for user in self.contributors:
            if user._id in user_ids:
                to_retain.append(user)
            else:
                to_remove.append(user)
        # TODO: Move to validator or helper @jmcarp
        admins = [
            user for user in users
            if self.has_permission(user, 'admin')
            and user.is_registered
        ]
        # NOTE(review): `users is None` can never be True (it is always a
        # list here); only the `not admins` half is effective — confirm.
        if users is None or not admins:
            raise ValueError(
                'Must have at least one registered admin contributor'
            )
        if to_retain != users:
            self.add_log(
                action=NodeLog.CONTRIB_REORDERED,
                params={
                    'project': self.parent_id,
                    'node': self._id,
                    'contributors': [
                        user._id
                        for user in users
                    ],
                },
                auth=auth,
                save=False,
            )
        if to_remove:
            self.remove_contributors(to_remove, auth=auth, save=False)
        self.contributors = users
        if permissions_changed:
            self.add_log(
                action=NodeLog.PERMISSIONS_UPDATED,
                params={
                    'project': self.parent_id,
                    'node': self._id,
                    'contributors': permissions_changed,
                },
                auth=auth,
                save=False,
            )
        # Update list of visible IDs
        self.update_visible_ids()
        if save:
            self.save()
    with TokuTransaction():
        # NOTE(review): precedence — this reads as
        # `to_remove or (permissions_changed and ['read'] in ...)`;
        # the signal fires on any removal OR any demotion to read-only.
        if to_remove or permissions_changed and ['read'] in permissions_changed.values():
            project_signals.write_permissions_revoked.send(self)
def add_contributor(self, contributor, permissions=None, visible=True,
                    auth=None, log=True, save=False):
    """Add a contributor to the project.

    :param User contributor: The contributor to be added
    :param list permissions: Permissions to grant to the contributor
    :param bool visible: Contributor is visible in project dashboard
    :param Auth auth: All the auth information including user, API key
    :param bool log: Add log to self
    :param bool save: Save after adding contributor
    :returns: Whether contributor was added (False if already present,
        though permissions may still be updated in that case)
    """
    # Cap on the length of the adding user's "recently added" list.
    MAX_RECENT_LENGTH = 15

    # If user is merged into another account, use master account
    contrib_to_add = contributor.merged_by if contributor.is_merged else contributor
    if contrib_to_add not in self.contributors:
        self.contributors.append(contrib_to_add)
        if visible:
            self.set_visible(contrib_to_add, visible=True, log=False)

        # Add default contributor permissions
        permissions = permissions or DEFAULT_CONTRIBUTOR_PERMISSIONS
        for permission in permissions:
            self.add_permission(contrib_to_add, permission, save=False)

        # Add contributor to recently added list for user
        if auth is not None:
            user = auth.user
            # Move-to-front: drop any existing occurrence, then re-insert at 0.
            if contrib_to_add in user.recently_added:
                user.recently_added.remove(contrib_to_add)
            user.recently_added.insert(0, contrib_to_add)
            while len(user.recently_added) > MAX_RECENT_LENGTH:
                user.recently_added.pop()

        if log:
            self.add_log(
                action=NodeLog.CONTRIB_ADDED,
                params={
                    'project': self.parent_id,
                    'node': self._primary_key,
                    'contributors': [contrib_to_add._primary_key],
                },
                auth=auth,
                save=False,
            )
        if save:
            self.save()

        # NOTE: the signal carries the *original* contributor object, not the
        # merged master account (contrib_to_add) that was actually appended.
        project_signals.contributor_added.send(self, contributor=contributor)

        return True

    # Permissions must be overridden if changed when contributor is added to parent
    # he/she is already on a child of.
    elif contrib_to_add in self.contributors and permissions is not None:
        self.set_permissions(contrib_to_add, permissions)
        if save:
            self.save()

        return False
    else:
        return False
def add_contributors(self, contributors, auth=None, log=True, save=False):
    """Add multiple contributors in one call.

    :param list contributors: A list of dictionaries of the form:
        {
            'user': <User object>,
            'permissions': <Permissions list, e.g. ['read', 'write']>,
            'visible': <Boolean indicating whether or not user is a bibliographic contributor>
        }
    :param auth: All the auth information including user, API key.
    :param log: Add a single combined log entry to self.
    :param save: Save after adding all contributors.
    """
    # Per-contributor logging/saving is suppressed; one combined log entry
    # (and one save) covers the whole batch below.
    for entry in contributors:
        self.add_contributor(
            contributor=entry['user'],
            permissions=entry['permissions'],
            visible=entry['visible'],
            auth=auth,
            log=False,
            save=False,
        )
    if log and contributors:
        added_ids = [entry['user']._id for entry in contributors]
        self.add_log(
            action=NodeLog.CONTRIB_ADDED,
            params={
                'project': self.parent_id,
                'node': self._primary_key,
                'contributors': added_ids,
            },
            auth=auth,
            save=False,
        )
    if save:
        self.save()
def add_unregistered_contributor(self, fullname, email, auth,
                                 permissions=None, save=False):
    """Add a non-registered contributor to the project.

    :param str fullname: The full name of the person.
    :param str email: The email address of the person.
    :param Auth auth: Auth object for the user adding the contributor.
    :param list permissions: Permissions to grant to the contributor.
    :param bool save: NOTE(review): this parameter is currently ignored —
        the method always calls self.save() at the end; confirm intent.
    :returns: The added contributor
    :raises: DuplicateEmailError if user with given email is already in the database.
    """
    # Create a new user record
    contributor = User.create_unregistered(fullname=fullname, email=email)

    contributor.add_unclaimed_record(node=self, referrer=auth.user,
                                     given_name=fullname, email=email)
    try:
        contributor.save()
    except ValidationValueError:  # User with same email already exists
        # Fall back to the existing account for this email address.
        contributor = get_user(email=email)
        # Unregistered users may have multiple unclaimed records, so
        # only raise error if user is registered.
        if contributor.is_registered or self.is_contributor(contributor):
            raise
        contributor.add_unclaimed_record(node=self, referrer=auth.user,
                                         given_name=fullname, email=email)
        contributor.save()

    self.add_contributor(
        contributor, permissions=permissions, auth=auth,
        log=True, save=False,
    )
    self.save()
    return contributor
def set_privacy(self, permissions, auth=None, log=True, save=True, meeting_creation=False):
    """Set the permissions for this node. Also, based on meeting_creation, queues
    an email to user about abilities of public projects.

    :param permissions: A string, either 'public' or 'private'
    :param auth: All the auth information including user, API key.
    :param bool log: Whether to add a NodeLog for the privacy change.
    :param bool save: Whether to save the change immediately.
    :param bool meeting_creation: Whether this was created due to a meetings email.
    :returns: True if the privacy setting changed, False if it was a no-op.
    :raises PermissionsError: if auth is given and its user is not an admin.
    :raises NodeStateError: for invalid registration/embargo transitions.
    """
    if auth and not self.has_permission(auth.user, ADMIN):
        raise PermissionsError('Must be an admin to change privacy settings.')
    if permissions == 'public' and not self.is_public:
        if self.is_registration:
            if self.is_pending_embargo:
                raise NodeStateError("A registration with an unapproved embargo cannot be made public.")
            # Making an approved-embargo registration public terminates the
            # embargo early by marking it rejected.
            if self.embargo_end_date and not self.is_pending_embargo:
                self.embargo.state = Embargo.REJECTED
                self.embargo.save()
        self.is_public = True
    elif permissions == 'private' and self.is_public:
        if self.is_registration and not self.is_pending_embargo:
            raise NodeStateError("Public registrations must be retracted, not made private.")
        else:
            self.is_public = False
    else:
        # Requested state equals current state (or unrecognized string): no-op.
        return False

    # After set permissions callback
    for addon in self.get_addons():
        message = addon.after_set_privacy(self, permissions)
        if message:
            status.push_status_message(message, kind='info', trust=False)

    if log:
        action = NodeLog.MADE_PUBLIC if permissions == 'public' else NodeLog.MADE_PRIVATE
        self.add_log(
            action=action,
            params={
                'project': self.parent_id,
                'node': self._primary_key,
            },
            auth=auth,
            save=False,
        )
    if save:
        self.save()
    if auth and permissions == 'public':
        project_signals.privacy_set_public.send(auth.user, node=self, meeting_creation=meeting_creation)
    return True
def admin_public_wiki(self, user):
    """Truthy when this node has the wiki add-on, *user* is an admin here,
    and the node is public. Mirrors ``a and b and c`` short-circuiting:
    the first falsy operand is returned unchanged.
    """
    wiki_enabled = self.has_addon('wiki')
    if not wiki_enabled:
        return wiki_enabled
    is_admin = self.has_permission(user, 'admin')
    if not is_admin:
        return is_admin
    return self.is_public
def include_wiki_settings(self, user):
    """Check if node meets requirements to make publicly editable.

    True when this node — or any descendant — is a public node on which
    *user* has wiki-admin rights (see admin_public_wiki).
    """
    own_result = self.admin_public_wiki(user)
    if own_result:
        # Preserve `or` semantics: return the truthy value itself.
        return own_result
    return any(
        descendant.admin_public_wiki(user)
        for descendant in self.get_descendants_recursive()
    )
# TODO: Move to wiki add-on
def get_wiki_page(self, name=None, version=None, id=None):
    """Load a NodeWikiPage by name (optionally at a version) or by id.

    :param name: page name; when given, `id` is resolved from it
    :param version: positive int or digit string (1-indexed), 'previous',
        'current', or None (treated as current)
    :param id: NodeWikiPage primary key, used directly when `name` is falsy
    :returns: the NodeWikiPage, or None if the page/version does not exist
    """
    from website.addons.wiki.model import NodeWikiPage

    if name:
        key = to_mongo_key(name.strip())
        try:
            if version and (isinstance(version, int) or version.isdigit()):
                # Explicit versions are one-indexed into the history list.
                id = self.wiki_pages_versions[key][int(version) - 1]
            elif version == 'previous':
                id = self.wiki_pages_versions[key][-2]
            elif version == 'current' or version is None:
                id = self.wiki_pages_current[key]
            else:
                return None
        except (KeyError, IndexError):
            return None

    return NodeWikiPage.load(id)
# TODO: Move to wiki add-on
def update_node_wiki(self, name, content, auth):
    """Update the node's wiki page with new content.

    Creates a new NodeWikiPage revision, demotes the previous current
    revision (if any), and records a WIKI_UPDATED log entry.

    :param name: A string, the page's name, e.g. ``"home"``.
    :param content: A string, the posted content.
    :param auth: All the auth information including user, API key.
    """
    from website.addons.wiki.model import NodeWikiPage

    name = (name or '').strip()
    key = to_mongo_key(name)

    if key not in self.wiki_pages_current:
        # No current revision: start at 1, or continue the numbering if the
        # page existed previously and was deleted.
        if key in self.wiki_pages_versions:
            version = len(self.wiki_pages_versions[key]) + 1
        else:
            version = 1
    else:
        # Demote the existing current revision and bump the version counter.
        current = NodeWikiPage.load(self.wiki_pages_current[key])
        current.is_current = False
        version = current.version + 1
        current.save()

    new_page = NodeWikiPage(
        page_name=name,
        version=version,
        user=auth.user,
        is_current=True,
        node=self,
        content=content
    )
    new_page.save()

    # check if the wiki page already exists in versions (existed once and is now deleted)
    if key not in self.wiki_pages_versions:
        self.wiki_pages_versions[key] = []
    self.wiki_pages_versions[key].append(new_page._primary_key)
    self.wiki_pages_current[key] = new_page._primary_key

    self.add_log(
        action=NodeLog.WIKI_UPDATED,
        params={
            'project': self.parent_id,
            'node': self._primary_key,
            'page': new_page.page_name,
            'page_id': new_page._primary_key,
            'version': new_page.version,
        },
        auth=auth,
        log_date=new_page.date,
        save=False,
    )
    self.save()
# TODO: Move to wiki add-on
def rename_node_wiki(self, name, new_name, auth):
    """Rename the node's wiki page with new name.

    :param name: A string, the page's name, e.g. ``"My Page"``.
    :param new_name: A string, the new page's name, e.g. ``"My Renamed Page"``.
    :param auth: All the auth information including user, API key.
    :raises PageCannotRenameError: when attempting to rename the home page.
    :raises PageNotFoundError: when no page with `name` exists.
    :raises PageConflictError: when `new_name` maps to an existing,
        different page (or to 'home').
    """
    # TODO: Fix circular imports
    from website.addons.wiki.exceptions import (
        PageCannotRenameError,
        PageConflictError,
        PageNotFoundError,
    )

    name = (name or '').strip()
    key = to_mongo_key(name)
    new_name = (new_name or '').strip()
    new_key = to_mongo_key(new_name)
    page = self.get_wiki_page(name)

    if key == 'home':
        raise PageCannotRenameError('Cannot rename wiki home page')
    if not page:
        raise PageNotFoundError('Wiki page not found')
    # key == new_key is permitted: a display-only rename (e.g. case change).
    if (new_key in self.wiki_pages_current and key != new_key) or new_key == 'home':
        raise PageConflictError(
            'Page already exists with name {0}'.format(
                new_name,
            )
        )

    # rename the page first in case we hit a validation exception.
    old_name = page.page_name
    page.rename(new_name)

    # TODO: merge historical records like update (prevents log breaks)
    # transfer the old page versions/current keys to the new name.
    if key != new_key:
        self.wiki_pages_versions[new_key] = self.wiki_pages_versions[key]
        del self.wiki_pages_versions[key]
        self.wiki_pages_current[new_key] = self.wiki_pages_current[key]
        del self.wiki_pages_current[key]
        if key in self.wiki_private_uuids:
            self.wiki_private_uuids[new_key] = self.wiki_private_uuids[key]
            del self.wiki_private_uuids[key]

    self.add_log(
        action=NodeLog.WIKI_RENAMED,
        params={
            'project': self.parent_id,
            'node': self._primary_key,
            'page': page.page_name,
            'page_id': page._primary_key,
            'old_page': old_name,
            'version': page.version,
        },
        auth=auth,
        save=False,
    )
    self.save()
def delete_node_wiki(self, name, auth):
    """Remove the named wiki page from the set of current pages and log it.

    Version history (`wiki_pages_versions`) is left intact, which allows a
    later re-creation to continue the version numbering (see
    update_node_wiki).

    :param name: The page's name.
    :param auth: All the auth information including user, API key.
    """
    name = (name or '').strip()
    key = to_mongo_key(name)
    # NOTE(review): passes the mongo key where get_wiki_page expects a name;
    # correct only if to_mongo_key is idempotent — confirm.
    page = self.get_wiki_page(key)

    # NOTE(review): raises KeyError if the page is not current, and
    # AttributeError below if get_wiki_page returned None; callers appear
    # expected to have validated existence beforehand — confirm.
    del self.wiki_pages_current[key]

    self.add_log(
        action=NodeLog.WIKI_DELETED,
        params={
            'project': self.parent_id,
            'node': self._primary_key,
            'page': page.page_name,
            'page_id': page._primary_key,
        },
        auth=auth,
        save=False,
    )
    self.save()
def get_stats(self, detailed=False):
    """Return the visit counters recorded for this node.

    :param bool detailed: Reserved; detailed statistics are not implemented.
    :raises NotImplementedError: when detailed is truthy.
    """
    if detailed:
        raise NotImplementedError(
            'Detailed stats exist, but are not yet implemented.'
        )
    return get_basic_counters('node:%s' % self._primary_key)
# TODO: Deprecate this; it duplicates much of what serialize_project already
# does
def serialize(self, auth=None):
    """Dictionary representation of node that is nested within a NodeLog's
    representation.
    """
    # TODO: incomplete implementation
    serialized = {
        'id': str(self._primary_key),
        'category': self.category_display,
        'node_type': self.project_or_component,
        'url': self.url,
        # TODO: Titles shouldn't contain escaped HTML in the first place
        'title': sanitize.unescape_entities(self.title),
        'path': self.path_above(auth),
        'api_url': self.api_url,
        'is_public': self.is_public,
        'is_registration': self.is_registration,
    }
    return serialized
def _initiate_retraction(self, user, justification=None):
    """Create and attach a Retraction, authorizing every active admin.

    :param user: User who initiated the retraction
    :param justification: Justification, if given, for retraction
    :returns: the saved Retraction instance
    """
    retraction = Retraction(
        initiated_by=user,
        justification=justification or None,  # make empty strings None
        state=Retraction.UNAPPROVED,
    )
    retraction.save()  # needs a primary key before it can be referenced
    self.retraction = retraction
    self.save()  # persist the Node.retraction foreign reference
    # Every active admin contributor must sign off on the retraction.
    for contrib in self.contributors:
        if self.has_permission(contrib, 'admin') and contrib.is_active:
            retraction.add_authorizer(contrib)
    retraction.save()  # persist the approval state
    return retraction
def retract_registration(self, user, justification=None, save=True):
    """Begin the retraction process for this registration.

    Instantiates a Retraction, associates it with this node, and records a
    RETRACTION_INITIATED log on the project the registration was made from.

    :param user: User initiating the retraction
    :param justification: Optional reason for the retraction
    :param bool save: Persist the node after attaching the retraction
    :raises NodeStateError: if this node is not a retractable registration,
        or is not the root of its registration tree
    """
    if not self.is_registration:
        raise NodeStateError('Only public or embargoed registrations may be retracted.')
    if not (self.is_public or self.embargo_end_date or self.is_pending_embargo):
        raise NodeStateError('Only public or embargoed registrations may be retracted.')

    if self.root is not self:
        raise NodeStateError('Retraction of non-parent registrations is not permitted.')

    retraction = self._initiate_retraction(user, justification)
    self.registered_from.add_log(
        action=NodeLog.RETRACTION_INITIATED,
        params={
            'node': self._id,
            'retraction_id': retraction._id,
        },
        auth=Auth(user),
    )
    self.retraction = retraction
    if save:
        self.save()
def _is_embargo_date_valid(self, end_date):
    """True iff *end_date* lies within the configured embargo window
    (settings.EMBARGO_END_DATE_MIN .. settings.EMBARGO_END_DATE_MAX,
    measured from the current UTC time).
    """
    remaining = end_date - datetime.datetime.utcnow()
    return settings.EMBARGO_END_DATE_MIN <= remaining <= settings.EMBARGO_END_DATE_MAX
def _initiate_embargo(self, user, end_date, for_existing_registration=False):
    """Create and attach an Embargo, authorizing every active admin.

    :param user: User who initiated the embargo
    :param end_date: Date when the registration should be made public
    :param bool for_existing_registration: embargo applies to an already
        existing registration rather than one created alongside it
    :returns: the saved Embargo instance
    """
    embargo = Embargo(
        initiated_by=user,
        # Normalize to midnight on the given date.
        end_date=datetime.datetime.combine(end_date, datetime.datetime.min.time()),
        for_existing_registration=for_existing_registration,
    )
    embargo.save()  # needs a primary key before it can be referenced
    self.embargo = embargo
    self.save()  # persist the Node.embargo foreign reference
    # Every active admin contributor must sign off on the embargo.
    for contrib in self.contributors:
        if self.has_permission(contrib, 'admin') and contrib.is_active:
            embargo.add_authorizer(contrib)
    embargo.save()  # persist the approval_state
    return embargo
def embargo_registration(self, user, end_date, for_existing_registration=False):
    """Enter registration into an embargo period at end of which, it will
    be made public

    :param user: User initiating the embargo
    :param end_date: Date when the registration should be made public
    :param bool for_existing_registration: embargo applies to an already
        existing registration rather than one created alongside it
    :raises: NodeStateError if Node is not a registration
    :raises: PermissionsError if user is not an admin for the Node
    :raises: ValidationValueError if end_date is not within time constraints
    """
    if not self.is_registration:
        raise NodeStateError('Only registrations may be embargoed')
    if not self.has_permission(user, 'admin'):
        raise PermissionsError('Only admins may embargo a registration')
    if not self._is_embargo_date_valid(end_date):
        # NOTE(review): the actual window is settings.EMBARGO_END_DATE_MIN..MAX
        # (see _is_embargo_date_valid); "more than one day" may not match the
        # configured minimum — confirm the message against settings.
        raise ValidationValueError('Embargo end date must be more than one day in the future')

    embargo = self._initiate_embargo(user, end_date, for_existing_registration=for_existing_registration)

    self.registered_from.add_log(
        action=NodeLog.EMBARGO_INITIATED,
        params={
            'node': self._id,
            'embargo_id': embargo._id,
        },
        auth=Auth(user),
        save=True,
    )
    # An embargoed registration must be private until the embargo ends.
    if self.is_public:
        self.set_privacy('private', Auth(user))
def _initiate_approval(self, user):
    """Create and attach a RegistrationApproval whose window starts now.

    :param user: User who initiated the registration approval
    :returns: the saved RegistrationApproval instance
    """
    end_date = datetime.datetime.now() + settings.REGISTRATION_APPROVAL_TIME
    approval = RegistrationApproval(
        initiated_by=user,
        end_date=end_date,
    )
    approval.save()  # needs a primary key before it can be referenced
    self.registration_approval = approval
    self.save()  # persist the Node.registration_approval foreign reference
    # Every active admin contributor must sign off on the approval.
    for contrib in self.contributors:
        if self.has_permission(contrib, 'admin') and contrib.is_active:
            approval.add_authorizer(contrib)
    approval.save()  # persist the approval_state
    return approval
def require_approval(self, user):
    """Attach a RegistrationApproval to this registration and log it.

    :param user: admin User initiating the approval process
    :raises NodeStateError: if this node is not a registration
    :raises PermissionsError: if *user* is not an admin on this node
    """
    if not self.is_registration:
        raise NodeStateError('Only registrations can require registration approval')
    if not self.has_permission(user, 'admin'):
        raise PermissionsError('Only admins can initiate a registration approval')

    approval = self._initiate_approval(user)

    log_params = {
        'node': self._id,
        'registration_approval_id': approval._id,
    }
    self.registered_from.add_log(
        action=NodeLog.REGISTRATION_APPROVAL_INITIATED,
        params=log_params,
        auth=Auth(user),
        save=True,
    )
# TODO make private?
@Node.subscribe('before_save')
def validate_permissions(schema, instance):
    """Ensure that user IDs in `contributors` and `permissions` match.

    Runs as a 'before_save' subscriber on Node and raises
    ValidationValueError when the two collections disagree in either
    direction.
    """
    node = instance
    contributor_ids = set([user._id for user in node.contributors])
    permission_ids = set(node.permissions.keys())
    mismatched_contributors = contributor_ids.difference(permission_ids)
    if mismatched_contributors:
        raise ValidationValueError(
            'Contributors {0} missing from `permissions` on node {1}'.format(
                ', '.join(mismatched_contributors),
                node._id,
            )
        )
    mismatched_permissions = permission_ids.difference(contributor_ids)
    if mismatched_permissions:
        # BUG FIX: this message previously interpolated `mismatched_contributors`,
        # which is always empty on this branch; report the stray permission
        # keys instead.
        raise ValidationValueError(
            'Permission keys {0} missing from `contributors` on node {1}'.format(
                ', '.join(mismatched_permissions),
                node._id,
            )
        )
@Node.subscribe('before_save')
def validate_visible_contributors(schema, instance):
    """Ensure that user IDs in `contributors` and `visible_contributor_ids`
    match.
    """
    node = instance
    for user_id in node.visible_contributor_ids:
        if user_id in node.contributors:
            continue
        raise ValidationValueError(
            ('User {0} is in `visible_contributor_ids` but not in '
             '`contributors` on node {1}').format(
                user_id,
                node._id,
            )
        )
class WatchConfig(StoredObject):
    """Notification settings for a watched node."""

    _id = fields.StringField(primary=True, default=lambda: str(ObjectId()))
    node = fields.ForeignField('Node', backref='watched')
    # NOTE(review): presumably "include in digest email" — confirm with
    # the notification-sending code.
    digest = fields.BooleanField(default=False)
    # NOTE(review): presumably "notify immediately" — confirm likewise.
    immediate = fields.BooleanField(default=False)

    def __repr__(self):
        return '<WatchConfig(node="{self.node}")>'.format(self=self)
class PrivateLink(StoredObject):
    """A shareable key granting view access to one or more nodes."""

    _id = fields.StringField(primary=True, default=lambda: str(ObjectId()))
    date_created = fields.DateTimeField(auto_now_add=datetime.datetime.utcnow)
    key = fields.StringField(required=True)
    name = fields.StringField()
    is_deleted = fields.BooleanField(default=False)
    # NOTE(review): presumably hides contributor identities from link
    # visitors — confirm against the view layer.
    anonymous = fields.BooleanField(default=False)

    nodes = fields.ForeignField('node', list=True, backref='shared')
    creator = fields.ForeignField('user', backref='created')

    @property
    def node_ids(self):
        # IDs of every node shared through this link.
        node_ids = [node._id for node in self.nodes]
        return node_ids

    def node_scale(self, node):
        """Pixel indentation for rendering *node* in the nested list emitted
        by to_json: 20px per ancestor reachable within this link, or -40 when
        the node (or its parent chain) falls outside the link.
        """
        # node may be None if previous node's parent is deleted
        if node is None or node.parent_id not in self.node_ids:
            return -40
        else:
            offset = 20 if node.parent_node is not None else 0
            return offset + self.node_scale(node.parent_node)

    def to_json(self):
        """Serializable representation of this link, with non-deleted nodes
        annotated with a CSS indentation 'scale' (see node_scale).
        """
        return {
            "id": self._id,
            "date_created": iso8601format(self.date_created),
            "key": self.key,
            "name": sanitize.unescape_entities(self.name),
            "creator": {'fullname': self.creator.fullname, 'url': self.creator.profile_url},
            "nodes": [{'title': x.title, 'url': x.url, 'scale': str(self.node_scale(x)) + 'px', 'category': x.category}
                      for x in self.nodes if not x.is_deleted],
            "anonymous": self.anonymous
        }
class Sanction(StoredObject):
    """Sanction object is a generic way to track approval states.

    A sanction holds a set of authorizers (keyed by user id) each of whom
    receives an approval token and a rejection token; the sanction moves
    from UNAPPROVED to APPROVED once every authorizer has approved, or to
    REJECTED as soon as any authorizer rejects.
    """

    abstract = True

    # Possible values of `state`.
    UNAPPROVED = 'unapproved'
    APPROVED = 'approved'
    REJECTED = 'rejected'

    # Human-readable name used in user-facing messages; subclasses override.
    DISPLAY_NAME = 'Sanction'
    # SHORT_NAME must correspond with the associated foreign field to query against,
    # e.g. Node.find_one(Q(sanction.SHORT_NAME, 'eq', sanction))
    SHORT_NAME = 'sanction'

    # Message templates formatted with DISPLAY_NAME before being raised.
    APPROVAL_NOT_AUTHORIZED_MESSAGE = 'This user is not authorized to approve this {DISPLAY_NAME}'
    APPROVAL_INVALID_TOKEN_MESSAGE = 'Invalid approval token provided for this {DISPLAY_NAME}.'
    # NOTE(review): attribute name is misspelled ("MESSAEGE"); kept as-is
    # because external code may reference it by this exact name.
    REJECTION_NOT_AUTHORIZED_MESSAEGE = 'This user is not authorized to reject this {DISPLAY_NAME}'
    REJECTION_INVALID_TOKEN_MESSAGE = 'Invalid rejection token provided for this {DISPLAY_NAME}.'

    _id = fields.StringField(primary=True, default=lambda: str(ObjectId()))
    initiation_date = fields.DateTimeField(auto_now_add=datetime.datetime.utcnow)
    end_date = fields.DateTimeField(default=None)

    # Sanction subclasses must have an initiated_by field
    # initiated_by = fields.ForeignField('user', backref='initiated')

    # Expanded: Dictionary field mapping admin IDs to their approval status and relevant tokens:
    # {
    #   'b3k97': {
    #     'has_approved': False,
    #     'approval_token': 'Pew7wj1Puf7DENUPFPnXSwa1rf3xPN',
    #     'rejection_token': 'TwozClTFOic2PYxHDStby94bCQMwJy'}
    # }
    approval_state = fields.DictionaryField()
    # One of 'unapproved', 'approved', or 'rejected'
    state = fields.StringField(default='unapproved')

    def __repr__(self):
        return '<Sanction(end_date={self.end_date}) with _id {self._id}>'.format(self=self)

    @property
    def pending_approval(self):
        # True while at least one authorizer has yet to approve.
        return self.state == Sanction.UNAPPROVED

    @property
    def is_approved(self):
        return self.state == Sanction.APPROVED

    @property
    def is_rejected(self):
        return self.state == Sanction.REJECTED

    def _validate_authorizer(self, user):
        """Hook for subclasses to veto an authorizer; base implementation
        accepts anyone.
        """
        return True

    def add_authorizer(self, user, approved=False, save=False):
        """Register *user* as an authorizer with freshly encoded tokens.

        :param user: prospective authorizer
        :param bool approved: initial `has_approved` flag
        :param bool save: persist immediately after adding
        :returns: True if the user was added; False if invalid or already
            present
        """
        valid = self._validate_authorizer(user)
        if valid and user._id not in self.approval_state:
            self.approval_state[user._id] = {
                'has_approved': approved,
                'approval_token': tokens.encode(
                    {
                        'user_id': user._id,
                        'sanction_id': self._id,
                        'action': 'approve_{}'.format(self.SHORT_NAME)
                    }
                ),
                'rejection_token': tokens.encode(
                    {
                        'user_id': user._id,
                        'sanction_id': self._id,
                        'action': 'reject_{}'.format(self.SHORT_NAME)
                    }
                ),
            }
            if save:
                self.save()
            return True
        return False

    def remove_authorizer(self, user):
        """Remove *user* from the authorizer list (always saves).

        :returns: True if the user was removed, False if not an authorizer.
        """
        if user._id not in self.approval_state:
            return False

        del self.approval_state[user._id]
        self.save()
        return True

    def _on_approve(self, user, token):
        # Transition to APPROVED (and fire _on_complete) only once every
        # authorizer has approved.
        if all(authorizer['has_approved'] for authorizer in self.approval_state.values()):
            self.state = Sanction.APPROVED
            self._on_complete(user)

    def _on_reject(self, user, token):
        """Early termination of a Sanction"""
        raise NotImplementedError('Sanction subclasses must implement an #_on_reject method')

    def _on_complete(self, user):
        """When a Sanction has unanimous approval"""
        raise NotImplementedError('Sanction subclasses must implement an #_on_complete method')

    def approve(self, user, token):
        """Add user to approval list if user is admin and token verifies."""
        try:
            if self.approval_state[user._id]['approval_token'] != token:
                raise InvalidSanctionApprovalToken(self.APPROVAL_INVALID_TOKEN_MESSAGE.format(DISPLAY_NAME=self.DISPLAY_NAME))
        except KeyError:
            # user._id absent from approval_state: not an authorizer at all.
            raise PermissionsError(self.APPROVAL_NOT_AUTHORIZED_MESSAGE.format(DISPLAY_NAME=self.DISPLAY_NAME))
        self.approval_state[user._id]['has_approved'] = True
        self._on_approve(user, token)

    def reject(self, user, token):
        """Cancels sanction if user is admin and token verifies."""
        try:
            if self.approval_state[user._id]['rejection_token'] != token:
                raise InvalidSanctionRejectionToken(self.REJECTION_INVALID_TOKEN_MESSAGE.format(DISPLAY_NAME=self.DISPLAY_NAME))
        except KeyError:
            raise PermissionsError(self.REJECTION_NOT_AUTHORIZED_MESSAEGE.format(DISPLAY_NAME=self.DISPLAY_NAME))
        self.state = Sanction.REJECTED
        self._on_reject(user, token)

    def forcibly_reject(self):
        # Administrative override: reject without token verification and
        # without invoking the _on_reject callback.
        self.state = Sanction.REJECTED

    def _notify_authorizer(self, user):
        pass

    def _notify_non_authorizer(self, user):
        pass

    def ask(self, group):
        """Notify each contributor in *group*, routed by authorizer status."""
        for contrib in group:
            if contrib._id in self.approval_state:
                self._notify_authorizer(contrib)
            else:
                self._notify_non_authorizer(contrib)
class EmailApprovableSanction(Sanction):
    """Sanction that notifies authorizers and other contributors via
    templated emails containing view/approve/reject links.
    """

    # mails.Mail templates; subclasses must set these or the _notify_*
    # hooks raise NotImplementedError.
    AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = None
    NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = None

    # str.format templates for the links embedded in notification emails.
    VIEW_URL_TEMPLATE = ''
    APPROVE_URL_TEMPLATE = ''
    REJECT_URL_TEMPLATE = ''

    # Store a persistent copy of urls for use when needed outside of a request context.
    # This field gets automagically updated whenever models approval_state is modified
    # and the model is saved
    # {
    #   'abcde': {
    #     'approve': [APPROVAL_URL],
    #     'reject': [REJECT_URL],
    #   }
    # }
    stashed_urls = fields.DictionaryField(default=dict)

    @staticmethod
    def _format_or_empty(template, context):
        # A None/empty context yields an empty URL rather than a format error.
        if context:
            return template.format(**context)
        return ''

    def _view_url(self, user_id):
        return self._format_or_empty(self.VIEW_URL_TEMPLATE, self._view_url_context(user_id))

    def _view_url_context(self, user_id):
        # Subclasses return the {placeholder: value} dict for VIEW_URL_TEMPLATE.
        return None

    def _approval_url(self, user_id):
        return self._format_or_empty(self.APPROVE_URL_TEMPLATE, self._approval_url_context(user_id))

    def _approval_url_context(self, user_id):
        # Subclasses return the {placeholder: value} dict for APPROVE_URL_TEMPLATE.
        return None

    def _rejection_url(self, user_id):
        return self._format_or_empty(self.REJECT_URL_TEMPLATE, self._rejection_url_context(user_id))

    def _rejection_url_context(self, user_id):
        # Subclasses return the {placeholder: value} dict for REJECT_URL_TEMPLATE.
        return None

    def _send_approval_request_email(self, user, template, context):
        """Send *template* to *user*, expanding *context* into the mail."""
        mails.send_mail(
            user.username,
            template,
            user=user,
            **context
        )

    def _email_template_context(self, user, is_authorizer=False):
        # Subclasses build the per-recipient template context.
        return {}

    def _notify_authorizer(self, authorizer):
        context = self._email_template_context(authorizer, is_authorizer=True)
        if self.AUTHORIZER_NOTIFY_EMAIL_TEMPLATE:
            self._send_approval_request_email(authorizer, self.AUTHORIZER_NOTIFY_EMAIL_TEMPLATE, context)
        else:
            raise NotImplementedError

    def _notify_non_authorizer(self, user):
        context = self._email_template_context(user)
        if self.NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE:
            self._send_approval_request_email(user, self.NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE, context)
        else:
            raise NotImplementedError

    def add_authorizer(self, user, **kwargs):
        # NOTE(review): the base-class return value (whether the user was
        # actually added) is discarded, so urls are stashed and the model is
        # saved even when the user was invalid or already present — confirm
        # this is intentional.
        super(EmailApprovableSanction, self).add_authorizer(user, **kwargs)
        self.stashed_urls[user._id] = {
            'view': self._view_url(user._id),
            'approve': self._approval_url(user._id),
            'reject': self._rejection_url(user._id)
        }
        self.save()
class Embargo(EmailApprovableSanction):
    """Embargo object for registrations waiting to go public."""

    # Terminal state beyond APPROVED; set elsewhere once the embargo ends.
    COMPLETED = 'completed'

    DISPLAY_NAME = 'Embargo'
    SHORT_NAME = 'embargo'

    # Email templates used by the inherited _notify_* hooks.
    AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_EMBARGO_ADMIN
    NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_EMBARGO_NON_ADMIN

    VIEW_URL_TEMPLATE = VIEW_PROJECT_URL_TEMPLATE
    APPROVE_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'
    REJECT_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'

    initiated_by = fields.ForeignField('user', backref='embargoed')
    # True when the embargo was placed on a pre-existing registration rather
    # than one created together with the embargo.
    for_existing_registration = fields.BooleanField(default=False)

    @property
    def is_completed(self):
        return self.state == self.COMPLETED

    @property
    def embargo_end_date(self):
        # Returns False (not None) until the embargo is approved.
        if self.state == self.APPROVED:
            return self.end_date
        return False

    # NOTE(hrybacki): Old, private registrations are grandfathered and do not
    # require to be made public or embargoed. This field differentiates them
    # from new registrations entering into an embargo field which should not
    # show up in any search related fields.
    @property
    def pending_registration(self):
        return not self.for_existing_registration and self.pending_approval

    def __repr__(self):
        parent_registration = None
        try:
            parent_registration = Node.find_one(Q('embargo', 'eq', self))
        except NoResultsFound:
            pass
        return ('<Embargo(parent_registration={0}, initiated_by={1}, '
                'end_date={2}) with _id {3}>').format(
            parent_registration,
            self.initiated_by,
            self.end_date,
            self._id
        )

    def _view_url_context(self, user_id):
        # Context for VIEW_URL_TEMPLATE: the embargoed registration's id.
        registration = Node.find_one(Q('embargo', 'eq', self))
        return {
            'node_id': registration._id
        }

    def _approval_url_context(self, user_id):
        # Implicitly returns None (-> empty URL upstream) when the user has
        # no approval token.
        approval_token = self.approval_state.get(user_id, {}).get('approval_token')
        if approval_token:
            registration = Node.find_one(Q('embargo', 'eq', self))
            return {
                'node_id': registration._id,
                'token': approval_token,
            }

    def _rejection_url_context(self, user_id):
        # Implicitly returns None when the user has no rejection token.
        rejection_token = self.approval_state.get(user_id, {}).get('rejection_token')
        if rejection_token:
            registration = Node.find_one(Q('embargo', 'eq', self))
            return {
                'node_id': registration._id,
                'token': rejection_token,
            }

    def _email_template_context(self, user, is_authorizer=False, urls=None):
        """Build the context dict for pending-embargo notification emails."""
        urls = urls or self.stashed_urls.get(user._id, {})
        registration_link = urls.get('view', self._view_url(user._id))
        if is_authorizer:
            approval_link = urls.get('approve', '')
            disapproval_link = urls.get('reject', '')
            # Pending window expressed in hours for the email template.
            approval_time_span = settings.EMBARGO_PENDING_TIME.days * 24

            registration = Node.find_one(Q('embargo', 'eq', self))

            return {
                'is_initiator': self.initiated_by == user,
                'initiated_by': self.initiated_by.fullname,
                'approval_link': approval_link,
                'project_name': registration.title,
                'disapproval_link': disapproval_link,
                'registration_link': registration_link,
                'embargo_end_date': self.end_date,
                'approval_time_span': approval_time_span,
            }
        else:
            return {
                'initiated_by': self.initiated_by.fullname,
                'registration_link': registration_link,
                'embargo_end_date': self.end_date,
            }

    def _validate_authorizer(self, user):
        # Only admins on the embargoed registration may authorize.
        registration = Node.find_one(Q('embargo', 'eq', self))
        return registration.has_permission(user, ADMIN)

    def _on_reject(self, user, token):
        parent_registration = Node.find_one(Q('embargo', 'eq', self))
        parent_registration.registered_from.add_log(
            action=NodeLog.EMBARGO_CANCELLED,
            params={
                'node': parent_registration._id,
                'embargo_id': self._id,
            },
            auth=Auth(user),
        )
        # Remove backref to parent project if embargo was for a new registration
        if not self.for_existing_registration:
            parent_registration.delete_registration_tree(save=True)
            parent_registration.registered_from = None
        # Delete parent registration if it was created at the time the embargo was initiated
        # NOTE(review): same condition as the block above; the two branches
        # could be merged.
        if not self.for_existing_registration:
            parent_registration.is_deleted = True
            parent_registration.save()

    def disapprove_embargo(self, user, token):
        """Cancels embargo if user is admin and token verifies."""
        self.reject(user, token)

    def _on_complete(self, user):
        # Unanimous approval: log against the source project and persist.
        parent_registration = Node.find_one(Q('embargo', 'eq', self))
        parent_registration.registered_from.add_log(
            action=NodeLog.EMBARGO_APPROVED,
            params={
                'node': parent_registration._id,
                'embargo_id': self._id,
            },
            auth=Auth(self.initiated_by),
        )
        self.save()

    def approve_embargo(self, user, token):
        """Add user to approval list if user is admin and token verifies."""
        self.approve(user, token)
class Retraction(EmailApprovableSanction):
"""Retraction object for public registrations."""
DISPLAY_NAME = 'Retraction'
SHORT_NAME = 'retraction'
AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_RETRACTION_ADMIN
NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_RETRACTION_NON_ADMIN
VIEW_URL_TEMPLATE = VIEW_PROJECT_URL_TEMPLATE
APPROVE_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'
REJECT_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'
initiated_by = fields.ForeignField('user', backref='initiated')
justification = fields.StringField(default=None, validate=MaxLengthValidator(2048))
def __repr__(self):
parent_registration = None
try:
parent_registration = Node.find_one(Q('retraction', 'eq', self))
except NoResultsFound:
pass
return ('<Retraction(parent_registration={0}, initiated_by={1}) '
'with _id {2}>').format(
parent_registration,
self.initiated_by,
self._id
)
def _view_url_context(self, user_id):
registration = Node.find_one(Q('retraction', 'eq', self))
return {
'node_id': registration._id
}
def _approval_url_context(self, user_id):
approval_token = self.approval_state.get(user_id, {}).get('approval_token')
if approval_token:
registration = Node.find_one(Q('retraction', 'eq', self))
return {
'node_id': registration._id,
'token': approval_token,
}
def _rejection_url_context(self, user_id):
rejection_token = self.approval_state.get(user_id, {}).get('rejection_token')
if rejection_token:
registration = Node.find_one(Q('retraction', 'eq', self))
return {
'node_id': registration._id,
'token': rejection_token,
}
def _email_template_context(self, user, is_authorizer=False, urls=None):
urls = urls or self.stashed_urls.get(user._id, {})
registration_link = urls.get('view', self._view_url(user._id))
if is_authorizer:
approval_link = urls.get('approve', '')
disapproval_link = urls.get('reject', '')
approval_time_span = settings.RETRACTION_PENDING_TIME.days * 24
registration = Node.find_one(Q('retraction', 'eq', self))
return {
'is_initiator': self.initiated_by == user,
'initiated_by': self.initiated_by.fullname,
'project_name': registration.title,
'registration_link': registration_link,
'approval_link': approval_link,
'disapproval_link': disapproval_link,
'approval_time_span': approval_time_span,
}
else:
return {
'initiated_by': self.initiated_by.fullname,
'registration_link': registration_link,
}
def _on_reject(self, user, token):
parent_registration = Node.find_one(Q('retraction', 'eq', self))
parent_registration.registered_from.add_log(
action=NodeLog.RETRACTION_CANCELLED,
params={
'node': parent_registration._id,
'retraction_id': self._id,
},
auth=Auth(user),
save=True,
)
def _on_complete(self, user):
    """Finalize an approved retraction.

    Logs the approval, cancels any embargo on the parent registration,
    forces the retracted registration public, and re-indexes the whole
    registration tree so descendants reflect the retracted state.
    """
    parent_registration = Node.find_one(Q('retraction', 'eq', self))
    parent_registration.registered_from.add_log(
        action=NodeLog.RETRACTION_APPROVED,
        params={
            'node': parent_registration._id,
            'retraction_id': self._id,
        },
        auth=Auth(self.initiated_by),
    )
    # Remove any embargoes associated with the registration
    if parent_registration.embargo_end_date or parent_registration.is_pending_embargo:
        parent_registration.embargo.state = self.REJECTED
        parent_registration.registered_from.add_log(
            action=NodeLog.EMBARGO_CANCELLED,
            params={
                'node': parent_registration._id,
                'embargo_id': parent_registration.embargo._id,
            },
            auth=Auth(self.initiated_by),
        )
        parent_registration.embargo.save()
    # Ensure retracted registration is public
    if not parent_registration.is_public:
        parent_registration.set_privacy('public')
    parent_registration.update_search()
    # Retraction status is inherited from the root project, so we
    # need to recursively update search for every descendant node
    # so that retracted subprojects/components don't appear in search
    for node in parent_registration.get_descendants_recursive():
        node.update_search()
    self.save()
def approve_retraction(self, user, token):
    """Approve this retraction on behalf of *user*, validating *token*."""
    self.approve(user, token)
def disapprove_retraction(self, user, token):
    """Reject this retraction on behalf of *user*, validating *token*."""
    self.reject(user, token)
class RegistrationApproval(EmailApprovableSanction):
    """Sanction requiring admin approval before a registration goes public."""

    DISPLAY_NAME = 'Approval'
    SHORT_NAME = 'registration_approval'

    # Email templates: admins can approve/reject, other contributors are notified.
    AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_REGISTRATION_ADMIN
    NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_REGISTRATION_NON_ADMIN

    VIEW_URL_TEMPLATE = VIEW_PROJECT_URL_TEMPLATE
    APPROVE_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'
    REJECT_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'

    initiated_by = fields.ForeignField('user', backref='registration_approved')

    def _view_url_context(self, user_id):
        """Context for VIEW_URL_TEMPLATE: id of the pending registration."""
        registration = Node.find_one(Q('registration_approval', 'eq', self))
        return {
            'node_id': registration._id
        }

    def _approval_url_context(self, user_id):
        """Context for APPROVE_URL_TEMPLATE, or None if *user_id* has no approval token."""
        approval_token = self.approval_state.get(user_id, {}).get('approval_token')
        if approval_token:
            registration = Node.find_one(Q('registration_approval', 'eq', self))
            return {
                'node_id': registration._id,
                'token': approval_token,
            }

    def _rejection_url_context(self, user_id):
        """Context for REJECT_URL_TEMPLATE, or None if *user_id* has no rejection token."""
        rejection_token = self.approval_state.get(user_id, {}).get('rejection_token')
        if rejection_token:
            registration = Node.find_one(Q('registration_approval', 'eq', self))
            return {
                'node_id': registration._id,
                'token': rejection_token,
            }

    def _email_template_context(self, user, is_authorizer=False, urls=None):
        """Build the template context for pending-registration emails."""
        urls = urls or self.stashed_urls.get(user._id, {})
        registration_link = urls.get('view', self._view_url(user._id))
        if is_authorizer:
            approval_link = urls.get('approve', '')
            disapproval_link = urls.get('reject', '')
            # Approval window expressed in hours for the email copy.
            approval_time_span = settings.REGISTRATION_APPROVAL_TIME.days * 24
            registration = Node.find_one(Q('registration_approval', 'eq', self))
            return {
                'is_initiator': self.initiated_by == user,
                'initiated_by': self.initiated_by.fullname,
                'registration_link': registration_link,
                'approval_link': approval_link,
                'disapproval_link': disapproval_link,
                'approval_time_span': approval_time_span,
                'project_name': registration.title,
            }
        else:
            return {
                'initiated_by': self.initiated_by.fullname,
                'registration_link': registration_link,
            }

    def _add_success_logs(self, node, user):
        """Record a PROJECT_REGISTERED log on the node *node* was registered from."""
        src = node.registered_from
        src.add_log(
            action=NodeLog.PROJECT_REGISTERED,
            params={
                'parent_node': src.parent_id,
                'node': src._primary_key,
                'registration': node._primary_key,
            },
            auth=Auth(user),
            save=False
        )
        src.save()

    def _on_complete(self, user):
        """Finalize an approved registration: make the tree public and log it."""
        self.state = Sanction.APPROVED
        register = Node.find_one(Q('registration_approval', 'eq', self))
        registered_from = register.registered_from
        auth = Auth(self.initiated_by)
        register.set_privacy('public', auth, log=False)
        for child in register.get_descendants_recursive(lambda n: n.primary):
            child.set_privacy('public', auth, log=False)
        # Accounts for system actions where no `User` performs the final approval
        auth = Auth(user) if user else None
        registered_from.add_log(
            action=NodeLog.REGISTRATION_APPROVAL_APPROVED,
            params={
                'node': registered_from._id,
                'registration_approval_id': self._id,
            },
            auth=auth,
        )
        for node in register.root.node_and_primary_descendants():
            self._add_success_logs(node, user)
            node.update_search()  # update search if public
        self.save()

    def _on_reject(self, user, token):
        """Delete the pending registration tree and log the cancellation."""
        register = Node.find_one(Q('registration_approval', 'eq', self))
        registered_from = register.registered_from
        register.delete_registration_tree(save=True)
        registered_from.add_log(
            action=NodeLog.REGISTRATION_APPROVAL_CANCELLED,
            params={
                'node': register._id,
                'registration_approval_id': self._id,
            },
            auth=Auth(user),
        )
| cosenal/osf.io | website/project/model.py | Python | apache-2.0 | 132,422 | [
"VisIt"
] | bb9d1e3eebdd7988375e366dd9b37681ab3a5efe35ae7aa3a77dc2f502dcd060 |
#!/usr/bin/env python
"""
Enable using one or more Storage Elements
Usage:
dirac-admin-allow-se SE1 [SE2 ...]
Example:
$ dirac-admin-allow-se M3PEC-disk
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
__RCSID__ = "$Id$"
import DIRAC
from DIRAC.Core.Base import Script
from DIRAC.Core.Utilities.DIRACScript import DIRACScript
@DIRACScript()
def main():
    """Re-enable access to one or more Storage Elements.

    Parses switches selecting which access types (Read/Write/Check/Remove)
    to re-enable, resolves the target SEs (optionally all SEs of a site),
    flips each selected status back to 'Active' in the Resource Status
    System, and mails a summary to the production address unless muted.
    """
    # Access-type flags; all become True when no specific switch is given.
    read = False
    write = False
    check = False
    remove = False
    site = ''
    mute = False
    Script.registerSwitch("r", "AllowRead", " Allow only reading from the storage element")
    Script.registerSwitch("w", "AllowWrite", " Allow only writing to the storage element")
    Script.registerSwitch("k", "AllowCheck", " Allow only check access to the storage element")
    Script.registerSwitch("v", "AllowRemove", " Allow only remove access to the storage element")
    Script.registerSwitch("a", "All", " Allow all access to the storage element")
    Script.registerSwitch("m", "Mute", " Do not send email")
    Script.registerSwitch("S:", "Site=", " Allow all SEs associated to site")
    Script.parseCommandLine(ignoreErrors=True)
    ses = Script.getPositionalArgs()
    for switch in Script.getUnprocessedSwitches():
        if switch[0].lower() in ("r", "allowread"):
            read = True
        if switch[0].lower() in ("w", "allowwrite"):
            write = True
        if switch[0].lower() in ("k", "allowcheck"):
            check = True
        if switch[0].lower() in ("v", "allowremove"):
            remove = True
        if switch[0].lower() in ("a", "all"):
            read = True
            write = True
            check = True
            remove = True
        if switch[0].lower() in ("m", "mute"):
            mute = True
        if switch[0].lower() in ("s", "site"):
            site = switch[1]
    # imports deferred until after parseCommandLine (DIRAC convention)
    from DIRAC import gConfig, gLogger
    from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
    from DIRAC.ConfigurationSystem.Client.Helpers.Resources import getSites
    from DIRAC.Core.Security.ProxyInfo import getProxyInfo
    from DIRAC.DataManagementSystem.Utilities.DMSHelpers import resolveSEGroup
    from DIRAC.Interfaces.API.DiracAdmin import DiracAdmin
    from DIRAC.ResourceStatusSystem.Client.ResourceStatus import ResourceStatus
    if not (read or write or check or remove):
        # No switch was specified, means we need all of them
        gLogger.notice("No option given, all accesses will be allowed if they were not")
        read = True
        write = True
        check = True
        remove = True
    # Expand SE group names into their member SEs.
    ses = resolveSEGroup(ses)
    diracAdmin = DiracAdmin()
    setup = gConfig.getValue('/DIRAC/Setup', '')
    if not setup:
        print('ERROR: Could not contact Configuration Service')
        DIRAC.exit(2)
    # The proxy user is recorded as the author of the status change.
    res = getProxyInfo()
    if not res['OK']:
        gLogger.error('Failed to get proxy information', res['Message'])
        DIRAC.exit(2)
    userName = res['Value'].get('username')
    if not userName:
        gLogger.error('Failed to get username for proxy')
        DIRAC.exit(2)
    if site:
        res = getSites()
        if not res['OK']:
            gLogger.error(res['Message'])
            DIRAC.exit(-1)
        if site not in res['Value']:
            gLogger.error('The provided site (%s) is not known.' % site)
            DIRAC.exit(-1)
        # NOTE(review): getSites() appears to return a list of site names, so
        # indexing res['Value'] with 'SE' looks wrong — verify against the
        # Resources helpers (an SEs-for-site lookup is probably intended).
        ses.extend(res['Value']['SE'].replace(' ', '').split(','))
    if not ses:
        gLogger.error('There were no SEs provided')
        DIRAC.exit()
    STATUS_TYPES = ["ReadAccess", "WriteAccess", "CheckAccess", "RemoveAccess"]
    # Statuses from which an SE may legitimately be switched back to Active.
    ALLOWED_STATUSES = ["Unknown", "InActive", "Banned", "Probing", "Degraded"]
    statusAllowedDict = {}
    for statusType in STATUS_TYPES:
        statusAllowedDict[statusType] = []
    statusFlagDict = {}
    statusFlagDict['ReadAccess'] = read
    statusFlagDict['WriteAccess'] = write
    statusFlagDict['CheckAccess'] = check
    statusFlagDict['RemoveAccess'] = remove
    resourceStatus = ResourceStatus()
    res = resourceStatus.getElementStatus(ses, "StorageElement")
    if not res['OK']:
        gLogger.error('Storage Element %s does not exist' % ses)
        DIRAC.exit(-1)
    reason = 'Forced with dirac-admin-allow-se by %s' % userName
    for se, seOptions in res['Value'].items():
        # InActive is used on the CS model, Banned is the equivalent in RSS
        for statusType in STATUS_TYPES:
            if statusFlagDict[statusType]:
                if seOptions.get(statusType) == "Active":
                    gLogger.notice('%s status of %s is already Active' % (statusType, se))
                    continue
                if statusType in seOptions:
                    if not seOptions[statusType] in ALLOWED_STATUSES:
                        # BUGFIX: report the status of the current statusType,
                        # not always the ReadAccess one.
                        gLogger.notice('%s option for %s is %s, instead of %s' %
                                       (statusType, se, seOptions[statusType], ALLOWED_STATUSES))
                        gLogger.notice('Try specifying the command switches')
                    else:
                        resR = resourceStatus.setElementStatus(se, "StorageElement", statusType, 'Active', reason, userName)
                        if not resR['OK']:
                            gLogger.fatal("Failed to update %s %s to Active, exit -" % (se, statusType), resR['Message'])
                            DIRAC.exit(-1)
                        else:
                            gLogger.notice("Successfully updated %s %s to Active" % (se, statusType))
                            statusAllowedDict[statusType].append(se)
    totalAllowed = 0
    totalAllowedSEs = []
    for statusType in STATUS_TYPES:
        totalAllowed += len(statusAllowedDict[statusType])
        totalAllowedSEs += statusAllowedDict[statusType]
    totalAllowedSEs = list(set(totalAllowedSEs))
    if not totalAllowed:
        gLogger.info("No storage elements were allowed")
        DIRAC.exit(-1)
    if mute:
        gLogger.notice('Email is muted by script switch')
        DIRAC.exit(0)
    subject = '%s storage elements allowed for use' % len(totalAllowedSEs)
    addressPath = 'EMail/Production'
    address = Operations().getValue(addressPath, '')
    body = ''
    if read:
        body = "%s\n\nThe following storage elements were allowed for reading:" % body
        for se in statusAllowedDict['ReadAccess']:
            body = "%s\n%s" % (body, se)
    if write:
        body = "%s\n\nThe following storage elements were allowed for writing:" % body
        for se in statusAllowedDict['WriteAccess']:
            body = "%s\n%s" % (body, se)
    if check:
        body = "%s\n\nThe following storage elements were allowed for checking:" % body
        for se in statusAllowedDict['CheckAccess']:
            body = "%s\n%s" % (body, se)
    if remove:
        body = "%s\n\nThe following storage elements were allowed for removing:" % body
        for se in statusAllowedDict['RemoveAccess']:
            body = "%s\n%s" % (body, se)
    if not address:
        gLogger.notice("'%s' not defined in Operations, can not send Mail\n" % addressPath, body)
        DIRAC.exit(0)
    res = diracAdmin.sendMail(address, subject, body)
    gLogger.notice('Notifying %s' % address)
    if res['OK']:
        gLogger.notice(res['Value'])
    else:
        gLogger.notice(res['Message'])
    DIRAC.exit(0)
# Standard DIRAC script entry point.
if __name__ == "__main__":
    main()
| yujikato/DIRAC | src/DIRAC/DataManagementSystem/scripts/dirac_admin_allow_se.py | Python | gpl-3.0 | 6,852 | [
"DIRAC"
] | 7a914709b136d88edae52328aaff82366262722ba9b51ab7f26757178135a305 |
from __future__ import print_function, unicode_literals
import os
from shutil import copytree
if __name__ == '__main__':
    # Copy this directory to a sibling 'cdk' directory (unless already there),
    # then switch into it and print follow-up setup instructions.
    here = os.path.dirname(os.path.abspath(__file__))
    target = os.path.join(os.path.dirname(here), 'cdk')
    if here != target:
        copytree(here, target)
    os.chdir(target)
    print("""\
#---------------------------------------------------------#
Run
cd cdk
pip install -r requirements.txt
python setup.py develop
clld-unfreeze sqlite.ini
pserve sqlite.ini
or equivalent to start the cdk web app accessible at
http://localhost:6543
#---------------------------------------------------------#
""")
| clld/cdk | fromdump.py | Python | apache-2.0 | 650 | [
"CDK"
] | 7faa75b6fee24873aabb4578a935a14f9c834391ee2050a74a2ac1d07196a44d |
# This file is meant primarily for people who want to see an example
# of how to use part of the OpenBabel API, with a secondary use as a
# set of quick unit tests to make sure there's no strange but obvious
# problem with your OpenBabel setup.
# Regression tests, coverage tests, stress tests, performance tests,
# etc. should not go in this file.
import math
import os
import shutil
import tempfile
import time
import unittest
import warnings
import openbabel as ob
def testfile(name):
return os.path.join("files", name)
class MyTestCase(unittest.TestCase):
    """TestCase with helpers for approximate floating-point comparisons."""

    def assertClose(self, val, expect):
        # Accept values within a 0.01% relative band around ``expect``
        # (an exact ``expect`` of 0 never passes; use assertZero for that).
        low, high = sorted((expect * 0.9999, expect * 1.0001))
        self.assertTrue(low < val < high, val)

    def assertZero(self, val):
        # Absolute tolerance for values expected to be (nearly) zero.
        self.assertTrue(abs(val) < 0.00001, val)
class TempDir(object):
    """Context manager providing a scratch directory, removed on exit.

    The instance is also callable: ``td(name)`` joins *name* onto the
    temporary directory's path.
    """

    def __init__(self):
        self.dirname = None

    def __enter__(self):
        """Create the temporary directory and return self."""
        self.dirname = tempfile.mkdtemp(prefix="ob_py_test")
        return self

    def __call__(self, name):
        """Return the path of *name* inside the temporary directory."""
        return os.path.join(self.dirname, name)

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Recursively delete the temporary directory."""
        shutil.rmtree(self.dirname)
class SuppressLogging(object):
    """Context manager that silences the OpenBabel error log while active.

    Some API calls emit log messages that would clutter test output; the
    previous output level is restored on exit.
    """

    def __enter__(self):
        self.lvl = ob.obErrorLog.GetOutputLevel()
        ob.obErrorLog.SetOutputLevel(-1)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        ob.obErrorLog.SetOutputLevel(self.lvl)
# The plugin system requires that OBConversion be called first.
# This is done once, and it affects the entire system
# Check for that case now
result = ob.OBPlugin.ListAsString("fingerprints")
assert "FP2" not in result, result
# Instantiating an OBConversion loads the plugin registry as a side effect,
# as demonstrated by the assertions before and after this call.
ob.OBConversion()
result = ob.OBPlugin.ListAsString("fingerprints")
assert "FP2" in result, result

# Shared converter used by the parse_smiles/cansmiles helpers below.
_smiles_parser = ob.OBConversion()
_smiles_parser.SetInFormat("smi")
_smiles_parser.SetOutFormat("can")
def parse_smiles(smiles):
    """Parse a SMILES string into a new OBMol using the shared converter."""
    molecule = ob.OBMol()
    _smiles_parser.ReadString(molecule, smiles)
    return molecule
def cansmiles(mol):
    """Return the canonical SMILES string for *mol*, with whitespace stripped."""
    return _smiles_parser.WriteString(mol).strip()
def parse_smarts(smarts):
    """Compile a SMARTS string into an OBSmartsPattern; asserts it parses."""
    pattern = ob.OBSmartsPattern()
    assert pattern.Init(smarts)
    return pattern
def readfile(filename, filetype):
    """Yield every molecule record in *filename*, parsed as *filetype*.

    Bare filenames (containing no path separator) are resolved relative
    to the local ``files`` test-data directory. The same OBMol instance
    is reused (and cleared) between records.
    """
    if "/" not in filename and "\\" not in filename:
        filename = testfile(filename)
    conv = ob.OBConversion()
    conv.SetInFormat(filetype)
    mol = ob.OBMol()
    more = conv.ReadFile(mol, filename)
    while more:
        yield mol
        mol.Clear()
        more = conv.Read(mol)
class TestIO(MyTestCase):
    """File input/output via OBConversion: reading and writing various formats."""

    def test_read_smiles(self):
        it = readfile("FormulaTest.smi", "smi")
        mol = it.next()
        self.assertEqual(mol.GetTitle(), "CH4")
        self.assertTrue(mol.NumAtoms())
        mol = it.next()
        self.assertEqual(mol.GetTitle(), "C atom")
        self.assertTrue(mol.NumAtoms())

    def test_read_sdf(self):
        it = readfile("cantest.sdf", "sdf")
        mol = it.next()
        self.assertEqual(mol.GetTitle(), "8978")
        self.assertEqual(mol.NumBonds(), 64)
        mol = it.next()
        self.assertEqual(mol.GetTitle(), "10617")
        self.assertEqual(mol.NumBonds(), 40)

    def test_read_sdf_gz(self):
        # Compressed input is handled transparently by the sdf reader.
        it = readfile("ziptest.sdf.gz", "sdf")
        mol = it.next()
        self.assertEqual(mol.GetTitle(), "ZINC04985529")
        self.assertEqual(mol.NumAtoms(), 49)
        mol = it.next()
        self.assertEqual(mol.GetTitle(), "ZINC01700999")
        self.assertEqual(mol.NumAtoms(), 34)

    def test_write_smiles(self):
        conv = ob.OBConversion()
        conv.SetOutFormat("smi")
        with TempDir() as tempdir:
            mol = parse_smiles("CCO")
            mol.SetTitle("#1")
            conv.WriteFile(mol, tempdir("blah.smi"))
            mol = parse_smiles("[NH4+]")
            mol.SetTitle("mol2")
            conv.Write(mol)
            conv.CloseOutFile()
            lines = open(tempdir("blah.smi"), "U").readlines()
            self.assertTrue(lines[0] == "CCO\t#1\n" or
                            lines[0] == "OCC\t#1\n", repr(lines[0]))
            self.assertTrue(lines[1] == "[NH4+]\tmol2\n", repr(lines[1]))

    def test_write_sdf(self):
        conv = ob.OBConversion()
        conv.SetOutFormat("sdf")
        with TempDir() as tempdir:
            mol = parse_smiles("CCO")
            mol.SetTitle("#1")
            with SuppressLogging():
                # XXX For some reason, this generates the warning
                #   Warning in WriteMolecule No 2D or 3D coordinates exist.
                #   Any stereochemical information will be lost. To generate
                #   2D or 3D coordinates use --gen2D or --gen3d.
                # Since not all users of the API will have a --gen2D/--gen3d option,
                # that's not always going to be useful. Plus, my test cases
                # have no stereochemical information. Oh, and hey - I don't even
                # call WriteMolecule directly
                conv.WriteFile(mol, tempdir("blah.sdf"))
                mol = parse_smiles("[NH4+]")
                mol.SetTitle("mol2")
                conv.Write(mol)
            conv.CloseOutFile()
            titles = []
            atom_counts = []
            for mol in readfile(tempdir("blah.sdf"), "sdf"):
                titles.append(mol.GetTitle())
                atom_counts.append(mol.NumAtoms())
            self.assertEqual(titles, ["#1", "mol2"])
            self.assertEqual(atom_counts, [3, 5])

    def test_write_inchi(self):
        mol = parse_smiles("c1ccccc1O")
        conv = ob.OBConversion()
        conv.SetOutFormat("inchi")
        s = conv.WriteString(mol)
        # Note the newline!
        self.assertEqual(s, "InChI=1S/C6H6O/c7-6-4-2-1-3-5-6/h1-5,7H\n")

    def test_perception_and_canonicalization(self):
        mol = parse_smiles("C1=CC=C(O)C=C1")
        conv = ob.OBConversion()
        # Input does perception. Output is not canonical
        conv.SetOutFormat("smi")
        s = conv.WriteString(mol)
        self.assertEqual(s, "c1ccc(O)cc1\t\n")
        conv = ob.OBConversion()
        # Perception and canonical generation
        conv.SetOutFormat("can")
        s = conv.WriteString(mol)
        self.assertEqual(s, "Oc1ccccc1\t\n")
class TestPlugins(MyTestCase):
    """The OBPlugin registry: listing plugin categories and their members."""

    # Plugin categories expected to be registered in a standard build.
    known_types = ["charges", "descriptors", "fingerprints", "forcefields",
                   "formats", "loaders", "ops"]

    def test_known_types(self):
        for name in TestPlugins.known_types:
            s = ob.OBPlugin.ListAsString(name)
            self.assertFalse("not a recognized" in s, s)
            v = ob.vectorString()
            ob.OBPlugin.ListAsVector(name, None, v)
            self.assertTrue(len(v) > 0, list(v))

    def test_as_string(self):
        s = ob.OBPlugin.ListAsString("fingerprints")
        self.assertTrue("FP2" in s, s)
        self.assertTrue("FP3" in s, s)
        self.assertTrue("MACCS" in s, s)

    def test_as_string_unknown_type(self):
        # An unknown category yields a listing of the valid category names.
        s = ob.OBPlugin.ListAsString("qwerty.shrdlu")
        self.assertTrue("\nfingerprints\n" in s, s)
        self.assertTrue("\nloaders\n" in s, s)

    def test_as_vector(self):
        v = ob.vectorString()
        ob.OBPlugin.ListAsVector("formats", None, v)
        formats = set(v)
        self.assertTrue("smiles -- SMILES format" in formats, formats)

    ## def test_list(self):
    ##     # XXX GRR! To capture requires passing a 3rd argument which is a std:ostream
    ##     # I can't figure out how to do that in OpenBabel
    ##     s = ob.OBFingerprint.List("fingerprints").splitlines(True)
    ##     self.assertEquals(s[0], "FP2    Indexes linear fragments up to 7 atoms.\n")
    ##     self.assertEquals(s[1], "FP3    SMARTS patterns specified in the file "
    ##                       "patterns.txt\n")
    ##     self.assertEquals(s[2], "FP4    SMARTS patterns specified in the file "
    ##                       "SMARTS_InteLigand.txt\n")
    ##     self.assertEquals(s[3], "MACCS  SMARTS patterns specified in the file MACCS.txt\n")
    ##     self.assertEquals(len(s)>=4, True, s)
class TestFingerprints(MyTestCase):
    """OBFingerprint plugins: descriptions, bit vectors, folding, Tanimoto."""

    def test_descriptions(self):
        P = "\nPatternFP is definable"
        for name, expected_description in (
                ("FP2", "Indexes linear fragments up to 7 atoms."),
                ("FP3", "SMARTS patterns specified in the file patterns.txt" + P),
                ("FP4", "SMARTS patterns specified in the file SMARTS_InteLigand.txt" + P),
                ("MACCS", "SMARTS patterns specified in the file MACCS.txt" + P)):
            fingerprinter = ob.OBFingerprint.FindFingerprint(name)
            self.assertFalse(fingerprinter is None)
            self.assertEqual(fingerprinter.GetID(), name)
            self.assertEqual(fingerprinter.Description(), expected_description)
            # Which supported platforms have non-32-bit integers?
            self.assertEqual(fingerprinter.Getbitsperint(), 32)
        # XXX I don't think DescribeBits is accessible from Python
        # XXX What do I do with the result of GetMap?
        # XXX What are "Flags()" for?

    def test_fp_words(self):
        mol = parse_smiles("c1ccccc1O.C#N.[Ge].C1CCC1")

        def next_highest_power_of_two(n):
            # Fingerprint vectors are sized to a power of two >= 8 bits.
            i = 8
            while i < n:
                i *= 2
            return i

        for (name, nbits, v0, v1) in (("FP2", 1021, 0, 1),
                                      ("FP3", 55, 67108864, 1159170),
                                      ("FP4", 307, 2, 0),
                                      # TODO: change my MACCS.txt so it's correct
                                      # then rerun this test and change to the right answer
                                      ("MACCS", 166, 2097156, 256),
                                      ):
            fingerprinter = ob.OBFingerprint.FindFingerprint(name)
            v = ob.vectorUnsignedInt()
            fingerprinter.GetFingerprint(mol, v)
            size = next_highest_power_of_two(nbits)//32  # bits-per-int
            self.assertEqual(len(v), size)
            self.assertEqual(v[0], v0, (name, v[0], v0))
            self.assertEqual(v[1], v1, (name, v[1], v1))

    def test_fold(self):
        # Folding ORs the upper half of the vector onto the lower half.
        v = ob.vectorUnsignedInt([0x2A, 0x41])
        self.assertEqual(len(v), 2)
        x = ob.OBFingerprint.FindFingerprint("FP2")
        x.Fold(v, 32)
        self.assertEqual(len(v), 1)
        self.assertEqual(v[0], (0x2A | 0x41))
        v = ob.vectorUnsignedInt([0x01, 0x04, 0x20, 0x00])
        self.assertEqual(len(v), 4)
        x.Fold(v, 64)
        self.assertEqual(len(v), 2)
        self.assertEqual(v[0], 0x21)
        self.assertEqual(v[1], 0x04)

    def test_get_set(self):
        v = ob.vectorUnsignedInt([1, 6])
        # XXX Why does GetBit need an actual instance?
        x = ob.OBFingerprint.FindFingerprint("FP2")
        self.assertTrue(x.GetBit(v, 0))
        for i in range(1, 32):
            self.assertFalse(x.GetBit(v, i), i)
        self.assertFalse(x.GetBit(v, 32))
        self.assertTrue(x.GetBit(v, 33))
        self.assertTrue(x.GetBit(v, 34))
        self.assertFalse(x.GetBit(v, 35))
        x.SetBit(v, 35)
        self.assertTrue(x.GetBit(v, 35))

    def test_tanimoto(self):
        v1 = ob.vectorUnsignedInt([0x1, 0x6])
        v2 = ob.vectorUnsignedInt([0x1, 0x7])
        x = ob.OBFingerprint.FindFingerprint("FP2")
        self.assertEqual(x.Tanimoto(v1, v2), (1 + 2) / (1 + 3 + 0.0))

    def test_tanimoto_size_mismatch(self):
        # Mismatched vector lengths are reported with a -1.0 sentinel.
        v1 = ob.vectorUnsignedInt([0x1, 0x6])
        v2 = ob.vectorUnsignedInt([1, 2, 0])
        x = ob.OBFingerprint.FindFingerprint("FP2")
        self.assertEqual(x.Tanimoto(v1, v2), -1.0)

    def test_tanimoto_with_no_set_bits(self):
        v1 = ob.vectorUnsignedInt([0, 0, 0, 0])
        x = ob.OBFingerprint.FindFingerprint("FP2")
        # Again, this is an arbitrary decision by toolkit providers
        self.assertEqual(x.Tanimoto(v1, v1), 0.0)
# Large molecule built from many interlocking ring closures; used by the
# timing-sensitive SMARTS tests below as a worst-case matching input.
mol_with_many_rings = parse_smiles(
    "C1C3C5C7C9C%11C%13C%15C%17." +
    "C12C34C56C78C9%10C%11%12C%13%14C%15%16C%17%18." * 11 +
    "C2C4C6C8C%10C%12C%14C%16C%18C")
class TestSmarts(MyTestCase):
    """OBSmartsPattern: parsing, matching, and the various match-result APIs."""

    def test_4_membered_ring(self):
        pat = ob.OBSmartsPattern()
        self.assertTrue(pat.Init("*1~*~*~*~1"), "failed to Init")
        mol = parse_smiles("C1CCCC1")
        m = pat.Match(mol)
        self.assertFalse(m, "had a match?")
        mol = parse_smiles("C1CCC1")
        m = pat.Match(mol)
        self.assertTrue(m, "no match?")

    def test_is_valid_and_test_empty(self):
        pat = ob.OBSmartsPattern()
        self.assertFalse(pat.IsValid())
        self.assertTrue(pat.Empty())
        pat.Init("CO")
        self.assertTrue(pat.IsValid())
        self.assertFalse(pat.Empty())
        pat = ob.OBSmartsPattern()
        with SuppressLogging():
            # This will send message to the error log.
            self.assertFalse(pat.Init("=O"))
        self.assertFalse(pat.IsValid())
        self.assertTrue(pat.Empty())

    def test_num_atoms_and_bonds(self):
        pat = ob.OBSmartsPattern()
        self.assertEqual(pat.NumAtoms(), 0)
        self.assertEqual(pat.NumBonds(), 0)
        pat.Init("C")
        self.assertEqual(pat.NumAtoms(), 1)
        self.assertEqual(pat.NumBonds(), 0)
        pat.Init("C#N")
        self.assertEqual(pat.NumAtoms(), 2)
        self.assertEqual(pat.NumBonds(), 1)
        pat.Init("c1ccccc1")
        self.assertEqual(pat.NumAtoms(), 6)
        self.assertEqual(pat.NumBonds(), 6)

    def test_basic_match_fails(self):
        mol = parse_smiles("c1ccccc1O")
        pat = parse_smarts("[#7]")
        self.assertFalse(pat.Match(mol))
        self.assertEqual(pat.NumMatches(), 0)

    def test_basic_match_fails_with_single_flag_set(self):
        mol = parse_smiles("c1ccccc1O")
        pat = parse_smarts("[#7]")
        self.assertFalse(pat.Match(mol, True))
        self.assertEqual(pat.NumMatches(), 0)

    def test_basic_match(self):
        mol = parse_smiles("c1ccccc1O")
        pat = parse_smarts("[#6][#8]")
        self.assertTrue(pat.Match(mol))
        results = pat.GetUMapList()
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0], (6, 7))
        self.assertEqual(pat.NumMatches(), 1)

    def test_basic_match_with_two_unique_hits(self):
        mol = parse_smiles("c1ccccc1O")
        pat = parse_smarts("ccO")
        self.assertTrue(pat.Match(mol))
        results = pat.GetUMapList()
        self.assertEqual(len(results), 2)
        results = set(results)
        self.assertEqual(results, set([(5, 6, 7), (1, 6, 7)]))
        self.assertEqual(results, set(pat.GetMapList()))
        self.assertEqual(pat.NumMatches(), 2)

    def test_basic_match_with_one_unique_hit(self):
        mol = parse_smiles("c1ccccc1O")
        pat = parse_smarts("c1ccccc1")
        self.assertTrue(pat.Match(mol))
        results = pat.GetUMapList()
        self.assertEqual(len(results), 1)
        self.assertEqual(pat.NumMatches(), 1)
        self.assertEqual(set(results[0]), set([1, 2, 3, 4, 5, 6]))

    def test_basic_match_with_nonunique_hits(self):
        # A benzene query on phenol has 12 symmetry-equivalent mappings.
        mol = parse_smiles("c1ccccc1O")
        pat = parse_smarts("c1ccccc1")
        self.assertTrue(pat.Match(mol))
        results = pat.GetMapList()
        self.assertEqual(len(results), 12)
        results = list(map(set, results))
        for i in range(12):
            self.assertEqual(results[0], results[i])

    def test_basic_match_behavior_which_I_did_not_expect(self):
        mol = parse_smiles("c1ccccc1O")
        pat = parse_smarts("c1ccccc1")
        pat.Match(mol)
        self.assertEqual(pat.NumMatches(), 12)
        results = pat.GetUMapList()
        self.assertEqual(pat.NumMatches(), 1)
        results = pat.GetMapList()
        # I really expected these to be 12.
        # It appears the UMapList does an in-place trim.
        # XXX Is that the right/expected behavior?
        self.assertEqual(pat.NumMatches(), 1)
        self.assertEqual(len(results), 1)
        pat.Match(mol)
        # Here they are 12
        results = pat.GetMapList()
        self.assertEqual(pat.NumMatches(), 12)
        results = pat.GetUMapList()
        self.assertEqual(pat.NumMatches(), 1)
        self.assertEqual(len(results), 1)

    def test_basic_match_with_single_flag_set(self):
        # I want something which takes a long time
        mol = mol_with_many_rings
        pat = parse_smarts("C1CCCCCCCCCCCCC1")
        t1 = time.time()
        self.assertTrue(pat.Match(mol))
        t2 = time.time()
        if t2 - t1 > 0.01:
            warnings.warn("test_basic_match_with_single_flag_set took too long")
        self.assertEqual(len(pat.GetMapList()), 1)
        self.assertEqual(len(pat.GetUMapList()), 1)

    def test_has_match(self):
        mol = mol_with_many_rings
        pat = parse_smarts("C1CCCCCCCCCCCCC1")
        t1 = time.time()
        self.assertTrue(pat.HasMatch(mol))
        t2 = time.time()
        if t2-t1 > 0.01:
            warnings.warn("test_has_match took too long")

    def test_vector_match_false(self):
        # Create a vector< vector<int> >, wherein the results go
        v = ob.vectorvInt()
        mol = parse_smiles("c1ccccc1O")
        pat = parse_smarts("N")
        self.assertEqual(pat.Match(mol, v), 0)
        self.assertEqual(len(v), 0)

    def test_vector_match(self):
        v = ob.vectorvInt()
        mol = parse_smiles("c1ccccc1O")
        pat = parse_smarts("cO")
        self.assertEqual(pat.Match(mol, v), 1)
        self.assertEqual(len(v), 1)
        self.assertEqual(set(v[0]), set([6, 7]))

    def test_vector_match_with_two_hits(self):
        v = ob.vectorvInt()
        mol = parse_smiles("c1ccccc1O")
        pat = parse_smarts("ccO")
        self.assertEqual(pat.Match(mol, v), 1)
        self.assertEqual(len(v), 2)
        results = list(v)
        self.assertTrue((5, 6, 7) in results, results)
        self.assertTrue((1, 6, 7) in results, results)

    def test_vector_match_with_one_unique_hit(self):
        mol = parse_smiles("c1ccccc1O")
        pat = parse_smarts("c1ccccc1")
        v = ob.vectorvInt()
        self.assertTrue(pat.Match(mol, v, ob.OBSmartsPattern.AllUnique))
        self.assertEqual(len(v), 1)
        self.assertEqual(set(v[0]), set([1, 2, 3, 4, 5, 6]))

    def test_vector_match_with_single_hit(self):
        v = ob.vectorvInt()
        mol = parse_smiles("c1ccccc1O")
        pat = parse_smarts("ccO")
        self.assertEqual(pat.Match(mol, v, ob.OBSmartsPattern.Single), 1)
        self.assertEqual(len(v), 1)
        result = v[0]
        self.assertTrue(result == (5, 6, 7) or result == (1, 6, 7), result)

    def test_vector_match_with_all_hits(self):
        mol = parse_smiles("c1ccccc1O")
        pat = parse_smarts("c1ccccc1")
        v = ob.vectorvInt()
        self.assertEqual(pat.Match(mol, v, ob.OBSmartsPattern.All), 1)
        self.assertEqual(len(v), 12)
        expect = set([1, 2, 3, 4, 5, 6])
        for x in v:
            self.assertEqual(set(x), expect)

    def test_bad_smarts(self):
        pat = ob.OBSmartsPattern()
        # This writes an error to the log
        with SuppressLogging():
            self.assertFalse(pat.Init("%"))
        self.assertEqual(pat.NumAtoms(), 0)
        self.assertFalse(pat.IsValid())

    def test_replace_with_bad_smarts(self):
        pat = ob.OBSmartsPattern()
        self.assertTrue(pat.Init("CCCC"))
        self.assertEqual(pat.NumAtoms(), 4)
        # Re-init and verify that there's an overwrite
        # This writes an error to the log
        with SuppressLogging():
            self.assertFalse(pat.Init("Q"))
        self.assertEqual(pat.NumAtoms(), 0)
        self.assertFalse(pat.IsValid())

# The BeginMList/EndMList seems broken in Python XXX
class TestDescriptors(MyTestCase):
    """OBDescriptor plugins: logP, TPSA, MR, plus a smoke test over all of them."""

    def test_logp(self):
        calc_logp = ob.OBDescriptor.FindType("logP")
        mol = parse_smiles("Oc1ccccc1OC")
        #mol.AddHydrogens()  # doesn't change the results
        logp = calc_logp.Predict(mol)
        self.assertTrue(abs(logp - 1.4008) <= 0.0001, logp)

    def test_tpsa(self):
        calc_tpsa = ob.OBDescriptor.FindType("TPSA")
        mol = parse_smiles("Oc1ccccc1OC")
        #mol.AddHydrogens()  # doesn't change the results
        tpsa = calc_tpsa.Predict(mol)
        self.assertTrue(abs(tpsa - 29.460) <= 0.001, tpsa)

    def test_mr(self):
        calc_mr = ob.OBDescriptor.FindType("MR")
        mol = parse_smiles("Oc1ccccc1OC")
        #mol.AddHydrogens()  # doesn't change the results
        mr = calc_mr.Predict(mol)
        self.assertTrue(abs(mr - 34.957) <= 0.001, mr)

    def test_gotta_try_them_all(self):
        # Smoke test: every registered descriptor must at least be findable
        # and able to run Predict without raising.
        v = ob.vectorString()
        ob.OBDescriptor.ListAsVector("descriptors", None, v)
        mol = parse_smiles("c1ccccc1O")
        for term in v:
            name = term.split()[0]
            prop_calculator = ob.OBDescriptor.FindType(name)
            self.assertFalse(prop_calculator is None, "Could not find " + name)
            prop_calculator.Predict(mol)
def add_atom(mol, atomno):
    """Append a new atom with atomic number *atomno* to *mol* and return it."""
    new_atom = mol.NewAtom()
    new_atom.SetAtomicNum(atomno)
    return new_atom
class TestMolecule(MyTestCase):
def test_mol_iteration(self):
mol = parse_smiles("c12c(O[CH](C1=O)C(C)C)cc1c(c2)ccc(=O)o1")
element_counts = {}
for atom in ob.OBMolAtomIter(mol):
n = atom.GetAtomicNum()
element_counts[n] = element_counts.get(n, 0) + 1
self.assertEqual(element_counts[8], 4)
bond_counts = {}
for bond in ob.OBMolBondIter(mol):
n = bond.GetBondOrder()
if not bond.IsAromatic():
bond_counts[n] = bond_counts.get(n, 0) + 1
self.assertEqual(bond_counts[2], 2)
def test_atom_iteration(self):
mol = parse_smiles("[U](F)(F)(F)[Cl]")
atom = mol.GetAtom(1)
counts = {9: 0, 17: 0}
for bond in ob.OBAtomBondIter(atom):
xatom = bond.GetNbrAtom(atom)
n = xatom.GetAtomicNum()
counts[n] += 1
self.assertEqual(counts, {9: 3, 17: 1})
counts = {9: 0, 17: 0}
for atom in ob.OBAtomAtomIter(atom):
n = atom.GetAtomicNum()
counts[n] += 1
self.assertEqual(counts, {9: 3, 17: 1})
### XXX By symmetry I thought something like this would work
# It does not since there is no ob.OBBondAtomIter
# def test_bond_iteration(self):
# mol = parse_smiles("C#N")
# elements = []
# for atom in ob.OBBondAtomIter(bond):
# elements.append(atom.GetAtomicNum())
# elements.sort()
# self.assertEquals(elements, [6, 7])
# Most people don't do molecule building, so I'm not going to test all the variations
def test_building_a_molecule(self):
mol = ob.OBMol()
C = add_atom(mol, 6)
N = add_atom(mol, 7)
# XXX Why can't I do mol.AddBond(C, N, 3)?
mol.AddBond(C.GetIdx(), N.GetIdx(), 3)
self.assertEqual(C.ImplicitHydrogenCount(), 1)
C.IncrementImplicitValence() # Is this how to increment the implicit hcount?
self.assertEqual(C.ImplicitHydrogenCount(), 2)
conv = ob.OBConversion()
conv.SetOutFormat("can")
s = conv.WriteString(mol).strip()
# XXX How does this work when the ImplicitHydrogenCount is 2?? XXX
self.assertEqual(s, "C#N")
# I can even add an atom this way. (Why are there 2 ways?)
O = ob.OBAtom()
O.SetAtomicNum(8)
mol.AddAtom(O)
O.SetImplicitValence(2)
s = conv.WriteString(mol).strip()
self.assertEqual(s, "C#N.O")
def test_molecule_properties(self):
# Have Cl because the average MW is 35.5 so it's easy to
# tell the difference between the "average isotopic weight"
# and "weight of the most common isotope" answers
mol = parse_smiles("c1ccccc1O.[NH4+].[Cl]")
# 13? That includes the 4 hydrogens in [NH4+], but
# not the implicit hydrogen in c1ccccc1O. I don't get it. XXX
self.assertEqual(mol.NumAtoms(), 13)
self.assertEqual(mol.NumBonds(), 11) # includes the -H bonds
self.assertEqual(mol.NumHvyAtoms(), 9)
self.assertClose(mol.GetMolWt(), 147.6027)
self.assertClose(mol.GetMolWt(True), 147.6027)
self.assertClose(mol.GetMolWt(False), 141.55506)
self.assertClose(mol.GetExactMass(), 147.0451)
self.assertClose(mol.GetExactMass(True), 147.0451)
self.assertClose(mol.GetExactMass(False), 140.998)
self.assertEqual(mol.GetTotalCharge(), 1)
# def test_title(self): # tested in the IO module
def test_get_bond(self):
    """Look up the C=O bond via its two endpoint atoms."""
    mol = parse_smiles("C=O")
    carbon = mol.GetAtomById(0)
    self.assertEqual(carbon.GetAtomicNum(), 6)
    oxygen = mol.GetAtomById(1)
    self.assertEqual(oxygen.GetAtomicNum(), 8)
    # GetBond accepts the two OBAtom endpoints directly.
    self.assertEqual(mol.GetBond(carbon, oxygen).GetBondOrder(), 2)
def test_formula(self):
    """Exercise GetFormula/GetSpacedFormula and their separator options."""
    mol = parse_smiles("c1ccccc1O.[NH4+]")
    # XXX Leaves out the "+"?
    self.assertEqual(mol.GetFormula(), "C6H10NO")
    # XXX Why are the extra spaces there? "N 1", "O 1" and the terminal " "
    self.assertEqual(mol.GetSpacedFormula(), "C 6 H 10 N 1 O 1 ")
    # First argument 1 omits "1" counts; 0 (the default) keeps them.
    self.assertEqual(mol.GetSpacedFormula(0), "C 6 H 10 N 1 O 1 ")
    self.assertEqual(mol.GetSpacedFormula(1), "C 6 H 10 N O ")
    # Second argument is the separator string.
    self.assertEqual(mol.GetSpacedFormula(1, '>'), "C>6>H>10>N>O>")
    # It seems that OpenBabel and I have different definitions of "implicit"
    # Third argument 0 drops the implicit hydrogens from the H count.
    self.assertEqual(mol.GetSpacedFormula(0, ' ', 0), "C 6 H 4 N 1 O 1 ")
    self.assertEqual(mol.GetSpacedFormula(1, ' ', 0), "C 6 H 4 N O ")
# There's a huge number of properties I've omitted
class TestAtomAndBond(MyTestCase):
    """Exercise the OBAtom and OBBond accessors, predicates and geometry."""

    def test_atom_properties(self):
        """Walk through most OBAtom getters on a [12CH4-] carbanion."""
        mol = parse_smiles("[12CH4-]")
        mol.SetTitle("Spam!")
        # Atom indices are 1-based: GetAtom(0) returns None.
        atom = mol.GetAtom(0)
        self.assertTrue(atom is None, "GetAtom(0)")
        atom = mol.GetAtom(1)
        self.assertTrue(atom is not None, "GetAtom(1)")
        self.assertEqual(atom.GetAtomicNum(), 6)
        self.assertEqual(atom.GetIsotope(), 12)
        self.assertEqual(atom.GetFormalCharge(), -1)
        self.assertEqual(atom.GetImplicitValence(), 4)
        # GetIdx() is 1-based, GetIndex() is 0-based.
        self.assertEqual(atom.GetIdx(), 1)
        self.assertEqual(atom.GetIndex(), 0)
        self.assertEqual(atom.GetSpinMultiplicity(), 0)
        self.assertEqual(atom.GetAtomicMass(), 12.0)
        self.assertEqual(atom.GetExactMass(), 12.0)
        self.assertEqual(atom.GetValence(), 4)
        self.assertEqual(atom.GetHyb(), 3) # sp3
        self.assertEqual(atom.GetHvyValence(), 0)
        # Parsed SMILES has no coordinates, so everything sits at the origin.
        self.assertEqual(atom.GetX(), 0.0)
        self.assertEqual(atom.GetY(), 0.0)
        self.assertEqual(atom.GetZ(), 0.0)
        atom.SetVector(1.25, 2.5, 5.125)
        self.assertEqual(atom.x(), 1.25)
        self.assertEqual(atom.y(), 2.5)
        self.assertEqual(atom.z(), 5.125)
        self.assertEqual(atom.ImplicitHydrogenCount(), 0)
        self.assertEqual(atom.ExplicitHydrogenCount(), 4)
        self.assertEqual(atom.MemberOfRingCount(), 0)
        self.assertEqual(atom.MemberOfRingSize(), 0)
        self.assertEqual(atom.CountRingBonds(), 0)
        # *sigh* I don't like all these silly methods. I would
        # rather they be functions.
        self.assertTrue(atom.IsCarbon())
        self.assertFalse(atom.IsHydrogen())
        self.assertTrue(atom.IsCarbon())
        self.assertFalse(atom.IsNitrogen())
        self.assertFalse(atom.IsOxygen())
        self.assertFalse(atom.IsSulfur())
        self.assertFalse(atom.IsPhosphorus())
        self.assertFalse(atom.IsAromatic())
        self.assertFalse(atom.IsInRing())
        for i in range(10):
            self.assertFalse(atom.IsInRingSize(i))
        self.assertFalse(atom.IsNotCorH())
        self.assertFalse(atom.IsCarboxylOxygen())
        self.assertFalse(atom.IsPhosphateOxygen())
        self.assertFalse(atom.IsSulfateOxygen())
        self.assertFalse(atom.IsNitroOxygen())
        self.assertFalse(atom.IsAmideNitrogen())
        self.assertFalse(atom.IsPolarHydrogen())
        self.assertFalse(atom.IsNonPolarHydrogen())
        self.assertFalse(atom.IsAromaticNOxide())
        self.assertFalse(atom.IsChiral())
        self.assertFalse(atom.IsAxial())
        self.assertFalse(atom.IsHbondAcceptor())
        self.assertFalse(atom.IsHbondDonor())
        self.assertFalse(atom.IsHbondDonorH())
        self.assertFalse(atom.HasBondOfOrder(0))
        self.assertTrue(atom.HasBondOfOrder(1))
        self.assertFalse(atom.HasBondOfOrder(2))
        self.assertFalse(atom.HasBondOfOrder(3))
        self.assertEqual(atom.CountBondsOfOrder(1), 4)
        self.assertFalse(atom.HasNonSingleBond())
        self.assertTrue(atom.HasSingleBond())
        self.assertFalse(atom.HasDoubleBond())
        self.assertFalse(atom.HasAromaticBond())
        # In the 15th or 16th main group (N, O, P, S, ...)
        self.assertFalse(atom.IsHeteroatom())
        # Whee! This isn't really accessible to Python. XXX
        # Should I use ctypes to peer into the object?
        # self.assertEquals(atom.GetCoordinate(), ...?)
        v = atom.GetVector()
        self.assertEqual(v.GetX(), 1.25)
        self.assertEqual(v.GetY(), 2.5)
        self.assertEqual(v.GetZ(), 5.125)
        self.assertClose(atom.GetPartialCharge(), -0.25658)
        self.assertTrue(atom.GetParent().GetTitle() == mol.GetTitle(),
                        "parent is mol")
        # Aromaticity flag can be toggled by hand.
        self.assertFalse(atom.IsAromatic())
        atom.SetAromatic()
        self.assertTrue(atom.IsAromatic())
        atom.UnsetAromatic()

    def test_more_atom_properties(self):
        """Ring membership and free-oxygen counts on an aromatic molecule."""
        mol = parse_smiles("Nc1cc(S)ccc1O")
        self.assertTrue(mol.GetAtom(8).CountFreeOxygens())
        self.assertFalse(mol.GetAtom(9).CountFreeOxygens())
        self.assertTrue(mol.GetAtom(9).ImplicitHydrogenCount())
        atom = mol.GetAtom(2)
        self.assertEqual(atom.MemberOfRingCount(), 1)
        self.assertEqual(atom.MemberOfRingSize(), 6)
        self.assertEqual(atom.CountRingBonds(), 2)
        self.assertEqual(atom.BOSum(), 4)
        self.assertTrue(atom.IsAromatic())
        self.assertTrue(atom.IsInRing())

    def test_bond_length(self):
        """Bond lengths start at 0 and follow SetVector/SetLength."""
        mol = parse_smiles("C#N")
        C = mol.GetAtom(1)
        N = mol.GetAtom(2)
        # XXX Why do bonds starts from 0 and not 1
        self.assertTrue(mol.GetBond(1) is None)
        bond = mol.GetBond(0)
        self.assertEqual(bond.GetLength(), 0.0)
        N.SetVector(0.0, 1.0, 0.0)
        self.assertEqual(bond.GetLength(), 1.0)
        length = bond.GetEquibLength()
        # SetLength(C, length) keeps C fixed and moves the other atom (N)
        # out along the bond to the requested distance.
        bond.SetLength(C, length)
        self.assertEqual(C.GetX(), 0.0)
        self.assertEqual(C.GetY(), 0.0)
        self.assertEqual(C.GetZ(), 0.0)
        self.assertEqual(N.GetX(), 0.0)
        self.assertEqual(N.GetY(), length)
        self.assertEqual(N.GetZ(), 0.0)

    def test_bond_neighbor(self):
        """GetNbrAtom/GetNbrAtomIdx, including the not-in-bond fallback."""
        mol = parse_smiles("CNS")
        C = mol.GetAtom(1)
        N = mol.GetAtom(2)
        S = mol.GetAtom(3)
        bond = mol.GetBond(C, N)
        self.assertEqual(bond.GetNbrAtom(C).GetIdx(), N.GetIdx())
        self.assertEqual(bond.GetNbrAtom(N).GetIdx(), C.GetIdx())
        # XXX S isn't part of the bond. The docs need to warn about this behavior
        self.assertEqual(bond.GetNbrAtom(S), C)
        self.assertEqual(bond.GetNbrAtomIdx(C), N.GetIdx())
        self.assertEqual(bond.GetNbrAtomIdx(N), C.GetIdx())
        # This is the documented failure condition
        self.assertEqual(bond.GetNbrAtomIdx(S), bond.GetBeginAtomIdx())

    def test_bond_properties(self):
        """OBBond getters and predicates; mutate the order and re-check."""
        mol = parse_smiles("C#N")
        bond = mol.GetBond(0)
        self.assertEqual(bond.GetBondOrder(), 3)
        self.assertFalse(bond.IsDouble())
        self.assertTrue(bond.IsTriple())
        bond.SetBondOrder(2)
        self.assertEqual(bond.GetBondOrder(), 2)
        # It looks like OpenBabel tracks the valences and not the
        # hydrogen counts, which is why this works out right.
        # Interesting.
        smiles = cansmiles(mol)
        self.assertEqual(smiles, "C=N")
        self.assertFalse(bond.IsAromatic())
        self.assertEqual(bond.GetIdx(), 0)
        self.assertEqual(bond.GetId(), 0)
        self.assertEqual(bond.GetBeginAtomIdx(), 1)
        self.assertEqual(bond.GetBeginAtom().GetAtomicNum(), 6)
        self.assertEqual(bond.GetEndAtomIdx(), 2)
        self.assertEqual(bond.GetEndAtom().GetAtomicNum(), 7)
        self.assertFalse(bond.IsAmide())
        self.assertFalse(bond.IsPrimaryAmide())
        self.assertFalse(bond.IsRotor())
        self.assertFalse(bond.IsInRing())
        self.assertFalse(bond.IsSecondaryAmide())
        self.assertFalse(bond.IsTertiaryAmide())
        self.assertFalse(bond.IsEster())
        self.assertFalse(bond.IsCarbonyl())
        # The order was changed to 2 above, hence double, not triple.
        self.assertFalse(bond.IsSingle())
        self.assertTrue(bond.IsDouble())
        self.assertFalse(bond.IsTriple())
        self.assertFalse(bond.IsClosure())
        self.assertFalse(bond.IsUp())
        self.assertFalse(bond.IsDown())
        self.assertFalse(bond.IsWedge())
        self.assertFalse(bond.IsHash())
        self.assertFalse(bond.IsWedgeOrHash())
        self.assertFalse(bond.IsCisOrTrans())
        ## This returns True, but the test is rather meaningless
        # since there are no coordinates.
        self.assertTrue(bond.IsDoubleBondGeometry())

    def test_more_bond_properties(self):
        """A ring bond in an aromatic heterocycle."""
        mol = parse_smiles("Sc1nccc1")
        bond = mol.GetBond(2)
        self.assertEqual(bond.GetBeginAtom().GetAtomicNum(), 7)
        self.assertEqual(bond.GetEndAtom().GetIdx(), 4)
        self.assertTrue(bond.IsInRing())

    def test_rings(self):
        """SSSR/LSSR perception on a fused 6- and 7-membered ring system."""
        mol = parse_smiles("C12CNCC3C1.C2CCC3")
        atom = mol.GetAtom(1)
        self.assertTrue(atom.IsInRing())
        self.assertTrue(atom.IsInRingSize(6))
        self.assertTrue(atom.IsInRingSize(7))
        self.assertFalse(atom.IsInRingSize(10))
        self.assertEqual(atom.MemberOfRingCount(), 2)
        self.assertEqual(atom.MemberOfRingSize(), 6)
        self.assertEqual(atom.CountRingBonds(), 3)
        sssr = mol.GetSSSR()
        self.assertEqual(len(sssr), 2)
        # Sort the rings by size (6 before 7).
        ring_info = [(ring.Size(), ring) for ring in sssr]
        ring_info.sort()
        sizes = [x[0] for x in ring_info]
        self.assertEqual(sizes, [6, 7])
        ring = ring_info[0][1]
        self.assertFalse(ring.IsAromatic())
        self.assertEqual(ring.GetType(), "")
        # XXX *which* of the non-carbons is the root? That isn't documented
        idx = ring.GetRootAtom() # Shouldn't that be "Idx"?
        # Since there's only one non-C, it must be the N
        atom = mol.GetAtom(idx)
        self.assertEqual(atom.GetAtomicNum(), 7)
        self.assertTrue(ring.IsMember(atom))
        for bond in ob.OBAtomBondIter(atom):
            self.assertTrue(ring.IsMember(bond))
        self.assertTrue(ring.IsInRing(idx))
        lssr = mol.GetLSSR()
        self.assertEqual(len(lssr), 2)
        sizes = [ring.Size() for ring in lssr]
        sizes.sort()
        self.assertEqual(sizes, [6, 7])

    def test_ring_center_and_normal(self):
        """Place benzene on a circle in the XY plane; check center/normals."""
        mol = parse_smiles("c1ccccc1")
        R = 1.5
        for i in range(6):
            atom = mol.GetAtom(i+1)
            atom.SetVector(R*math.cos(2*math.pi*i/6),
                           R*math.sin(2*math.pi*i/6),
                           0.0)
        # Grab the first (and only) ring from the iterator.
        for ring in ob.OBMolRingIter(mol):
            break
        center = ob.vector3()
        norm1 = ob.vector3()
        norm2 = ob.vector3()
        # Output parameters are filled in place.
        ring.findCenterAndNormal(center, norm1, norm2)
        self.assertZero(center.GetX())
        self.assertZero(center.GetY())
        self.assertZero(center.GetZ())
        self.assertZero(norm1.GetX())
        self.assertZero(norm1.GetY())
        self.assertClose(norm1.GetZ(), 1.0)
        self.assertZero(norm2.GetX())
        self.assertZero(norm2.GetY())
        self.assertClose(norm2.GetZ(), -1.0)

    def test_geometry_calculations(self):
        """Distances, angles, torsions and 1-3/1-4 connectivity tests."""
        mol = parse_smiles("CNOS")
        C = mol.GetAtom(1)
        N = mol.GetAtom(2)
        O = mol.GetAtom(3)
        S = mol.GetAtom(4)
        C.SetVector(0.0, 0.0, 0.0)
        N.SetVector(1.0, 0.0, 0.0)
        O.SetVector(1.5, 0.5, 0.0)
        S.SetVector(1.0, 1.0, 1.0)
        self.assertEqual(C.GetDistance(1), 0.0)
        self.assertEqual(C.GetDistance(N), 1.0)
        # XXX This returns degrees?!
        self.assertClose(C.GetAngle(2, 3), 135.0)
        self.assertClose(C.GetAngle(N, O), 135.0)
        self.assertEqual(C.GetAngle(C, O), 0.0)
        self.assertClose(N.SmallestBondAngle(), 135.0)
        self.assertClose(N.AverageBondAngle(), 135.0)
        self.assertClose(N.SmallestBondAngle(), 135.0)
        self.assertClose(N.AverageBondAngle(), 135.0)
        # The molecule also has an angle method, PLUS torsion
        self.assertClose(mol.GetAngle(C, N, O), 135.0)
        self.assertEqual(mol.GetAngle(C, C, O), 0.0)
        self.assertClose(mol.GetTorsion(C, N, O, S), 54.7356)
        self.assertTrue(C.IsConnected(N))
        self.assertFalse(C.IsConnected(O))
        # XXX I don't expect this
        self.assertTrue(C.IsConnected(C))
        self.assertFalse(C.IsOneThree(S))
        self.assertTrue(N.IsOneThree(S))
        self.assertTrue(C.IsOneFour(S))
        # XXX I don't expect this.
        # I think it's a consequence of X.IsConnected(X) == True
        self.assertTrue(C.IsOneFour(O))
        self.assertTrue(C.IsOneFour(C)) # XXX completely surprising!

    def test_HtoMethyl(self):
        """Convert the explicit hydrogen of [H]Cl into a methyl carbon."""
        mol = parse_smiles("[H]Cl")
        # If I don't move this atom then I get the message
        # *** Open Babel Warning in SetLength
        # Atoms are both at the same location, moving out of the way.
        mol.GetAtom(2).SetVector(1.5, 0, 0)
        atom = mol.GetAtom(1)
        self.assertEqual(atom.GetAtomicNum(), 1)
        # This triggers some debug code which dumps to cerr
        atom.HtoMethyl()
        self.assertEqual(atom.GetAtomicNum(), 6)

    def test_MatchesSMARTS(self):
        """Per-atom SMARTS matching (returns 0/1, anchored at this atom)."""
        # I don't much like this function.
        mol = parse_smiles("CCO")
        atom = mol.GetAtom(1)
        self.assertEqual(atom.MatchesSMARTS("O"), 0)
        self.assertEqual(atom.MatchesSMARTS("OCC"), 0)
        self.assertEqual(atom.MatchesSMARTS("CC"), 1)
# HasAlphaBetaUnsat
# These values are taken directly from OB's spectrophoretest.cpp
class SpectorphoreTest(MyTestCase):
    """Spectrophore descriptor values, ported from OB's spectrophoretest.cpp."""

    def assertWithin_0_001(self, val, expect):
        """Assert val is positive and within 0.001 of expect."""
        assert val > 0
        self.assertTrue(abs(val - expect) < 0.001, val)

    def _make_mol(self):
        """Build the five-atom CHFBrCl test molecule with fixed coordinates."""
        mol = ob.OBMol()
        def new_atom(eleno):
            # Append one atom with the given atomic number.
            a = mol.NewAtom()
            a.SetAtomicNum(eleno)
            return a
        atoms = []
        atoms.append(new_atom(6))
        atoms.append(new_atom(1))
        atoms.append(new_atom(9))
        atoms.append(new_atom(35))
        atoms.append(new_atom(17))
        # Single-bond every substituent to the central carbon (atoms[0]).
        for atom in atoms[1:]:
            b = mol.NewBond()
            b.SetBegin(atoms[0])
            b.SetEnd(atom)
            b.SetBondOrder(1)
        mol.GetAtom(1).SetVector(-0.013, 1.086, 0.008)
        mol.GetAtom(2).SetVector(0.002, -0.004, 0.002)
        mol.GetAtom(3).SetVector(1.300, 1.570, -0.002)
        mol.GetAtom(4).SetVector(-0.964, 1.737, -1.585)
        mol.GetAtom(5).SetVector(-0.857, 1.667, 1.491)
        return mol

    def test_1(self):
        """All 48 unnormalized values at the 20-degree angular step size."""
        s = ob.OBSpectrophore()
        s.SetNormalization(ob.OBSpectrophore.NoNormalization)
        s.SetResolution(3.0)
        s.SetAccuracy(ob.OBSpectrophore.AngStepSize20)
        s.SetStereo(ob.OBSpectrophore.NoStereoSpecificProbes)
        r = s.GetSpectrophore(self._make_mol())
        C = self.assertWithin_0_001
        C(r[ 0], 1.599)
        C(r[ 1], 1.577)
        C(r[ 2], 1.170)
        C(r[ 3], 3.761)
        C(r[ 4], 5.175)
        C(r[ 5], 5.781)
        C(r[ 6], 3.797)
        C(r[ 7], 3.713)
        C(r[ 8], 4.651)
        C(r[ 9], 7.737)
        C(r[10], 7.950)
        C(r[11], 4.869)
        C(r[12], 2.708)
        C(r[13], 3.471)
        C(r[14], 6.698) # XXX The original code has a bug in the upper bound
        C(r[15], 9.486)
        C(r[16], 7.668)
        C(r[17], 8.882)
        C(r[18], 4.900) # XXX The original code is slightly too high in the upper bound
        C(r[19], 7.479)
        C(r[20], 9.324)
        C(r[21], 10.293)
        C(r[22], 12.956)
        C(r[23], 10.335)
        C(r[24], 4.021)
        C(r[25], 3.814)
        C(r[26], 2.947)
        C(r[27], 6.381)
        C(r[28], 11.004)
        C(r[29], 8.279)
        C(r[30], 6.549)
        C(r[31], 7.136)
        C(r[32], 8.613)
        C(r[33], 13.182)
        C(r[34], 13.744)
        C(r[35], 9.084)
        C(r[36], 0.459)
        C(r[37], 0.642)
        C(r[38], 2.172)
        C(r[39], 2.753)
        C(r[40], 2.348)
        C(r[41], 2.605)
        C(r[42], 1.614)
        C(r[43], 3.166)
        C(r[44], 3.391)
        C(r[45], 3.132)
        C(r[46], 4.105)
        C(r[47], 2.875)

    def test_with_increased_accuracy(self):
        """Spot-check one value per probe block at the 5-degree step size."""
        s = ob.OBSpectrophore()
        s.SetNormalization(ob.OBSpectrophore.NoNormalization)
        s.SetResolution(3.0)
        s.SetAccuracy(ob.OBSpectrophore.AngStepSize5)
        s.SetStereo(ob.OBSpectrophore.NoStereoSpecificProbes)
        r = s.GetSpectrophore(self._make_mol())
        C = self.assertWithin_0_001
        C(r[0], 1.6445)
        C(r[12], 2.7245)
        C(r[24], 4.0435)
        C(r[36], 0.4585)
# Look at spectrophoretest.cpp for many more examples.
class TestForceFields(MyTestCase):
    """Force-field plugin lookup and single-point energy checks."""

    def test_plugin(self):
        """Force fields are listed as plugins; lookup is case-insensitive."""
        v = ob.vectorString()
        ob.OBPlugin.ListAsVector("forcefields", None, v)
        # Huh. The plugin system uses case-insensitive lookup
        names = [x.split()[0].lower() for x in v]
        self.assertTrue("gaff" in names, names)
        self.assertTrue("mmff94" in names, names)
        self.assertTrue("uff" in names, names)
        pFF1 = ob.OBForceField.FindForceField("GAFF")
        pFF2 = ob.OBForceField.FindForceField("GafF")
        self.assertFalse(pFF1 is None)
        self.assertFalse(pFF2 is None)
        self.assertEqual(pFF1.GetID(), pFF2.GetID())

    def _test_energies(self, plugin_name, expected_results, filename = None):
        """Set up plugin_name on molecules from filename (default
        forcefield.sdf), compare Energy() against expected_results,
        and validate the analytical gradients."""
        pFF = ob.OBForceField.FindForceField(plugin_name)
        self.assertFalse(pFF is None, "Cannot load " + plugin_name)
        if filename is None:
            filename = testfile("forcefield.sdf")
        for i, mol in enumerate(readfile(filename, "sdf")):
            self.assertEqual(pFF.Setup(mol), 1,
                             "Could not set up forcefield on " + mol.GetTitle())
            energy = pFF.Energy(False)
            self.assertClose(energy, expected_results[i])
            self.assertEqual(pFF.ValidateGradients(), 1,
                             "gradients do not validate for molecule " + mol.GetTitle())
            # These are meant to be fast unit tests, and not a validation
            # suite, so bail out after checking only the first molecule.
            if i == 0:
                break

    # The basis for these tests come from ffghemical.cpp
    def test_ghemical_energy_calculation(self):
        expected_results = list(map(float, open(testfile("ghemicalresults.txt")).readlines()))
        self._test_energies("Ghemical", expected_results)

    # The basis for these tests come from ffgaff.cpp
    def test_gaff_energy_calculation(self):
        expected_results = list(map(float, open(testfile("gaffresults.txt")).readlines()))
        self._test_energies("GAFF", expected_results)

    # The basis for these tests comes from ffmmff94.cpp
    def test_mmff94_energy_calculation(self):
        # XXX The MMFF94 ValidateGradients() dumps output to stdout
        expected_results = list(map(float, open(testfile("mmff94results.txt")).readlines()))
        self._test_energies("MMFF94", expected_results)
        ## Doing this test does not show anything new about the OpenBabel API
        ## and it dumps more useless text (for the purposes of testing) to stdout
        #expected_results = map(float, open(testfile("more-mmff94results.txt")).readlines())
        #self._test_energies("MMFF94", expected_results, testfile("more-mmff94.sdf"))

    # The basis for these tests comes from ffuff.cpp
    def test_uff_energy_calculation(self):
        expected_results = list(map(float, open(testfile("uffresults.txt")).readlines()))
        self._test_energies("UFF", expected_results)

    # Does not seem to work. Don't know if I'm doing the wrong thing
    # def test_mmff94_validates(self):
    #     pFF = ob.OBForceField.FindForceField("MMFF94")
    #     self.assertEquals(pFF.Validate(), True)
# Run the whole test suite when this file is executed directly.
if __name__ == "__main__":
    unittest.main()
| nextmovesoftware/openbabel | scripts/python/examples/dalke_test.py | Python | gpl-2.0 | 45,293 | [
"Open Babel"
] | 8361e5b3a2a727d117801283d49cb476bd2724ba1637898119636f960d6021f4 |
# -*- coding: utf-8 -*-
'''The pluralize and singular methods from the pattern library.
Licenced under the BSD.
See here https://github.com/clips/pattern/blob/master/LICENSE.txt for
complete license information.
'''
import re
# Part-of-speech tags (Penn Treebank style) accepted by pluralize()/singularize().
VERB, NOUN, ADJECTIVE, ADVERB = "VB", "NN", "JJ", "RB"

#### PLURALIZE #####################################################################################
# Based on "An Algorithmic Approach to English Pluralization" by Damian Conway:
# http://www.csse.monash.edu.au/~damian/papers/HTML/Plurals.html

# Prepositions are used to solve things like
# "mother-in-law" or "man at arms"
plural_prepositions = [
    "about", "above", "across", "after", "among", "around", "at", "athwart", "before", "behind",
    "below", "beneath", "beside", "besides", "between", "betwixt", "beyond", "but", "by", "during",
    "except", "for", "from", "in", "into", "near", "of", "off", "on", "onto", "out", "over",
    "since", "till", "to", "under", "until", "unto", "upon", "with"
]
# Inflection rules that are either general,
# or apply to a certain category of words,
# or apply to a certain category of words only in classical mode,
# or apply only in classical mode.
# Each rule consists of:
# suffix, inflection, category and classic flag.
# NOTE: rule order matters — pluralize() walks the rulesets in index order
# and, within a ruleset, the first matching pattern wins.
plural_rules = [
    # 0) Indefinite articles and demonstratives.
    [["^a$|^an$", "some", None, False],
     ["^this$", "these", None, False],
     ["^that$", "those", None, False],
     ["^any$", "all", None, False]
    ],
    # 1) Possessive adjectives.
    # Overlaps with 1/ for "his" and "its".
    # Overlaps with 2/ for "her".
    [["^my$", "our", None, False],
     ["^your$|^thy$", "your", None, False],
     ["^her$|^his$|^its$|^their$", "their", None, False]
    ],
    # 2) Possessive pronouns.
    [["^mine$", "ours", None, False],
     ["^yours$|^thine$", "yours", None, False],
     ["^hers$|^his$|^its$|^theirs$", "theirs", None, False]
    ],
    # 3) Personal pronouns.
    [["^I$", "we", None, False],
     ["^me$", "us", None, False],
     ["^myself$", "ourselves", None, False],
     ["^you$", "you", None, False],
     ["^thou$|^thee$", "ye", None, False],
     ["^yourself$|^thyself$", "yourself", None, False],
     ["^she$|^he$|^it$|^they$", "they", None, False],
     ["^her$|^him$|^it$|^them$", "them", None, False],
     ["^herself$|^himself$|^itself$|^themself$", "themselves", None, False],
     ["^oneself$", "oneselves", None, False]
    ],
    # 4) Words that do not inflect.
    [["$", "", "uninflected", False],
     ["$", "", "uncountable", False],
     ["s$", "s", "s-singular", False],
     ["fish$", "fish", None, False],
     ["([- ])bass$", "\\1bass", None, False],
     ["ois$", "ois", None, False],
     ["sheep$", "sheep", None, False],
     ["deer$", "deer", None, False],
     ["pox$", "pox", None, False],
     ["([A-Z].*)ese$", "\\1ese", None, False],
     ["itis$", "itis", None, False],
     ["(fruct|gluc|galact|lact|ket|malt|rib|sacchar|cellul)ose$", "\\1ose", None, False]
    ],
    # 5) Irregular plurals (mongoose, oxen).
    [["atlas$", "atlantes", None, True],
     ["atlas$", "atlases", None, False],
     ["beef$", "beeves", None, True],
     ["brother$", "brethren", None, True],
     ["child$", "children", None, False],
     ["corpus$", "corpora", None, True],
     ["corpus$", "corpuses", None, False],
     ["^cow$", "kine", None, True],
     ["ephemeris$", "ephemerides", None, False],
     ["ganglion$", "ganglia", None, True],
     ["genie$", "genii", None, True],
     ["genus$", "genera", None, False],
     ["graffito$", "graffiti", None, False],
     ["loaf$", "loaves", None, False],
     ["money$", "monies", None, True],
     ["mongoose$", "mongooses", None, False],
     ["mythos$", "mythoi", None, False],
     ["octopus$", "octopodes", None, True],
     ["opus$", "opera", None, True],
     ["opus$", "opuses", None, False],
     ["^ox$", "oxen", None, False],
     ["penis$", "penes", None, True],
     ["penis$", "penises", None, False],
     ["soliloquy$", "soliloquies", None, False],
     ["testis$", "testes", None, False],
     ["trilby$", "trilbys", None, False],
     ["turf$", "turves", None, True],
     ["numen$", "numena", None, False],
     ["occiput$", "occipita", None, True]
    ],
    # 6) Irregular inflections for common suffixes (synopses, mice, men).
    [["man$", "men", None, False],
     ["person$", "people", None, False],
     ["([lm])ouse$", "\\1ice", None, False],
     ["tooth$", "teeth", None, False],
     ["goose$", "geese", None, False],
     ["foot$", "feet", None, False],
     ["zoon$", "zoa", None, False],
     ["([csx])is$", "\\1es", None, False]
    ],
    # 7) Fully assimilated classical inflections (vertebrae, codices).
    [["ex$", "ices", "ex-ices", False],
     ["ex$", "ices", "ex-ices-classical", True],
     ["um$", "a", "um-a", False],
     ["um$", "a", "um-a-classical", True],
     ["on$", "a", "on-a", False],
     ["a$", "ae", "a-ae", False],
     ["a$", "ae", "a-ae-classical", True]
    ],
    # 8) Classical variants of modern inflections (stigmata, soprani).
    [["trix$", "trices", None, True],
     ["eau$", "eaux", None, True],
     ["ieu$", "ieu", None, True],
     ["([iay])nx$", "\\1nges", None, True],
     ["en$", "ina", "en-ina-classical", True],
     ["a$", "ata", "a-ata-classical", True],
     ["is$", "ides", "is-ides-classical", True],
     ["us$", "i", "us-i-classical", True],
     ["us$", "us", "us-us-classical", True],
     ["o$", "i", "o-i-classical", True],
     ["$", "i", "-i-classical", True],
     ["$", "im", "-im-classical", True]
    ],
    # 9) -ch, -sh and -ss take -es in the plural (churches, classes).
    [["([cs])h$", "\\1hes", None, False],
     ["ss$", "sses", None, False],
     ["x$", "xes", None, False]
    ],
    # 10) Certain words ending in -f or -fe take -ves in the plural (lives, wolves).
    [["([aeo]l)f$", "\\1ves", None, False],
     ["([^d]ea)f$", "\\1ves", None, False],
     ["arf$", "arves", None, False],
     ["([nlw]i)fe$", "\\1ves", None, False],
    ],
    # 11) -y takes -ys if preceded by a vowel or when a proper noun,
    # but -ies if preceded by a consonant (storeys, Marys, stories).
    [["([aeiou])y$", "\\1ys", None, False],
     ["([A-Z].*)y$", "\\1ys", None, False],
     ["y$", "ies", None, False]
    ],
    # 12) Some words ending in -o take -os, the rest take -oes.
    # Words in which the -o is preceded by a vowel always take -os (lassos, potatoes, bamboos).
    [["o$", "os", "o-os", False],
     ["([aeiou])o$", "\\1os", None, False],
     ["o$", "oes", None, False]
    ],
    # 13) Military stuff (Major Generals).
    [["l$", "ls", "general-generals", False]
    ],
    # 14) Otherwise, assume that the plural just adds -s (cats, programmes).
    [["$", "s", None, False]
    ],
]
# For performance, compile the regular expressions only once:
for ruleset in plural_rules:
    for rule in ruleset:
        rule[0] = re.compile(rule[0])
# Suffix categories.
# Word lists for the category-restricted rules in plural_rules.
# Keys match the category names used there; a rule only fires if the
# word is a member of its category list.
# NOTE(review): "enconium" and "millenium" look like typos for
# "encomium"/"millennium" but are kept as in the upstream pattern library.
plural_categories = {
    "uninflected": [
        "aircraft", "antelope", "bison", "bream", "breeches", "britches", "carp", "cattle", "chassis",
        "clippers", "cod", "contretemps", "corps", "debris", "diabetes", "djinn", "eland", "elk",
        "flounder", "gallows", "graffiti", "headquarters", "herpes", "high-jinks", "homework", "innings",
        "jackanapes", "mackerel", "measles", "mews", "moose", "mumps", "offspring", "news", "pincers",
        "pliers", "proceedings", "rabies", "salmon", "scissors", "series", "shears", "species", "swine",
        "trout", "tuna", "whiting", "wildebeest"],
    "uncountable": [
        "advice", "bread", "butter", "cheese", "electricity", "equipment", "fruit", "furniture",
        "garbage", "gravel", "happiness", "information", "ketchup", "knowledge", "love", "luggage",
        "mathematics", "mayonnaise", "meat", "mustard", "news", "progress", "research", "rice",
        "sand", "software", "understanding", "water"],
    # Fixed: "glottis" was listed twice.
    "s-singular": [
        "acropolis", "aegis", "alias", "asbestos", "bathos", "bias", "caddis", "cannabis", "canvas",
        "chaos", "cosmos", "dais", "digitalis", "epidermis", "ethos", "gas", "glottis",
        "ibis", "lens", "mantis", "marquis", "metropolis", "pathos", "pelvis", "polis", "rhinoceros",
        "sassafras", "trellis"],
    "ex-ices": ["codex", "murex", "silex"],
    "ex-ices-classical": [
        "apex", "cortex", "index", "latex", "pontifex", "simplex", "vertex", "vortex"],
    "um-a": [
        "agendum", "bacterium", "candelabrum", "datum", "desideratum", "erratum", "extremum",
        "ovum", "stratum"],
    "um-a-classical": [
        "aquarium", "compendium", "consortium", "cranium", "curriculum", "dictum", "emporium",
        "enconium", "gymnasium", "honorarium", "interregnum", "lustrum", "maximum", "medium",
        "memorandum", "millenium", "minimum", "momentum", "optimum", "phylum", "quantum", "rostrum",
        "spectrum", "speculum", "stadium", "trapezium", "ultimatum", "vacuum", "velum"],
    "on-a": [
        "aphelion", "asyndeton", "criterion", "hyperbaton", "noumenon", "organon", "perihelion",
        "phenomenon", "prolegomenon"],
    "a-ae": ["alga", "alumna", "vertebra"],
    "a-ae-classical": [
        "abscissa", "amoeba", "antenna", "aurora", "formula", "hydra", "hyperbola", "lacuna",
        "medusa", "nebula", "nova", "parabola"],
    "en-ina-classical": ["foramen", "lumen", "stamen"],
    "a-ata-classical": [
        "anathema", "bema", "carcinoma", "charisma", "diploma", "dogma", "drama", "edema", "enema",
        "enigma", "gumma", "lemma", "lymphoma", "magma", "melisma", "miasma", "oedema", "sarcoma",
        "schema", "soma", "stigma", "stoma", "trauma"],
    "is-ides-classical": ["clitoris", "iris"],
    "us-i-classical": [
        "focus", "fungus", "genius", "incubus", "nimbus", "nucleolus", "radius", "stylus", "succubus",
        "torus", "umbilicus", "uterus"],
    "us-us-classical": [
        "apparatus", "cantus", "coitus", "hiatus", "impetus", "nexus", "plexus", "prospectus",
        "sinus", "status"],
    "o-i-classical": ["alto", "basso", "canto", "contralto", "crescendo", "solo", "soprano", "tempo"],
    "-i-classical": ["afreet", "afrit", "efreet"],
    "-im-classical": ["cherub", "goy", "seraph"],
    "o-os": [
        "albino", "archipelago", "armadillo", "commando", "ditto", "dynamo", "embryo", "fiasco",
        "generalissimo", "ghetto", "guano", "inferno", "jumbo", "lingo", "lumbago", "magneto",
        "manifesto", "medico", "octavo", "photo", "pro", "quarto", "rhino", "stylo"],
    "general-generals": [
        "Adjutant", "Brigadier", "Lieutenant", "Major", "Quartermaster",
        "adjutant", "brigadier", "lieutenant", "major", "quartermaster"],
}
def pluralize(word, pos=NOUN, custom={}, classical=True):
    """ Returns the plural of a given word.
        For example: child -> children.
        Handles nouns and adjectives, using classical inflection by default
        (e.g. where "matrix" pluralizes to "matrices" instead of "matrixes").
        The custom dictionary is for user-defined replacements.
        Words with no applicable rule (some adjectives) are returned unchanged.
    """
    if word in custom:
        return custom[word]
    # Recursion of genitives.
    # Remove the apostrophe and any trailing -s,
    # form the plural of the resultant noun, and then append an apostrophe (dog's -> dogs').
    if word.endswith("'") or word.endswith("'s"):
        # Fixed: str.rstrip("'s") strips *all* trailing "s"/"'" characters,
        # which turned "boss's" into "bo". Remove only the genitive marker.
        if word.endswith("'s"):
            owner = word[:-2]
        else:
            owner = word[:-1]
            if owner.endswith("s"):  # genitive plural: dogs' -> dog
                owner = owner[:-1]
        owners = pluralize(owner, pos, custom, classical)
        if owners.endswith("s"):
            return owners + "'"
        else:
            return owners + "'s"
    # Recursion of compound words
    # (Postmasters General, mothers-in-law, Roman deities).
    words = word.replace("-", " ").split(" ")
    if len(words) > 1:
        # Fixed: the original expression relied on "or"/"and" precedence and
        # so ignored the general-generals exception for lowercase "general"
        # ("major general" came out as "majors general").
        if words[1] in ("general", "General") and \
           words[0] not in plural_categories["general-generals"]:
            return word.replace(words[0], pluralize(words[0], pos, custom, classical))
        elif words[1] in plural_prepositions:
            return word.replace(words[0], pluralize(words[0], pos, custom, classical))
        else:
            return word.replace(words[-1], pluralize(words[-1], pos, custom, classical))
    # Only a very few number of adjectives inflect.
    n = range(len(plural_rules))
    if pos.startswith(ADJECTIVE):
        n = [0, 1]
    # Apply pluralization rules.
    for i in n:
        for suffix, inflection, category, classic in plural_rules[i]:
            # A general rule, or a classic rule in classical mode.
            if category is None:
                if not classic or classical:
                    if suffix.search(word) is not None:
                        return suffix.sub(inflection, word)
            # A rule relating to a specific category of words.
            elif word in plural_categories[category] and (not classic or classical):
                if suffix.search(word) is not None:
                    return suffix.sub(inflection, word)
    # No rule matched (possible for non-inflecting parts of speech):
    # return the word unchanged rather than None.
    return word
#### SINGULARIZE ###################################################################################
# Adapted from Bermi Ferrer's Inflector for Python:
# http://www.bermi.org/inflector/
# Copyright (c) 2006 Bermi Ferrer Martinez
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software to deal in this software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of this software, and to permit
# persons to whom this software is furnished to do so, subject to the following
# condition:
#
# THIS SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THIS SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THIS SOFTWARE.
# Suffix-replacement rules tried in order by singularize(); the first
# pattern that matches wins. Group backreferences (\1, \2, ...) carry the
# matched stem into the replacement.
singular_rules = [
    ['(?i)(.)ae$', '\\1a'],
    ['(?i)(.)itis$', '\\1itis'],
    ['(?i)(.)eaux$', '\\1eau'],
    ['(?i)(quiz)zes$', '\\1'],
    ['(?i)(matr)ices$', '\\1ix'],
    ['(?i)(ap|vert|ind)ices$', '\\1ex'],
    ['(?i)^(ox)en', '\\1'],
    ['(?i)(alias|status)es$', '\\1'],
    # Fixed: "[octop|vir]" is a character *class* (any one of o,c,t,p,|,v,i,r),
    # not an alternation, so e.g. "safari" was mangled into "safarus".
    ['(?i)(octop|vir)i$', '\\1us'],
    ['(?i)(cris|ax|test)es$', '\\1is'],
    ['(?i)(shoe)s$', '\\1'],
    ['(?i)(o)es$', '\\1'],
    ['(?i)(bus)es$', '\\1'],
    # Fixed: "[m|l]" also matched a literal "|"; the class is just m or l.
    ['(?i)([ml])ice$', '\\1ouse'],
    ['(?i)(x|ch|ss|sh)es$', '\\1'],
    ['(?i)(m)ovies$', '\\1ovie'],
    ['(?i)(.)ombies$', '\\1ombie'],
    ['(?i)(s)eries$', '\\1eries'],
    ['(?i)([^aeiouy]|qu)ies$', '\\1y'],
    # Certain words ending in -f or -fe take -ves in the plural (lives, wolves).
    ["([aeo]l)ves$", "\\1f"],
    ["([^d]ea)ves$", "\\1f"],
    ["arves$", "arf"],
    ["erves$", "erve"],
    ["([nlw]i)ves$", "\\1fe"],
    ['(?i)([lr])ves$', '\\1f'],
    ["([aeo])ves$", "\\1ve"],
    ['(?i)(sive)s$', '\\1'],
    ['(?i)(tive)s$', '\\1'],
    ['(?i)(hive)s$', '\\1'],
    ['(?i)([^f])ves$', '\\1fe'],
    # -es suffix.
    ['(?i)(^analy)ses$', '\\1sis'],
    ['(?i)((a)naly|(b)a|(d)iagno|(p)arenthe|(p)rogno|(s)ynop|(t)he)ses$', '\\1\\2sis'],
    ['(?i)(.)opses$', '\\1opsis'],
    ['(?i)(.)yses$', '\\1ysis'],
    ['(?i)(h|d|r|o|n|b|cl|p)oses$', '\\1ose'],
    ['(?i)(fruct|gluc|galact|lact|ket|malt|rib|sacchar|cellul)ose$', '\\1ose'],
    ['(?i)(.)oses$', '\\1osis'],
    # -a
    ['(?i)([ti])a$', '\\1um'],
    ['(?i)(n)ews$', '\\1ews'],
    ['(?i)s$', ''],
]
# For performance, compile the regular expressions only once:
for rule in singular_rules:
    rule[0] = re.compile(rule[0])
# Words that are identical in singular and plural form.
singular_uninflected = [
    "aircraft", "antelope", "bison", "bream", "breeches", "britches", "carp", "cattle", "chassis",
    "christmas", "clippers", "cod", "contretemps", "corps", "debris", "diabetes", "djinn", "eland",
    "elk", "flounder", "gallows", "georgia", "graffiti", "headquarters", "herpes", "high-jinks",
    "homework", "innings", "jackanapes", "mackerel", "measles", "mews", "moose", "mumps", "news",
    "offspring", "pincers", "pliers", "proceedings", "rabies", "salmon", "scissors", "series",
    "shears", "species", "swine", "swiss", "trout", "tuna", "whiting", "wildebeest"
]
# Mass nouns with no plural form.
singular_uncountable = [
    "advice", "bread", "butter", "cheese", "electricity", "equipment", "fruit", "furniture",
    "garbage", "gravel", "happiness", "information", "ketchup", "knowledge", "love", "luggage",
    "mathematics", "mayonnaise", "meat", "mustard", "news", "progress", "research", "rice", "sand",
    "software", "understanding", "water"
]
# Words ending in -ie whose plural is -ies, so "birdies" singularizes to
# "birdie" rather than "birdy". NOTE(review): entries starting with "^" can
# never satisfy the endswith() test in singularize(); kept as upstream.
singular_ie = [
    "algerie", "auntie", "beanie", "birdie", "bogie", "bombie", "bookie", "collie", "cookie", "cutie",
    "doggie", "eyrie", "freebie", "goonie", "groupie", "hankie", "hippie", "hoagie", "hottie",
    "indie", "junkie", "laddie", "laramie", "lingerie", "meanie", "nightie", "oldie", "^pie",
    "pixie", "quickie", "reverie", "rookie", "softie", "sortie", "stoolie", "sweetie", "techie",
    "^tie", "toughie", "valkyrie", "veggie", "weenie", "yuppie", "zombie"
]
# Irregular plural -> singular mappings, matched as suffixes by singularize().
# Fixed: the "children" key appeared twice in the literal.
singular_irregular = {
    "men": "man",
    "people": "person",
    "children": "child",
    "sexes": "sex",
    "axes": "axe",
    "moves": "move",
    "teeth": "tooth",
    "geese": "goose",
    "feet": "foot",
    "zoa": "zoon",
    "atlantes": "atlas",
    "atlases": "atlas",
    "beeves": "beef",
    "brethren": "brother",
    "corpora": "corpus",
    "corpuses": "corpus",
    "kine": "cow",
    "ephemerides": "ephemeris",
    "ganglia": "ganglion",
    "genii": "genie",
    "genera": "genus",
    "graffiti": "graffito",
    "helves": "helve",
    "leaves": "leaf",
    "loaves": "loaf",
    "monies": "money",
    "mongooses": "mongoose",
    "mythoi": "mythos",
    "octopodes": "octopus",
    "opera": "opus",
    "opuses": "opus",
    "oxen": "ox",
    "penes": "penis",
    "penises": "penis",
    "soliloquies": "soliloquy",
    "testes": "testis",
    "trilbys": "trilby",
    "turves": "turf",
    "numena": "numen",
    "occipita": "occiput",
    "our": "my",
}
def singularize(word, pos=NOUN, custom={}):
if word in list(custom.keys()):
return custom[word]
# Recursion of compound words (e.g. mothers-in-law).
if "-" in word:
words = word.split("-")
if len(words) > 1 and words[1] in plural_prepositions:
return singularize(words[0], pos, custom)+"-"+"-".join(words[1:])
# dogs' => dog's
if word.endswith("'"):
return singularize(word[:-1]) + "'s"
lower = word.lower()
for w in singular_uninflected:
if w.endswith(lower):
return word
for w in singular_uncountable:
if w.endswith(lower):
return word
for w in singular_ie:
if lower.endswith(w+"s"):
return w
for w in list(singular_irregular.keys()):
if lower.endswith(w):
return re.sub('(?i)'+w+'$', singular_irregular[w], word)
for rule in singular_rules:
suffix, inflection = rule
match = suffix.search(word)
if match:
groups = match.groups()
for k in range(0, len(groups)):
if groups[k] == None:
inflection = inflection.replace('\\'+str(k+1), '')
return suffix.sub(inflection, word)
return word
| jonmcoe/TextBlob | textblob/en/inflect.py | Python | mit | 19,736 | [
"Elk",
"MOOSE",
"Octopus"
] | 6c7b21f889de5c1f572adec63f6eb42b75605293a970d4a1e5268fc2d8bb31b0 |
"""
Test the about xblock
"""
import datetime
from unittest import mock
from unittest.mock import patch
import ddt
import pytz
from ccx_keys.locator import CCXLocator
from django.conf import settings
from django.test.utils import override_settings
from django.urls import reverse
from edx_toggles.toggles.testutils import override_waffle_flag
from milestones.tests.utils import MilestonesTestCaseMixin
from waffle.testutils import override_switch
from xmodule.course_module import (
CATALOG_VISIBILITY_ABOUT,
CATALOG_VISIBILITY_NONE,
COURSE_VISIBILITY_PRIVATE,
COURSE_VISIBILITY_PUBLIC,
COURSE_VISIBILITY_PUBLIC_OUTLINE
)
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase, SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.modulestore.tests.utils import TEST_DATA_DIR
from xmodule.modulestore.xml_importer import import_course_from_xml
from common.djangoapps.course_modes.models import CourseMode
from lms.djangoapps.ccx.tests.factories import CcxFactory
from openedx.core.djangoapps.models.course_details import CourseDetails
from openedx.features.course_experience import COURSE_ENABLE_UNENROLLED_ACCESS_FLAG
from openedx.features.course_experience.waffle import ENABLE_COURSE_ABOUT_SIDEBAR_HTML
from openedx.features.course_experience.waffle import WAFFLE_NAMESPACE as COURSE_EXPERIENCE_WAFFLE_NAMESPACE
from lms.djangoapps.course_home_api.toggles import COURSE_HOME_USE_LEGACY_FRONTEND
from common.djangoapps.student.tests.factories import AdminFactory, CourseEnrollmentAllowedFactory, UserFactory
from common.djangoapps.track.tests import EventTrackingTestCase
from common.djangoapps.util.milestones_helpers import get_prerequisite_courses_display, set_prerequisite_courses
from .helpers import LoginEnrollmentTestCase
# HTML for registration button
REG_STR = "<form id=\"class_enroll_form\" method=\"post\" data-remote=\"true\" action=\"/change_enrollment\">"
SHIB_ERROR_STR = "The currently logged-in user account does not have permission to enroll in this course."
@ddt.ddt
@override_waffle_flag(COURSE_HOME_USE_LEGACY_FRONTEND, active=True)
class AboutTestCase(LoginEnrollmentTestCase, SharedModuleStoreTestCase, EventTrackingTestCase, MilestonesTestCaseMixin):
"""
Tests about xblock.
"""
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.course = CourseFactory.create()
cls.course_without_about = CourseFactory.create(catalog_visibility=CATALOG_VISIBILITY_NONE)
cls.course_with_about = CourseFactory.create(catalog_visibility=CATALOG_VISIBILITY_ABOUT)
cls.purchase_course = CourseFactory.create(org='MITx', number='buyme', display_name='Course To Buy')
CourseDetails.update_about_item(cls.course, 'overview', 'OOGIE BLOOGIE', None)
CourseDetails.update_about_item(cls.course_without_about, 'overview', 'WITHOUT ABOUT', None)
CourseDetails.update_about_item(cls.course_with_about, 'overview', 'WITH ABOUT', None)
def setUp(self):
super().setUp()
self.course_mode = CourseMode(
course_id=self.purchase_course.id,
mode_slug=CourseMode.DEFAULT_MODE_SLUG,
mode_display_name=CourseMode.DEFAULT_MODE_SLUG,
min_price=10
)
self.course_mode.save()
def test_anonymous_user(self):
"""
This test asserts that a non-logged in user can visit the course about page
"""
url = reverse('about_course', args=[str(self.course.id)])
resp = self.client.get(url)
self.assertContains(resp, "OOGIE BLOOGIE")
# Check that registration button is present
self.assertContains(resp, REG_STR)
def test_logged_in(self):
"""
This test asserts that a logged-in user can visit the course about page
"""
self.setup_user()
url = reverse('about_course', args=[str(self.course.id)])
resp = self.client.get(url)
self.assertContains(resp, "OOGIE BLOOGIE")
def test_already_enrolled(self):
"""
Asserts that the end user sees the appropriate messaging
when he/she visits the course about page, but is already enrolled
"""
self.setup_user()
self.enroll(self.course, True)
url = reverse('about_course', args=[str(self.course.id)])
resp = self.client.get(url)
self.assertContains(resp, "You are enrolled in this course")
self.assertContains(resp, "View Course")
@override_settings(COURSE_ABOUT_VISIBILITY_PERMISSION="see_about_page")
def test_visible_about_page_settings(self):
"""
Verify that the About Page honors the permission settings in the course module
"""
url = reverse('about_course', args=[str(self.course_with_about.id)])
resp = self.client.get(url)
self.assertContains(resp, "WITH ABOUT")
url = reverse('about_course', args=[str(self.course_without_about.id)])
resp = self.client.get(url)
assert resp.status_code == 404
@patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True})
def test_logged_in_marketing(self):
self.setup_user()
url = reverse('about_course', args=[str(self.course.id)])
resp = self.client.get(url)
# should be redirected
assert resp.status_code == 302
# follow this time, and check we're redirected to the course home page
resp = self.client.get(url, follow=True)
target_url = resp.redirect_chain[-1][0]
course_home_url = reverse('openedx.course_experience.course_home', args=[str(self.course.id)])
assert target_url.endswith(course_home_url)
@patch.dict(settings.FEATURES, {'ENABLE_COURSE_HOME_REDIRECT': False})
@patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True})
def test_logged_in_marketing_without_course_home_redirect(self):
"""
Verify user is not redirected to course home page when
ENABLE_COURSE_HOME_REDIRECT is set to False
"""
self.setup_user()
url = reverse('about_course', args=[str(self.course.id)])
resp = self.client.get(url)
# should not be redirected
self.assertContains(resp, "OOGIE BLOOGIE")
@patch.dict(settings.FEATURES, {'ENABLE_COURSE_HOME_REDIRECT': True})
@patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': False})
def test_logged_in_marketing_without_mktg_site(self):
"""
Verify user is not redirected to course home page when
ENABLE_MKTG_SITE is set to False
"""
self.setup_user()
url = reverse('about_course', args=[str(self.course.id)])
resp = self.client.get(url)
# should not be redirected
self.assertContains(resp, "OOGIE BLOOGIE")
@patch.dict(settings.FEATURES, {'ENABLE_PREREQUISITE_COURSES': True})
def test_pre_requisite_course(self):
pre_requisite_course = CourseFactory.create(org='edX', course='900', display_name='pre requisite course')
course = CourseFactory.create(pre_requisite_courses=[str(pre_requisite_course.id)])
self.setup_user()
url = reverse('about_course', args=[str(course.id)])
resp = self.client.get(url)
assert resp.status_code == 200
pre_requisite_courses = get_prerequisite_courses_display(course)
pre_requisite_course_about_url = reverse('about_course', args=[str(pre_requisite_courses[0]['key'])])
assert '<span class="important-dates-item-text pre-requisite"><a href="{}">{}</a></span>'.format(pre_requisite_course_about_url, pre_requisite_courses[0]['display']) in resp.content.decode(resp.charset).strip('\n') # pylint: disable=line-too-long
@patch.dict(settings.FEATURES, {'ENABLE_PREREQUISITE_COURSES': True})
def test_about_page_unfulfilled_prereqs(self):
pre_requisite_course = CourseFactory.create(
org='edX',
course='901',
display_name='pre requisite course',
)
pre_requisite_courses = [str(pre_requisite_course.id)]
# for this failure to occur, the enrollment window needs to be in the past
course = CourseFactory.create(
org='edX',
course='1000',
# closed enrollment
enrollment_start=datetime.datetime(2013, 1, 1),
enrollment_end=datetime.datetime(2014, 1, 1),
start=datetime.datetime(2013, 1, 1),
end=datetime.datetime(2030, 1, 1),
pre_requisite_courses=pre_requisite_courses,
)
set_prerequisite_courses(course.id, pre_requisite_courses)
self.setup_user()
self.enroll(self.course, True)
self.enroll(pre_requisite_course, True)
url = reverse('about_course', args=[str(course.id)])
resp = self.client.get(url)
assert resp.status_code == 200
pre_requisite_courses = get_prerequisite_courses_display(course)
pre_requisite_course_about_url = reverse('about_course', args=[str(pre_requisite_courses[0]['key'])])
assert '<span class="important-dates-item-text pre-requisite"><a href="{}">{}</a></span>'.format(pre_requisite_course_about_url, pre_requisite_courses[0]['display']) in resp.content.decode(resp.charset).strip('\n') # pylint: disable=line-too-long
url = reverse('about_course', args=[str(pre_requisite_course.id)])
resp = self.client.get(url)
assert resp.status_code == 200
@ddt.data(
[COURSE_VISIBILITY_PRIVATE],
[COURSE_VISIBILITY_PUBLIC_OUTLINE],
[COURSE_VISIBILITY_PUBLIC],
)
@ddt.unpack
def test_about_page_public_view(self, course_visibility):
"""
Assert that anonymous or unenrolled users see View Course option
when unenrolled access flag is set
"""
with mock.patch('xmodule.course_module.CourseBlock.course_visibility', course_visibility):
with override_waffle_flag(COURSE_ENABLE_UNENROLLED_ACCESS_FLAG, active=True):
url = reverse('about_course', args=[str(self.course.id)])
resp = self.client.get(url)
if course_visibility == COURSE_VISIBILITY_PUBLIC or course_visibility == COURSE_VISIBILITY_PUBLIC_OUTLINE: # lint-amnesty, pylint: disable=consider-using-in
self.assertContains(resp, "View Course")
else:
self.assertContains(resp, "Enroll Now")
@override_waffle_flag(COURSE_HOME_USE_LEGACY_FRONTEND, active=True)
class AboutTestCaseXML(LoginEnrollmentTestCase, ModuleStoreTestCase):
"""
Tests for the course about page
"""
def setUp(self):
"""
Set up the tests
"""
super().setUp()
# The following test course (which lives at common/test/data/2014)
# is closed; we're testing that an about page still appears when
# the course is already closed
self.xml_course_id = self.store.make_course_key('edX', 'detached_pages', '2014')
import_course_from_xml(
self.store,
self.user.id,
TEST_DATA_DIR,
source_dirs=['2014'],
static_content_store=None,
target_id=self.xml_course_id,
raise_on_failure=True,
create_if_not_present=True,
)
# this text appears in that course's about page
# common/test/data/2014/about/overview.html
self.xml_data = "about page 463139"
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_logged_in_xml(self):
self.setup_user()
url = reverse('about_course', args=[str(self.xml_course_id)])
resp = self.client.get(url)
self.assertContains(resp, self.xml_data)
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_anonymous_user_xml(self):
url = reverse('about_course', args=[str(self.xml_course_id)])
resp = self.client.get(url)
self.assertContains(resp, self.xml_data)
@override_waffle_flag(COURSE_HOME_USE_LEGACY_FRONTEND, active=True)
class AboutWithCappedEnrollmentsTestCase(LoginEnrollmentTestCase, SharedModuleStoreTestCase):
"""
This test case will check the About page when a course has a capped enrollment
"""
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.course = CourseFactory.create(metadata={"max_student_enrollments_allowed": 1})
CourseDetails.update_about_item(cls.course, 'overview', 'OOGIE BLOOGIE', None)
def test_enrollment_cap(self):
"""
This test will make sure that enrollment caps are enforced
"""
self.setup_user()
url = reverse('about_course', args=[str(self.course.id)])
resp = self.client.get(url)
self.assertContains(resp, '<a href="#" class="register">')
self.enroll(self.course, verify=True)
# pylint: disable=attribute-defined-outside-init
# create a new account since the first account is already enrolled in the course
self.email = 'foo_second@test.com'
self.password = 'bar'
self.username = 'test_second'
self.create_account(self.username, self.email, self.password)
self.activate_user(self.email)
self.login(self.email, self.password)
# Get the about page again and make sure that the page says that the course is full
resp = self.client.get(url)
self.assertContains(resp, "Course is full")
# Try to enroll as well
result = self.enroll(self.course)
assert not result
# Check that registration button is not present
self.assertNotContains(resp, REG_STR)
@override_waffle_flag(COURSE_HOME_USE_LEGACY_FRONTEND, active=True)
class AboutWithInvitationOnly(SharedModuleStoreTestCase):
"""
This test case will check the About page when a course is invitation only.
"""
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.course = CourseFactory.create(metadata={"invitation_only": True})
def test_invitation_only(self):
"""
Test for user not logged in, invitation only course.
"""
url = reverse('about_course', args=[str(self.course.id)])
resp = self.client.get(url)
self.assertContains(resp, "Enrollment in this course is by invitation only")
# Check that registration button is not present
self.assertNotContains(resp, REG_STR)
def test_invitation_only_but_allowed(self):
"""
Test for user logged in and allowed to enroll in invitation only course.
"""
# Course is invitation only, student is allowed to enroll and logged in
user = UserFactory.create(username='allowed_student', password='test', email='allowed_student@test.com')
CourseEnrollmentAllowedFactory(email=user.email, course_id=self.course.id)
self.client.login(username=user.username, password='test')
url = reverse('about_course', args=[str(self.course.id)])
resp = self.client.get(url)
self.assertContains(resp, "Enroll Now")
# Check that registration button is present
self.assertContains(resp, REG_STR)
@override_waffle_flag(COURSE_HOME_USE_LEGACY_FRONTEND, active=True)
class AboutWithClosedEnrollment(ModuleStoreTestCase):
"""
This test case will check the About page for a course that has enrollment start/end
set but it is currently outside of that period.
"""
def setUp(self):
super().setUp()
self.course = CourseFactory.create(metadata={"invitation_only": False})
# Setup enrollment period to be in future
now = datetime.datetime.now(pytz.UTC)
tomorrow = now + datetime.timedelta(days=1)
nextday = tomorrow + datetime.timedelta(days=1)
self.course.enrollment_start = tomorrow
self.course.enrollment_end = nextday
self.course = self.update_course(self.course, self.user.id)
def test_closed_enrollmement(self):
url = reverse('about_course', args=[str(self.course.id)])
resp = self.client.get(url)
self.assertContains(resp, "Enrollment is Closed")
# Check that registration button is not present
self.assertNotContains(resp, REG_STR)
def test_course_price_is_not_visible_in_sidebar(self):
url = reverse('about_course', args=[str(self.course.id)])
resp = self.client.get(url)
# course price is not visible ihe course_about page when the course
# mode is not set to honor
self.assertNotContains(resp, '<span class="important-dates-item-text">$10</span>')
@ddt.ddt
@override_waffle_flag(COURSE_HOME_USE_LEGACY_FRONTEND, active=True)
class AboutSidebarHTMLTestCase(SharedModuleStoreTestCase):
"""
This test case will check the About page for the content in the HTML sidebar.
"""
def setUp(self):
super().setUp()
self.course = CourseFactory.create()
@ddt.data(
("", "", False),
("about_sidebar_html", "About Sidebar HTML Heading", False),
("about_sidebar_html", "", False),
("", "", True),
("about_sidebar_html", "About Sidebar HTML Heading", True),
("about_sidebar_html", "", True),
)
@ddt.unpack
def test_html_sidebar_enabled(self, itemfactory_display_name, itemfactory_data, waffle_switch_value):
with override_switch(
'{}.{}'.format(
COURSE_EXPERIENCE_WAFFLE_NAMESPACE,
ENABLE_COURSE_ABOUT_SIDEBAR_HTML
),
active=waffle_switch_value
):
if itemfactory_display_name:
ItemFactory.create(
category="about",
parent_location=self.course.location,
display_name=itemfactory_display_name,
data=itemfactory_data,
)
url = reverse('about_course', args=[str(self.course.id)])
resp = self.client.get(url)
if waffle_switch_value and itemfactory_display_name and itemfactory_data:
self.assertContains(resp, '<section class="about-sidebar-html">')
self.assertContains(resp, itemfactory_data)
else:
self.assertNotContains(resp, '<section class="about-sidebar-html">')
class CourseAboutTestCaseCCX(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test for unenrolled student tries to access ccx.
Note: Only CCX coach can enroll a student in CCX. In sum self-registration not allowed.
"""
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.course = CourseFactory.create()
def setUp(self):
super().setUp()
# Create ccx coach account
self.coach = coach = AdminFactory.create(password="test")
self.client.login(username=coach.username, password="test")
@override_waffle_flag(COURSE_HOME_USE_LEGACY_FRONTEND, active=True)
def test_redirect_to_dashboard_unenrolled_ccx(self):
"""
Assert that when unenrolled user tries to access CCX do not allow the user to self-register.
Redirect them to their student dashboard
"""
# create ccx
ccx = CcxFactory(course_id=self.course.id, coach=self.coach)
ccx_locator = CCXLocator.from_course_locator(self.course.id, str(ccx.id))
self.setup_user()
url = reverse('openedx.course_experience.course_home', args=[ccx_locator])
response = self.client.get(url)
expected = reverse('dashboard')
self.assertRedirects(response, expected, status_code=302, target_status_code=200)
| eduNEXT/edx-platform | lms/djangoapps/courseware/tests/test_about.py | Python | agpl-3.0 | 19,717 | [
"VisIt"
] | c05765bde3fcecae1887034755459c6d8cf003e96b91bcda8acdf2254695ec6e |
import doctest
import unittest
import mast.selection as mastsel
import mast.molecule as mastmol
import mast.system as mastsys
import mast.features as mastfeat
import mast.interactions as mastinx
import mast.tests.data as mastdata
import mast.config.molecule as mastmolconfig
import mast.config.system as mastsysconfig
import mast.config.features as mastfeatconfig
import mast.config.interactions as mastinxconfig
class TestFeatureType(unittest.TestCase):
def setUp(self):
self.mock_atom1_attrs = {}
self.mock_atom1_attrs[mastmolconfig.ATOM_ATTRIBUTES[0]] = "mock_attribute"
self.mock_atom1_attrs['undefined_attribute'] = "undefined_mock_attribute"
self.Mock1AtomType = mastmol.AtomType("Mock1AtomType", **self.mock_atom1_attrs)
self.mock_atom2_attrs = {}
self.mock_atom2_attrs[mastmolconfig.ATOM_ATTRIBUTES[0]] = "mock_attribute_2"
self.mock_atom2_attrs['undefined_attribute'] = "undefined_mock_attribute"
self.Mock2AtomType = mastmol.AtomType("Mock2AtomType", **self.mock_atom2_attrs)
self.atom_types = (self.Mock1AtomType, self.Mock2AtomType)
self.mock_bond_attrs = {}
self.mock_bond_attrs[mastmolconfig.BOND_ATTRIBUTES[0]] = "mock_attribute"
self.mock_bond_attrs['undefined_attribute'] = "undefined_mock_attribute"
self.MockBondType = mastmol.BondType("MockBondType",
atom_types=self.atom_types,
**self.mock_bond_attrs)
self.bond_types = [self.MockBondType]
self.mock_attrs = {}
self.bond_map = {0:(0,1)}
self.mock_attrs[mastmolconfig.MOLECULE_ATTRIBUTES[0]] = "mock_attribute"
self.mock_attrs['undefined_attribute'] = "undefined_mock_attribute"
self.MockMoleculeType = mastmol.MoleculeType("MockMoleculeType",
atom_types=self.atom_types,
bond_types=self.bond_types,
bond_map=self.bond_map,
**self.mock_attrs)
def tearDown(self):
pass
def test_factory(self):
atom_idxs = [0]
feature_attrs = {"mock_attribute" : 35}
MockFeatureType = mastfeat.FeatureType("MockFeatureType",
molecule_type=self.MockMoleculeType,
atom_idxs=atom_idxs)
def test_find_features_make_molecule_type(self):
from rdkit import Chem
from rdkit.Chem import AllChem
from mast.interfaces.rdkit import RDKitMoleculeWrapper
# load a string from the data (small-molecule :: sml) as a
# file-like object for reading
sml_path = mastdata.BEN_path
sml_rdkit = Chem.MolFromPDBFile(sml_path, removeHs=False)
wrapper = RDKitMoleculeWrapper(sml_rdkit)
BENType = wrapper.make_molecule_type(find_features=True)
for feature_id, feature in BENType.feature_types.items():
self.assertTrue(feature.attributes == mastfeatconfig.FEATURE_ATTRIBUTES)
for atom_type in feature.atom_types:
self.assertIn(atom_type, BENType.atom_types)
for bond_type in feature.bond_types:
self.assertIn(bond_type, BENType.bond_types)
if __name__ == "__main__":
from mast import features
# doctests
print("\n\n\n Doc Tests\n-----------")
nfail, ntests = doctest.testmod(features, verbose=True)
# unit tests
print("\n\n\n Unit Tests\n-----------")
unittest.main()
| salotz/mast | mastic/tests/test_features.py | Python | mit | 3,721 | [
"RDKit"
] | 9ae827a4f5e7bc5e937622fc142bf196da86160507c46d7fb29fa54a878caa35 |
"""Visualisations of Radio Galaxy Zoo subjects."""
import astropy.io.fits
import matplotlib.colors
import matplotlib.pyplot
import numpy
from . import rgz_data as data
from .config import config
def image(im, contrast=0.05):
"""Plots an RGZ image.
im: NumPy array of an RGZ image.
contrast: Log scale parameter, default 0.05.
-> MatPlotLib image plot.
"""
im = im - im.min() + contrast
return matplotlib.pyplot.imshow(im, origin='lower', cmap='gray',
norm=matplotlib.colors.LogNorm(vmin=im.min(), vmax=im.max()))
def clicks(cs, colour='gray'):
"""Plots a list of RGZ clicks.
Clicks will be flipped and scaled to match the FITS images.
cs: List of (x, y) click tuples.
-> MatPlotLib scatter plot.
"""
cs = (config['surveys']['atlas']['fits_height'] -
numpy.array(cs) * config['surveys']['atlas']['click_to_fits'])
return matplotlib.pyplot.scatter(cs[:, 0], cs[:, 1], color=colour)
def contours(subject, colour='gray'):
"""Plots the contours of a subject.
subject: RGZ subject.
colour: Colour to plot contours in. Default 'gray'.
"""
for row in data.get_contours(subject)['contours']:
for col in row:
xs = []
ys = []
for pair in col['arr']:
xs.append(pair['x'])
ys.append(pair['y'])
ys = config['surveys']['atlas']['fits_height'] - numpy.array(ys)
matplotlib.pyplot.plot(xs, ys, c=colour)
def ir(subject):
"""Plots the IR image of a subject.
subject: RGZ subject.
-> MatPlotLib image plot.
"""
return image(data.get_ir(subject))
def radio(subject):
"""Plots the radio image of a subject.
subject: RGZ subject.
-> MatPlotLib image plot.
"""
return image(data.get_radio(subject))
def subject(s):
"""Shows the IR and contours of a subject.
s: RGZ subject.
"""
ir(s)
contours(s, colour='green')
matplotlib.pyplot.xlim(0, config['surveys']['atlas']['fits_width'])
matplotlib.pyplot.ylim(0, config['surveys']['atlas']['fits_height'])
| chengsoonong/crowdastro | crowdastro/rgz_show.py | Python | mit | 2,107 | [
"Galaxy"
] | c82966024381d243cd706aa090ffc58e7dea19020a28a0c1d3e27071784e06eb |
# Based on https://github.com/probml/pmtk3/blob/master/demos/gaussSeqUpdateSigma1D.m
# Converted by John Fearns - jdf22@infradead.org
# Sequential updating of Sigma in 1d given fixed mean
import superimport
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from scipy.stats import invgamma
from pyprobml_utils import save_fig
# Ensure stochastic reproducibility.
np.random.seed(4)
# Take 100 samples from a Gaussian with mean 5 and variance 10 as our data.
mu = 5
data = norm.rvs(size=100, loc=mu, scale=np.sqrt(10))
# Assume an uninformative prior on the variance of the Gaussian and knowledge
# that the mean is 5.
# This corresponds to a scaled inverse Chi-squared distribution on the variance
# with 0 degrees of freedom. We don't need any variables to model this.
# Define a function that returns updated scaled-inverse-chi-squared posterior parameters for
# our knowledge of the variance, given observed data and the above prior.
# Returns dof, scale.
def posterior_parameters(data):
n = data.shape[0]
return n, np.sum(np.square(data - mu)) / n
# A function that plots a scaled-inverse-chi-squared distribution given its parameters
def plot_posterior(dof, scaling, colour, linestyle, label):
x = np.arange(0, 16, 0.01)
p = scaled_inverse_chi_squared(dof, scaling).pdf(x)
plt.plot(x, p, color=colour, linestyle=linestyle, linewidth=2, label=label)
def scaled_inverse_chi_squared(dof, scale):
# The scaled inverse Chi-squared distribution with the provided params
# is equal to an inverse-gamma distribution with these parameters:
ig_shape = dof / 2
ig_scale = dof * scale / 2
return invgamma(ig_shape, scale=ig_scale)
# For various first-n observations, plot the posterior.
ns = [2, 5, 50, 100]
colours = ['blue', 'red', 'black', 'green']
linestyles = ['-', ':', '-.', '--']
plt.figure()
for i in range(len(ns)):
n = ns[i]
colour = colours[i]
linestyle = linestyles[i]
label = 'N = {}'.format(n)
plot_data = data[:n]
dof, scaling = posterior_parameters(plot_data)
plot_posterior(dof, scaling, colour, linestyle, label)
plt.title(r'prior = IG($\nu=0$), true $\sigma^2=10$')
plt.xlabel(r"$\sigma^2$")
plt.legend(loc='upper right')
save_fig('gaussSeqUpdateSigma1D.pdf')
plt.show()
| probml/pyprobml | scripts/gauss_seq_update_sigma_1d.py | Python | mit | 2,299 | [
"Gaussian"
] | 5c63e70ed7e1e57f3bb99ca32bcfa7c845e39461bc5267833c9551bd36349628 |
from pyspark import SparkContext
from pyspark import SparkConf
# Initialize Spark context
conf = SparkConf()
sc = SparkContext()
# Turn on quiet(er) logging
def quiet_logs( s ):
logger = s._jvm.org.apache.log4j
logger.LogManager.getLogger("org"). setLevel( logger.Level.ERROR )
logger.LogManager.getLogger("akka").setLevel( logger.Level.ERROR )
quiet_logs(sc);
import os
datasets_path = os.path.join('..', 'datasets')
# Small ratings added to RDD
small_ratings_file = os.path.join(datasets_path, 'ml-latest-small', 'ratings.csv')
small_ratings_raw_data = sc.textFile(small_ratings_file)
small_ratings_raw_data_header = small_ratings_raw_data.take(1)[0]
small_ratings_data = small_ratings_raw_data.filter(lambda line: line!=small_ratings_raw_data_header)\
.map(lambda line: line.split(",")).map(lambda tokens: (tokens[0],tokens[1],tokens[2])).cache()
# Small Movies added to RDD
small_movies_file = os.path.join(datasets_path, 'ml-latest-small', 'movies.csv')
small_movies_raw_data = sc.textFile(small_movies_file)
small_movies_raw_data_header = small_movies_raw_data.take(1)[0]
small_movies_data = small_movies_raw_data.filter(lambda line: line!=small_movies_raw_data_header)\
.map(lambda line: line.split(",")).map(lambda tokens: (tokens[0],tokens[1])).cache()
# Test data sets can be queried
print small_ratings_data.take(3)
print small_movies_data.take(3)
# Training the RDDs
training_RDD, validation_RDD, test_RDD = small_ratings_data.randomSplit([6, 2, 2], seed=0L)
validation_for_predict_RDD = validation_RDD.map(lambda x: (x[0], x[1]))
test_for_predict_RDD = test_RDD.map(lambda x: (x[0], x[1]))
from pyspark.mllib.recommendation import ALS
import math
seed = 5L
iterations = 10
regularization_parameter = 0.1
ranks = [4, 8, 12]
errors = [0, 0, 0]
err = 0
tolerance = 0.02
min_error = float('inf')
best_rank = -1
best_iteration = -1
for rank in ranks:
model = ALS.train(training_RDD, rank, seed=seed, iterations=iterations,
lambda_=regularization_parameter)
predictions = model.predictAll(validation_for_predict_RDD).map(lambda r: ((r[0], r[1]), r[2]))
rates_and_preds = validation_RDD.map(lambda r: ((int(r[0]), int(r[1])), float(r[2]))).join(predictions)
error = math.sqrt(rates_and_preds.map(lambda r: (r[1][0] - r[1][1])**2).mean())
errors[err] = error
err += 1
print 'For rank %s the RMSE is %s' % (rank, error)
if error < min_error:
min_error = error
best_rank = rank
# Print best model
print 'The best model was trained with rank %s' % best_rank
# see that we have predictions
predictions.take(3)
rates_and_preds.take(3)
# test selected model
model = ALS.train(training_RDD, best_rank, seed=seed, iterations=iterations,
lambda_=regularization_parameter)
predictions = model.predictAll(test_for_predict_RDD).map(lambda r: ((r[0], r[1]), r[2]))
rates_and_preds = test_RDD.map(lambda r: ((int(r[0]), int(r[1])), float(r[2]))).join(predictions)
error = math.sqrt(rates_and_preds.map(lambda r: (r[1][0] - r[1][1])**2).mean())
print 'For testing data the RMSE is %s' % (error)
# Load complete ratings dataset
complete_ratings_file = os.path.join(datasets_path, 'ml-latest', 'ratings.csv')
complete_ratings_raw_data = sc.textFile(complete_ratings_file)
complete_ratings_raw_data_header = complete_ratings_raw_data.take(1)[0]
# Parse
complete_ratings_data = complete_ratings_raw_data.filter(lambda line: line!=complete_ratings_raw_data_header)\
.map(lambda line: line.split(",")).map(lambda tokens: (int(tokens[0]),int(tokens[1]),float(tokens[2]))).cache()
print "There are %s recommendations in the complete dataset" % (complete_ratings_data.count())
training_RDD, test_RDD = complete_ratings_data.randomSplit([7, 3], seed=0L)
complete_model = ALS.train(training_RDD, best_rank, seed=seed,
iterations=iterations, lambda_=regularization_parameter)
# Test with big datasettest_for_predict_RDD = test_RDD.map(lambda x: (x[0], x[1]))
predictions = complete_model.predictAll(test_for_predict_RDD).map(lambda r: ((r[0], r[1]), r[2]))
rates_and_preds = test_RDD.map(lambda r: ((int(r[0]), int(r[1])), float(r[2]))).join(predictions)
error = math.sqrt(rates_and_preds.map(lambda r: (r[1][0] - r[1][1])**2).mean())
print 'For testing data the RMSE is %s' % (error)
# Load complete movies dataset
complete_movies_file = os.path.join(datasets_path, 'ml-latest', 'movies.csv')
complete_movies_raw_data = sc.textFile(complete_movies_file)
complete_movies_raw_data_header = complete_movies_raw_data.take(1)[0]
# Parse
complete_movies_data = complete_movies_raw_data.filter(lambda line: line!=complete_movies_raw_data_header)\
.map(lambda line: line.split(",")).map(lambda tokens: (int(tokens[0]),tokens[1],tokens[2])).cache()
complete_movies_titles = complete_movies_data.map(lambda x: (int(x[0]),x[1]))
print "There are %s movies in the complete dataset" % (complete_movies_titles.count())
# count ratings per movie
def get_counts_and_averages(ID_and_ratings_tuple):
nratings = len(ID_and_ratings_tuple[1])
return ID_and_ratings_tuple[0], (nratings, float(sum(x for x in ID_and_ratings_tuple[1]))/nratings)
# group every rating by movie id: (movieID, iterable-of-ratings)
movie_ID_with_ratings_RDD = (complete_ratings_data.map(lambda x: (x[1], x[2])).groupByKey())
movie_ID_with_avg_ratings_RDD = movie_ID_with_ratings_RDD.map(get_counts_and_averages)
# (movieID, number of ratings) -- used below to drop rarely-rated movies
movie_rating_counts_RDD = movie_ID_with_avg_ratings_RDD.map(lambda x: (x[0], x[1][0]))
# Test creating new user and add rating
new_user_ID = 0
# The format of each line is (userID, movieID, rating)
new_user_ratings = [
    (0,260,4), # Star Wars (1977)
    (0,1,3), # Toy Story (1995)
    (0,16,3), # Casino (1995)
    (0,25,4), # Leaving Las Vegas (1995)
    (0,32,4), # Twelve Monkeys (a.k.a. 12 Monkeys) (1995)
    (0,335,1), # Flintstones, The (1994)
    (0,379,1), # Timecop (1994)
    (0,296,3), # Pulp Fiction (1994)
    (0,858,5) , # Godfather, The (1972)
    (0,50,4) # Usual Suspects, The (1995)
]
# create rdd with new ratings
new_user_ratings_RDD = sc.parallelize(new_user_ratings)
print 'New user ratings: %s' % new_user_ratings_RDD.take(10)
# union new user rdd with current
complete_data_with_new_ratings_RDD = complete_ratings_data.union(new_user_ratings_RDD)
# Train with new ratings
from time import time
t0 = time()
# retrain ALS from scratch on the combined dataset, reusing the previously
# selected hyper-parameters (best_rank, seed, iterations, regularization)
new_ratings_model = ALS.train(complete_data_with_new_ratings_RDD, best_rank, seed=seed,
                              iterations=iterations, lambda_=regularization_parameter)
tt = time() - t0
print "New model trained in %s seconds" % round(tt,3)
#get top ratings
new_user_ratings_ids = map(lambda x: x[1], new_user_ratings) # get just movie IDs
# keep just those not on the ID list
new_user_unrated_movies_RDD = (complete_movies_data.filter(lambda x: x[0] not in new_user_ratings_ids).map(lambda x: (new_user_ID, x[0])))
# Use the input RDD, new_user_unrated_movies_RDD, with new_ratings_model.predictAll() to predict new ratings for the movies
new_user_recommendations_RDD = new_ratings_model.predictAll(new_user_unrated_movies_RDD)
# Transform new_user_recommendations_RDD into pairs of the form (Movie ID, Predicted Rating)
new_user_recommendations_rating_RDD = new_user_recommendations_RDD.map(lambda x: (x.product, x.rating))
# join with titles and rating counts: (movieID, ((rating, title), count))
new_user_recommendations_rating_title_and_count_RDD = \
    new_user_recommendations_rating_RDD.join(complete_movies_titles).join(movie_rating_counts_RDD)
# flatten data, make it readable
new_user_recommendations_rating_title_and_count_RDD = \
    new_user_recommendations_rating_title_and_count_RDD.map(lambda r: (r[1][0][1], r[1][0][0], r[1][1]))
# get top 25 rated movies
top_movies = new_user_recommendations_rating_title_and_count_RDD.filter(lambda r: r[2]>=25).takeOrdered(25, key=lambda x: -x[1])
print ('TOP recommended movies (with more than 25 reviews):\n%s' %
       '\n'.join(map(str, top_movies)))
# How to get individual rating
my_movie = sc.parallelize([(0, 500)]) # Quiz Show (1994)
# NOTE(review): this predicts over every unrated movie again instead of the
# single-movie RDD `my_movie` defined just above -- probably meant
# predictAll(my_movie); confirm.
individual_movie_rating_RDD = new_ratings_model.predictAll(new_user_unrated_movies_RDD)
# print individual_movie_rating_RDD.take(1)
from pyspark.mllib.recommendation import MatrixFactorizationModel
model_path = os.path.join('movie_lens_als')
# Save and load model
# NOTE(review): `model` is not defined in this section; presumably created
# earlier in the script -- confirm the intended object (new_ratings_model?).
model.save(sc, model_path)
same_model = MatrixFactorizationModel.load(sc, model_path)
| mlinsenbard/481FinalProject | movieRecc.py | Python | mit | 8,315 | [
"CASINO"
] | cdc3c6c8fd67f7bac4e0ad4a7593e17142907b4b0532b1d62f7a20b5cd6ff475 |
# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""logilab-pytest is a tool that eases test running and debugging.
To be able to use logilab-pytest, you should either write tests using
the logilab.common.testlib's framework or the unittest module of the
Python's standard library.
You can customize logilab-pytest's behaviour by defining a ``pytestconf.py``
file somewhere in your test directory. In this file, you can add options or
change the way tests are run.
To add command line options, you must define an ``update_parser`` function in
your ``pytestconf.py`` file. The function must accept a single parameter
that will be the OptionParser's instance to customize.
If you wish to customize the tester, you'll have to define a class named
``CustomPyTester``. This class should extend the default `PyTester` class
defined in the logilab.common.pytest module. Take a look at the `PyTester` and
`DjangoTester` classes for more information about what can be done.
For instance, if you wish to add a custom -l option to specify a loglevel, you
could define the following ``pytestconf.py`` file ::
import logging
from logilab.common.pytest import PyTester
def update_parser(parser):
parser.add_option('-l', '--loglevel', dest='loglevel', action='store',
choices=('debug', 'info', 'warning', 'error', 'critical'),
default='critical', help="the default log level possible choices are "
"('debug', 'info', 'warning', 'error', 'critical')")
return parser
class CustomPyTester(PyTester):
def __init__(self, cvg, options):
super(CustomPyTester, self).__init__(cvg, options)
loglevel = options.loglevel.upper()
logger = logging.getLogger('erudi')
logger.setLevel(logging.getLevelName(loglevel))
In your TestCase class you can then get the value of a specific option with
the ``optval`` method::
class MyTestCase(TestCase):
def test_foo(self):
loglevel = self.optval('loglevel')
# ...
You can also tag your test for fine filtering
With those tag::
from logilab.common.testlib import tag, TestCase
class Exemple(TestCase):
@tag('rouge', 'carre')
def toto(self):
pass
@tag('carre', 'vert')
def tata(self):
pass
@tag('rouge')
def titi(test):
pass
you can filter the function with a simple python expression
* ``toto`` and ``titi`` match ``rouge``
* ``toto``, ``tata`` and ``titi``, match ``rouge or carre``
* ``tata`` and ``titi`` match ``rouge ^ carre``
* ``titi`` match ``rouge and not carre``
"""
from __future__ import print_function
__docformat__ = "restructuredtext en"

# usage string displayed by the command line parser (%prog filled in by optparse)
PYTEST_DOC = """%prog [OPTIONS] [testfile [testpattern]]
examples:
logilab-pytest path/to/mytests.py
logilab-pytest path/to/mytests.py TheseTests
logilab-pytest path/to/mytests.py TheseTests.test_thisone
logilab-pytest path/to/mytests.py -m '(not long and database) or regr'
logilab-pytest one (will run both test_thisone and test_thatone)
logilab-pytest path/to/mytests.py -s not (will skip test_notthisone)
"""

# design-by-contract support flag, toggled by the -d command line option
ENABLE_DBC = False
# file recording already-succeeded tests, used by --restart to resume a session
FILE_RESTART = ".pytest.restart"
import os, sys, re
import os.path as osp
from time import time, clock
import warnings
import types
import inspect
import traceback
from inspect import isgeneratorfunction, isclass
from random import shuffle
from itertools import dropwhile
from logilab.common.deprecation import deprecated
from logilab.common.fileutils import abspath_listdir
from logilab.common import textutils
from logilab.common import testlib, STD_BLACKLIST
# use the same unittest module as testlib
from logilab.common.testlib import unittest, start_interactive_mode
from logilab.common.testlib import nocoverage, pause_trace, replace_trace # bwcompat
from logilab.common.debugger import Debugger, colorize_source
import doctest
import unittest as unittest_legacy
# Old Pythons ship a unittest module without __package__ (pre-2.7 flat
# module); fall back to the external unittest2 backport there.
if not getattr(unittest_legacy, "__package__", None):
    try:
        import unittest2.suite as unittest_suite
    except ImportError:
        sys.exit("You have to install python-unittest2 to use this module")
else:
    import unittest.suite as unittest_suite

# Django support is optional: enabled only when django is importable.
try:
    import django
    from logilab.common.modutils import modpath_from_file, load_module_from_modpath
    DJANGO_FOUND = True
except ImportError:
    DJANGO_FOUND = False
CONF_FILE = 'pytestconf.py'

# filenames considered test modules: test*.py, unittest*.py or smoketest.py
TESTFILE_RE = re.compile("^((unit)?test.*|smoketest)\.py$")
def this_is_a_testfile(filename):
    """Tell whether `filename` looks like a test module.

    Returns the regexp match object (truthy) when the file's basename
    matches, None otherwise.
    """
    leafname = osp.basename(filename)
    return TESTFILE_RE.match(leafname)
# directory names considered test directories: test, tests, unittest, unittests
TESTDIR_RE = re.compile("^(unit)?tests?$")
def this_is_a_testdir(dirpath):
    """Tell whether `dirpath` looks like a test directory.

    Returns the regexp match object (truthy) when the directory's basename
    matches, None otherwise.
    """
    leafname = osp.basename(dirpath)
    return TESTDIR_RE.match(leafname)
def load_pytest_conf(path, parser):
    """Load a ``pytestconf.py`` file and update the default parser
    and / or tester.

    :param path: path of the configuration file
    :param parser: OptionParser instance passed to the file's
      ``update_parser`` function when it defines one
    :return: the ``CustomPyTester`` class defined by the configuration file,
      or the default `PyTester` class
    """
    namespace = {}
    # use a with-block so the file handle is always closed, even when exec()
    # raises (the previous version leaked the handle)
    with open(path, 'rb') as conf_file:
        exec(conf_file.read(), namespace)
    if 'update_parser' in namespace:
        namespace['update_parser'](parser)
    return namespace.get('CustomPyTester', PyTester)
def project_root(parser, projdir=None):
    """Try to find the project's root directory and the tester class to use.

    :param parser: OptionParser possibly customized by the ``pytestconf.py``
      files found while walking up the tree
    :param projdir: directory to start from; defaults to the current working
      directory *at call time*.  (The previous signature used
      ``projdir=os.getcwd()``, which froze the directory the module happened
      to be imported from.)
    :return: a ``(rootdir, testercls)`` tuple
    """
    if projdir is None:
        projdir = os.getcwd()
    previousdir = curdir = osp.abspath(projdir)
    testercls = PyTester
    conf_file_path = osp.join(curdir, CONF_FILE)
    if osp.isfile(conf_file_path):
        testercls = load_pytest_conf(conf_file_path, parser)
    # climb up while we are still inside a test directory or a package
    while this_is_a_testdir(curdir) or \
            osp.isfile(osp.join(curdir, '__init__.py')):
        newdir = osp.normpath(osp.join(curdir, os.pardir))
        if newdir == curdir:
            # reached the filesystem root
            break
        previousdir = curdir
        curdir = newdir
        conf_file_path = osp.join(curdir, CONF_FILE)
        if osp.isfile(conf_file_path):
            # a pytestconf.py found higher up overrides the tester class
            testercls = load_pytest_conf(conf_file_path, parser)
    return previousdir, testercls
class GlobalTestReport(object):
    """Accumulate statistics (runs, failures, errors, timings) over every
    test module executed during a logilab-pytest session."""

    def __init__(self):
        self.ran = 0
        self.skipped = 0
        self.failures = 0
        self.errors = 0
        self.ttime = 0          # cumulated wall-clock time
        self.ctime = 0          # cumulated CPU time
        self.modulescount = 0   # number of test modules processed
        self.errmodules = []    # (module name, nb problems, nb tests run)

    def feed(self, filename, testresult, ttime, ctime):
        """Merge one module's unittest result into the global counters."""
        module_runs = testresult.testsRun
        self.ran += module_runs
        self.skipped += len(getattr(testresult, 'skipped', ()))
        self.failures += len(testresult.failures)
        self.errors += len(testresult.errors)
        self.ttime += ttime
        self.ctime += ctime
        self.modulescount += 1
        if not testresult.wasSuccessful():
            nb_problems = len(testresult.failures) + len(testresult.errors)
            self.errmodules.append((filename[:-3], nb_problems, module_runs))

    def failed_to_test_module(self, filename):
        """Record a module whose import itself crashed under unittest."""
        self.errors += 1
        self.modulescount += 1
        self.ran += 1
        self.errmodules.append((filename[:-3], 1, 1))

    def skip_module(self, filename):
        """Record a module skipped as a whole (SkipTest raised at import)."""
        self.modulescount += 1
        self.ran += 1
        self.errmodules.append((filename[:-3], 0, 0))

    def __str__(self):
        """Render the end-of-session summary."""
        summary = ['Ran %s test cases in %.2fs (%.2fs CPU)'
                   % (self.ran, self.ttime, self.ctime)]
        if self.errors:
            summary.append('%s errors' % self.errors)
        if self.failures:
            summary.append('%s failures' % self.failures)
        if self.skipped:
            summary.append('%s skipped' % self.skipped)
        nb_ok = self.modulescount - len(self.errmodules)
        if self.errors or self.failures:
            details = ', '.join('%s [%s/%s]' % info for info in self.errmodules)
            tail = ('%s modules OK (%s failed)' % (nb_ok, len(self.errmodules))
                    + '\nfailures: %s' % details)
        elif nb_ok:
            tail = 'All %s modules OK' % nb_ok
        else:
            # nothing was run at all
            return ''
        return '%s\n%s' % (', '.join(summary), tail)
def remove_local_modules_from_sys(testdir):
    """Remove from the module cache every module that comes from `testdir`.

    This is used to avoid strange side-effects when using the
    testall() mode of pytest.
    For instance, if we run pytest on this tree::

        A/test/test_utils.py
        B/test/test_utils.py

    we **have** to clean sys.modules to make sure the correct test_utils
    module is ran in B
    """
    # list(): we delete entries while iterating
    for modname, mod in list(sys.modules.items()):
        if mod is None:
            continue
        # built-in modules (sys, imp, marshal, ...) have no __file__, and
        # namespace packages may have __file__ set to None: skip both
        # (the previous version crashed with TypeError on the latter)
        modfile = getattr(mod, '__file__', None)
        if modfile is None:
            continue
        # if modfile is not an absolute path, it was probably loaded locally
        # during the tests
        if not osp.isabs(modfile) or modfile.startswith(testdir):
            del sys.modules[modname]
class PyTester(object):
    """encapsulates testrun logic: discovers test directories / files and
    feeds their results into a GlobalTestReport"""

    def __init__(self, cvg, options):
        # cvg: optional coverage recorder (may be None)
        # options: optparse Values carrying the command line flags
        self.report = GlobalTestReport()
        self.cvg = cvg
        self.options = options
        # True until the restart file has been (re)initialized once
        self.firstwrite = True
        self._errcode = None

    def show_report(self):
        """prints the report and returns appropriate exitcode"""
        # everything has been ran, print report
        print("*" * 79)
        print(self.report)

    def get_errcode(self):
        # errcode set explicitly
        if self._errcode is not None:
            return self._errcode
        # otherwise non-zero exactly when something failed or errored
        return self.report.failures + self.report.errors

    def set_errcode(self, errcode):
        self._errcode = errcode
    errcode = property(get_errcode, set_errcode)

    def testall(self, exitfirst=False):
        """walks through current working directory, finds something
        which can be considered as a testdir and runs every test there
        """
        here = os.getcwd()
        for dirname, dirs, _ in os.walk(here):
            # prune VCS / blacklisted directories in place so walk skips them
            for skipped in STD_BLACKLIST:
                if skipped in dirs:
                    dirs.remove(skipped)
            basename = osp.basename(dirname)
            if this_is_a_testdir(basename):
                print("going into", dirname)
                # we found a testdir, let's explore it !
                if not self.testonedir(dirname, exitfirst):
                    break
                # do not recurse below a test directory we just ran
                dirs[:] = []
        if self.report.ran == 0:
            print("no test dir found testing here:", here)
            # if no test was found during the visit, consider
            # the local directory as a test directory even if
            # it doesn't have a traditional test directory name
            self.testonedir(here)

    def testonedir(self, testdir, exitfirst=False):
        """finds each testfile in the `testdir` and runs it

        Return True when all tests have been executed, False if exitfirst
        and some test has failed.
        """
        files = abspath_listdir(testdir)
        # randomize execution order to expose hidden inter-test dependencies
        shuffle(files)
        for filename in files:
            if this_is_a_testfile(filename):
                if self.options.exitfirst and not self.options.restart:
                    # overwrite restart file
                    try:
                        restartfile = open(FILE_RESTART, "w")
                        restartfile.close()
                    except Exception:
                        print("Error while overwriting succeeded test file :",
                              osp.join(os.getcwd(), FILE_RESTART),
                              file=sys.__stderr__)
                        raise
                # run test and collect information
                prog = self.testfile(filename, batchmode=True)
                if exitfirst and (prog is None or not prog.result.wasSuccessful()):
                    return False
                self.firstwrite = True
        # clean local modules
        remove_local_modules_from_sys(testdir)
        return True

    def testfile(self, filename, batchmode=False):
        """runs every test in `filename`

        :param filename: an absolute path pointing to a unittest file
        """
        here = os.getcwd()
        dirname = osp.dirname(filename)
        if dirname:
            # run from the module's own directory so its relative paths resolve
            os.chdir(dirname)
        # overwrite restart file if it has not been done already
        if self.options.exitfirst and not self.options.restart and self.firstwrite:
            try:
                restartfile = open(FILE_RESTART, "w")
                restartfile.close()
            except Exception:
                print("Error while overwriting succeeded test file :",
                      osp.join(os.getcwd(), FILE_RESTART), file=sys.__stderr__)
                raise
        modname = osp.basename(filename)[:-3]
        print((' %s ' % osp.basename(filename)).center(70, '='),
              file=sys.__stderr__)
        try:
            # NOTE(review): time.clock() was removed in Python 3.8; this module
            # therefore only runs on older interpreters -- confirm target version.
            tstart, cstart = time(), clock()
            try:
                testprog = SkipAwareTestProgram(modname, batchmode=batchmode, cvg=self.cvg,
                                                options=self.options, outstream=sys.stderr)
            except KeyboardInterrupt:
                raise
            except SystemExit as exc:
                # propagate the requested exit code to the caller
                self.errcode = exc.code
                raise
            except testlib.SkipTest:
                print("Module skipped:", filename)
                self.report.skip_module(filename)
                return None
            except Exception:
                # the module could not even be imported / collected
                self.report.failed_to_test_module(filename)
                print('unhandled exception occurred while testing', modname,
                      file=sys.stderr)
                import traceback
                traceback.print_exc(file=sys.stderr)
                return None

            tend, cend = time(), clock()
            ttime, ctime = (tend - tstart), (cend - cstart)
            self.report.feed(filename, testprog.result, ttime, ctime)
            return testprog
        finally:
            # always restore the original working directory
            if dirname:
                os.chdir(here)
class DjangoTester(PyTester):
    """PyTester variant that sets up / tears down a Django test environment
    (settings, test database) around each test file."""

    def load_django_settings(self, dirname):
        """try to find project's setting and load it"""
        curdir = osp.abspath(dirname)
        previousdir = curdir
        # walk up the package tree until a settings.py is found
        while not osp.isfile(osp.join(curdir, 'settings.py')) and \
                osp.isfile(osp.join(curdir, '__init__.py')):
            newdir = osp.normpath(osp.join(curdir, os.pardir))
            if newdir == curdir:
                # reached the filesystem root without finding settings.py
                raise AssertionError('could not find settings.py')
            previousdir = curdir
            curdir = newdir
        # late django initialization
        settings = load_module_from_modpath(modpath_from_file(osp.join(curdir, 'settings.py')))
        # NOTE(review): setup_environ was removed in Django >= 1.6; this class
        # only works with old Django versions -- confirm supported range.
        from django.core.management import setup_environ
        setup_environ(settings)
        settings.DEBUG = False
        self.settings = settings
        # add settings dir to pythonpath since it's the project's root
        if curdir not in sys.path:
            sys.path.insert(1, curdir)

    def before_testfile(self):
        # Those imports must be done **after** setup_environ was called
        from django.test.utils import setup_test_environment
        from django.test.utils import create_test_db
        setup_test_environment()
        create_test_db(verbosity=0)
        self.dbname = self.settings.TEST_DATABASE_NAME

    def after_testfile(self):
        # Those imports must be done **after** setup_environ was called
        from django.test.utils import teardown_test_environment
        from django.test.utils import destroy_test_db
        teardown_test_environment()
        print('destroying', self.dbname)
        destroy_test_db(self.dbname, verbosity=0)

    def testall(self, exitfirst=False):
        """walks through current working directory, finds something
        which can be considered as a testdir and runs every test there
        """
        for dirname, dirs, files in os.walk(os.getcwd()):
            # skip version control administrative directories
            for skipped in ('CVS', '.svn', '.hg'):
                if skipped in dirs:
                    dirs.remove(skipped)
            if 'tests.py' in files:
                # django application directory
                if not self.testonedir(dirname, exitfirst):
                    break
                dirs[:] = []
            else:
                basename = osp.basename(dirname)
                if basename in ('test', 'tests'):
                    print("going into", dirname)
                    # we found a testdir, let's explore it !
                    if not self.testonedir(dirname, exitfirst):
                        break
                    dirs[:] = []

    def testonedir(self, testdir, exitfirst=False):
        """finds each testfile in the `testdir` and runs it

        Return True when all tests have been executed, False if exitfirst
        and some test has failed.
        """
        # special django behaviour : if tests are splitted in several files,
        # remove the main tests.py file and tests each test file separately
        testfiles = [fpath for fpath in abspath_listdir(testdir)
                     if this_is_a_testfile(fpath)]
        if len(testfiles) > 1:
            try:
                testfiles.remove(osp.join(testdir, 'tests.py'))
            except ValueError:
                pass
        for filename in testfiles:
            # run test and collect information
            prog = self.testfile(filename, batchmode=True)
            if exitfirst and (prog is None or not prog.result.wasSuccessful()):
                return False
        # clean local modules
        remove_local_modules_from_sys(testdir)
        return True

    def testfile(self, filename, batchmode=False):
        """runs every test in `filename`

        :param filename: an absolute path pointing to a unittest file
        """
        here = os.getcwd()
        dirname = osp.dirname(filename)
        if dirname:
            os.chdir(dirname)
        self.load_django_settings(dirname)
        modname = osp.basename(filename)[:-3]
        print((' %s ' % osp.basename(filename)).center(70, '='),
              file=sys.stderr)
        try:
            try:
                tstart, cstart = time(), clock()
                self.before_testfile()
                testprog = SkipAwareTestProgram(modname, batchmode=batchmode, cvg=self.cvg)
                tend, cend = time(), clock()
                ttime, ctime = (tend - tstart), (cend - cstart)
                self.report.feed(filename, testprog.result, ttime, ctime)
                return testprog
            except SystemExit:
                raise
            except Exception as exc:
                import traceback
                traceback.print_exc()
                self.report.failed_to_test_module(filename)
                print('unhandled exception occurred while testing', modname)
                print('error: %s' % exc)
                return None
        finally:
            # NOTE(review): after_testfile() runs even when before_testfile()
            # failed, in which case self.dbname may be unset -- confirm.
            self.after_testfile()
            if dirname:
                os.chdir(here)
def make_parser():
    """Create and return the OptionParser instance used by logilab-pytest.

    The parser carries a ``newargs`` list collecting the option strings that
    must be forwarded verbatim to ``unittest_main`` (rebuilt by the callback
    functions below).
    """
    from optparse import OptionParser
    parser = OptionParser(usage=PYTEST_DOC)
    parser.newargs = []

    def rebuild_cmdline(option, opt, value, parser):
        """carry the option to unittest_main"""
        parser.newargs.append(opt)

    def rebuild_and_store(option, opt, value, parser):
        """carry the option to unittest_main and store
        the value on current parser
        """
        parser.newargs.append(opt)
        setattr(parser.values, option.dest, True)

    # (a dead local helper `capture_and_rebuild`, which silenced
    # DeprecationWarning before delegating to rebuild_cmdline, was removed:
    # nothing referenced it)

    # logilab-pytest options
    parser.add_option('-t', dest='testdir', default=None,
                      help="directory where the tests will be found")
    parser.add_option('-d', dest='dbc', default=False,
                      action="store_true", help="enable design-by-contract")
    # unittest_main options provided and passed through logilab-pytest
    parser.add_option('-v', '--verbose', callback=rebuild_cmdline,
                      action="callback", help="Verbose output")
    parser.add_option('-i', '--pdb', callback=rebuild_and_store,
                      dest="pdb", action="callback",
                      help="Enable test failure inspection")
    parser.add_option('-x', '--exitfirst', callback=rebuild_and_store,
                      dest="exitfirst", default=False,
                      action="callback", help="Exit on first failure "
                      "(only make sense when logilab-pytest run one test file)")
    parser.add_option('-R', '--restart', callback=rebuild_and_store,
                      dest="restart", default=False,
                      action="callback",
                      help="Restart tests from where it failed (implies exitfirst) "
                      "(only make sense if tests previously ran with exitfirst only)")
    parser.add_option('--color', callback=rebuild_cmdline,
                      action="callback",
                      help="colorize tracebacks")
    parser.add_option('-s', '--skip',
                      # XXX: I wish I could use the callback action but it
                      # doesn't seem to be able to get the value
                      # associated to the option
                      action="store", dest="skipped", default=None,
                      help="test names matching this name will be skipped "
                      "to skip several patterns, use commas")
    parser.add_option('-q', '--quiet', callback=rebuild_cmdline,
                      action="callback", help="Minimal output")
    parser.add_option('-P', '--profile', default=None, dest='profile',
                      help="Profile execution and store data in the given file")
    parser.add_option('-m', '--match', default=None, dest='tags_pattern',
                      help="only execute test whose tag match the current pattern")
    if DJANGO_FOUND:
        parser.add_option('-J', '--django', dest='django', default=False,
                          action="store_true",
                          help='use logilab-pytest for django test cases')
    return parser
def parseargs(parser):
    """Parse the command line with `parser` and return an
    ``(options, explicitfile)`` pair.

    ``explicitfile`` is the single ``.py`` file given on the command line, or
    None.  Options meant for ``unittest_main()`` are accumulated on
    ``parser.newargs`` (the parser errors out when several filenames are
    given).
    """
    options, args = parser.parse_args()
    explicitfile = None
    python_files = [candidate for candidate in args if candidate.endswith('.py')]
    if python_files:
        if len(python_files) > 1:
            parser.error("only one filename is acceptable")
        explicitfile = python_files[0]
        args.remove(explicitfile)
    # someone wants design-by-contract checks
    testlib.ENABLE_DBC = options.dbc
    forwarded = parser.newargs
    if options.skipped:
        forwarded.extend(['--skip', options.skipped])
    # restart implies exitfirst
    if options.restart:
        options.exitfirst = True
    # append remaining positional arguments to the new sys.argv and let
    # unittest_main do the rest
    forwarded += args
    return options, explicitfile
@deprecated('[logilab-common 1.3] logilab-pytest is deprecated, use another test runner')
def run():
    """Command line entry point: build the parser, locate the project root,
    run the requested tests and exit with the accumulated error code."""
    parser = make_parser()
    rootdir, testercls = project_root(parser)
    options, explicitfile = parseargs(parser)
    # mock a new command line for unittest_main
    sys.argv[1:] = parser.newargs
    cvg = None
    if '' not in sys.path:
        sys.path.insert(0, '')
    # pick the tester implementation
    if DJANGO_FOUND and options.django:
        tester = DjangoTester(cvg, options)
    else:
        tester = testercls(cvg, options)
    # select what to run: one file, one directory, or everything below cwd
    if explicitfile:
        runner, runner_args = tester.testfile, (explicitfile,)
    elif options.testdir:
        runner, runner_args = tester.testonedir, (options.testdir, options.exitfirst)
    else:
        runner, runner_args = tester.testall, (options.exitfirst,)
    try:
        try:
            if options.profile:
                import hotshot
                prof = hotshot.Profile(options.profile)
                prof.runcall(runner, *runner_args)
                prof.close()
                print('profile data saved in', options.profile)
            else:
                runner(*runner_args)
        except SystemExit:
            raise
        except:
            import traceback
            traceback.print_exc()
    finally:
        # always print the report, then exit with the error count
        tester.show_report()
        sys.exit(tester.errcode)
class SkipAwareTestProgram(unittest.TestProgram):
    # XXX: don't try to stay close to unittest.py, use optparse
    USAGE = """\
Usage: %(progName)s [options] [test] [...]

Options:
  -h, --help       Show this message
  -v, --verbose    Verbose output
  -i, --pdb        Enable test failure inspection
  -x, --exitfirst  Exit on first failure
  -s, --skip       skip test matching this pattern (no regexp for now)
  -q, --quiet      Minimal output
  --color          colorize tracebacks
  -m, --match      Run only test whose tag match this pattern
  -P, --profile    FILE: Run the tests using cProfile and saving results
                   in FILE

Examples:
  %(progName)s                          - run default set of tests
  %(progName)s MyTestSuite              - run suite 'MyTestSuite'
  %(progName)s MyTestCase.testSomething - run MyTestCase.testSomething
  %(progName)s MyTestCase               - run all 'test*' test methods
                                          in MyTestCase
"""
    def __init__(self, module='__main__', defaultTest=None, batchmode=False,
                 cvg=None, options=None, outstream=sys.stderr):
        # batchmode: when true, do not sys.exit() at the end of the run
        # (used by PyTester which aggregates several modules)
        # cvg: optional coverage recorder
        # options: optparse Values from the logilab-pytest command line (may be None)
        self.batchmode = batchmode
        self.cvg = cvg
        self.options = options
        self.outstream = outstream
        super(SkipAwareTestProgram, self).__init__(
            module=module, defaultTest=defaultTest,
            testLoader=NonStrictTestLoader())

    def parseArgs(self, argv):
        """Parse the forwarded command line (getopt style) and build the
        test suite to run."""
        self.pdbmode = False
        self.exitfirst = False
        self.skipped_patterns = []
        self.test_pattern = None
        self.tags_pattern = None
        self.colorize = False
        self.profile_name = None
        import getopt
        try:
            options, args = getopt.getopt(argv[1:], 'hHvixrqcp:s:m:P:',
                                          ['help', 'verbose', 'quiet', 'pdb',
                                           'exitfirst', 'restart',
                                           'skip=', 'color', 'match=', 'profile='])
            for opt, value in options:
                if opt in ('-h', '-H', '--help'):
                    self.usageExit()
                if opt in ('-i', '--pdb'):
                    self.pdbmode = True
                if opt in ('-x', '--exitfirst'):
                    self.exitfirst = True
                if opt in ('-r', '--restart'):
                    # restart implies exitfirst
                    self.restart = True
                    self.exitfirst = True
                if opt in ('-q', '--quiet'):
                    self.verbosity = 0
                if opt in ('-v', '--verbose'):
                    self.verbosity = 2
                if opt in ('-s', '--skip'):
                    self.skipped_patterns = [pat.strip() for pat in
                                             value.split(', ')]
                if opt == '--color':
                    self.colorize = True
                if opt in ('-m', '--match'):
                    #self.tags_pattern = value
                    # NOTE(review): self.options is an optparse Values object
                    # (or None); neither supports item assignment, so this line
                    # would raise TypeError when -m is used here -- probably
                    # meant setattr(self.options, 'tags_pattern', value). Confirm.
                    self.options["tag_pattern"] = value
                if opt in ('-P', '--profile'):
                    self.profile_name = value
            # let the loader know which test names must be skipped
            self.testLoader.skipped_patterns = self.skipped_patterns
            if len(args) == 0 and self.defaultTest is None:
                # no test specified: use the module's suite() if any,
                # otherwise load everything from the module
                suitefunc = getattr(self.module, 'suite', None)
                if isinstance(suitefunc, (types.FunctionType,
                                          types.MethodType)):
                    self.test = self.module.suite()
                else:
                    self.test = self.testLoader.loadTestsFromModule(self.module)
                return
            if len(args) > 0:
                # first positional argument doubles as the name filter pattern
                self.test_pattern = args[0]
                self.testNames = args
            else:
                self.testNames = (self.defaultTest, )
            self.createTests()
        except getopt.error as msg:
            self.usageExit(msg)

    def runTests(self):
        """Run the suite, optionally under cProfile when -P was given."""
        if self.profile_name:
            import cProfile
            cProfile.runctx('self._runTests()', globals(), locals(), self.profile_name )
        else:
            return self._runTests()

    def _runTests(self):
        self.testRunner = SkipAwareTextTestRunner(verbosity=self.verbosity,
                                                  stream=self.outstream,
                                                  exitfirst=self.exitfirst,
                                                  pdbmode=self.pdbmode,
                                                  cvg=self.cvg,
                                                  test_pattern=self.test_pattern,
                                                  skipped_patterns=self.skipped_patterns,
                                                  colorize=self.colorize,
                                                  batchmode=self.batchmode,
                                                  options=self.options)

        def removeSucceededTests(obj, succTests):
            """ Recursive function that removes succTests from
            a TestSuite or TestCase
            """
            if isinstance(obj, unittest.TestSuite):
                removeSucceededTests(obj._tests, succTests)
            if isinstance(obj, list):
                for el in obj[:]:
                    if isinstance(el, unittest.TestSuite):
                        removeSucceededTests(el, succTests)
                    elif isinstance(el, unittest.TestCase):
                        descr = '.'.join((el.__class__.__module__,
                                          el.__class__.__name__,
                                          el._testMethodName))
                        if descr in succTests:
                            obj.remove(el)
        # take care, self.options may be None
        if getattr(self.options, 'restart', False):
            # retrieve succeeded tests from FILE_RESTART
            try:
                restartfile = open(FILE_RESTART, 'r')
                try:
                    succeededtests = list(elem.rstrip('\n\r') for elem in
                                          restartfile.readlines())
                    removeSucceededTests(self.test, succeededtests)
                finally:
                    restartfile.close()
            except Exception as ex:
                raise Exception("Error while reading succeeded tests into %s: %s"
                                % (osp.join(os.getcwd(), FILE_RESTART), ex))

        result = self.testRunner.run(self.test)
        # help garbage collection: we want TestSuite, which hold refs to every
        # executed TestCase, to be gc'ed
        del self.test
        if getattr(result, "debuggers", None) and \
                getattr(self, "pdbmode", None):
            start_interactive_mode(result)
        if not getattr(self, "batchmode", None):
            sys.exit(not result.wasSuccessful())
        self.result = result
class SkipAwareTextTestRunner(unittest.TextTestRunner):
    """TextTestRunner filtering tests through skip patterns, the positional
    name pattern and tag matching, producing a SkipAwareTestResult."""

    def __init__(self, stream=sys.stderr, verbosity=1,
                 exitfirst=False, pdbmode=False, cvg=None, test_pattern=None,
                 skipped_patterns=(), colorize=False, batchmode=False,
                 options=None):
        super(SkipAwareTextTestRunner, self).__init__(stream=stream,
                                                      verbosity=verbosity)
        self.exitfirst = exitfirst
        self.pdbmode = pdbmode
        self.cvg = cvg
        self.test_pattern = test_pattern
        self.skipped_patterns = skipped_patterns
        self.colorize = colorize
        self.batchmode = batchmode
        self.options = options

    def _this_is_skipped(self, testedname):
        # plain substring match against every -s/--skip pattern
        return any([(pat in testedname) for pat in self.skipped_patterns])

    def _runcondition(self, test, skipgenerator=True):
        """Decide whether `test` should be run, combining skip patterns,
        the positional test pattern and tag matching."""
        if isinstance(test, testlib.InnerTest):
            testname = test.name
        else:
            if isinstance(test, testlib.TestCase):
                meth = test._get_test_method()
                # NOTE(review): instances normally have no __name__ attribute
                # (classes do); presumably testlib.TestCase provides one --
                # confirm against testlib.
                testname = '%s.%s' % (test.__name__, meth.__name__)
            elif isinstance(test, types.FunctionType):
                func = test
                testname = func.__name__
            elif isinstance(test, types.MethodType):
                cls = test.__self__.__class__
                testname = '%s.%s' % (cls.__name__, test.__name__)
            else:
                return True # Not sure when this happens
            if isgeneratorfunction(test) and skipgenerator:
                return self.does_match_tags(test) # Let inner tests decide at run time
        if self._this_is_skipped(testname):
            return False # this was explicitly skipped
        if self.test_pattern is not None:
            try:
                # 'Class.method' pattern: match both halves independently
                classpattern, testpattern = self.test_pattern.split('.')
                klass, name = testname.split('.')
                if classpattern not in klass or testpattern not in name:
                    return False
            except ValueError:
                # plain pattern: simple substring match on the full name
                if self.test_pattern not in testname:
                    return False
        return self.does_match_tags(test)

    def does_match_tags(self, test):
        """Return True when `test`'s tags match the --match pattern (always
        True when no pattern was given)."""
        if self.options is not None:
            tags_pattern = getattr(self.options, 'tags_pattern', None)
            if tags_pattern is not None:
                tags = getattr(test, 'tags', testlib.Tags())
                if tags.inherit and isinstance(test, types.MethodType):
                    # merge the tags declared on the test's class
                    tags = tags | getattr(test.__self__.__class__, 'tags', testlib.Tags())
                return tags.match(tags_pattern)
        return True # no pattern

    def _makeResult(self):
        return SkipAwareTestResult(self.stream, self.descriptions,
                                   self.verbosity, self.exitfirst,
                                   self.pdbmode, self.cvg, self.colorize)

    def run(self, test):
        "Run the given test case or test suite."
        result = self._makeResult()
        startTime = time()
        # the runcondition callable lets testlib filter individual tests
        test(result, runcondition=self._runcondition, options=self.options)
        stopTime = time()
        timeTaken = stopTime - startTime
        result.printErrors()
        if not self.batchmode:
            self.stream.writeln(result.separator2)
            run = result.testsRun
            self.stream.writeln("Ran %d test%s in %.3fs" %
                                (run, run != 1 and "s" or "", timeTaken))
            self.stream.writeln()
            if not result.wasSuccessful():
                if self.colorize:
                    self.stream.write(textutils.colorize_ansi("FAILED", color='red'))
                else:
                    self.stream.write("FAILED")
            else:
                if self.colorize:
                    self.stream.write(textutils.colorize_ansi("OK", color='green'))
                else:
                    self.stream.write("OK")
            # NOTE(review): failed/errored/skipped are computed but never used
            failed, errored, skipped = map(len, (result.failures,
                                                 result.errors,
                                                 result.skipped))
            det_results = []
            for name, value in (("failures", result.failures),
                                ("errors", result.errors),
                                ("skipped", result.skipped)):
                if value:
                    det_results.append("%s=%i" % (name, len(value)))
            if det_results:
                self.stream.write(" (")
                self.stream.write(', '.join(det_results))
                self.stream.write(")")
            self.stream.writeln("")
        return result
class SkipAwareTestResult(unittest._TextTestResult):
def __init__(self, stream, descriptions, verbosity,
exitfirst=False, pdbmode=False, cvg=None, colorize=False):
super(SkipAwareTestResult, self).__init__(stream,
descriptions, verbosity)
self.skipped = []
self.debuggers = []
self.fail_descrs = []
self.error_descrs = []
self.exitfirst = exitfirst
self.pdbmode = pdbmode
self.cvg = cvg
self.colorize = colorize
self.pdbclass = Debugger
self.verbose = verbosity > 1
def descrs_for(self, flavour):
return getattr(self, '%s_descrs' % flavour.lower())
def _create_pdb(self, test_descr, flavour):
self.descrs_for(flavour).append( (len(self.debuggers), test_descr) )
if self.pdbmode:
self.debuggers.append(self.pdbclass(sys.exc_info()[2]))
def _iter_valid_frames(self, frames):
"""only consider non-testlib frames when formatting traceback"""
lgc_testlib = osp.abspath(__file__)
std_testlib = osp.abspath(unittest.__file__)
invalid = lambda fi: osp.abspath(fi[1]) in (lgc_testlib, std_testlib)
for frameinfo in dropwhile(invalid, frames):
yield frameinfo
def _exc_info_to_string(self, err, test):
"""Converts a sys.exc_info()-style tuple of values into a string.
This method is overridden here because we want to colorize
lines if --color is passed, and display local variables if
--verbose is passed
"""
exctype, exc, tb = err
output = ['Traceback (most recent call last)']
frames = inspect.getinnerframes(tb)
colorize = self.colorize
frames = enumerate(self._iter_valid_frames(frames))
for index, (frame, filename, lineno, funcname, ctx, ctxindex) in frames:
filename = osp.abspath(filename)
if ctx is None: # pyc files or C extensions for instance
source = '<no source available>'
else:
source = ''.join(ctx)
if colorize:
filename = textutils.colorize_ansi(filename, 'magenta')
source = colorize_source(source)
output.append(' File "%s", line %s, in %s' % (filename, lineno, funcname))
output.append(' %s' % source.strip())
if self.verbose:
output.append('%r == %r' % (dir(frame), test.__module__))
output.append('')
output.append(' ' + ' local variables '.center(66, '-'))
for varname, value in sorted(frame.f_locals.items()):
output.append(' %s: %r' % (varname, value))
if varname == 'self': # special handy processing for self
for varname, value in sorted(vars(value).items()):
output.append(' self.%s: %r' % (varname, value))
output.append(' ' + '-' * 66)
output.append('')
output.append(''.join(traceback.format_exception_only(exctype, exc)))
return '\n'.join(output)
def addError(self, test, err):
    """err -> (exc_type, exc, tcbk)

    A SkipTest raised inside a test is re-routed to addSkip(); every
    other exception is recorded as a genuine error (honouring --exitfirst)
    and a debugger slot is reserved for it.
    """
    exc_type, exc, _ = err
    if isinstance(exc, testlib.SkipTest):
        assert exc_type == SkipTest
        self.addSkip(test, exc)
    else:
        if self.exitfirst:
            self.shouldStop = True
        descr = self.getDescription(test)
        super(SkipAwareTestResult, self).addError(test, err)
        self._create_pdb(descr, 'error')
def addFailure(self, test, err):
    # Record an assertion failure; stop the run early when --exitfirst
    # was requested, and reserve a debugger slot for this failure.
    if self.exitfirst:
        self.shouldStop = True
    descr = self.getDescription(test)
    super(SkipAwareTestResult, self).addFailure(test, err)
    self._create_pdb(descr, 'fail')
def addSkip(self, test, reason):
    # Track skipped tests ourselves so printSkippedList() can report them;
    # mirror unittest's progress output ('SKIPPED' verbose / 'S' dotted).
    self.skipped.append((test, reason))
    if self.showAll:
        self.stream.writeln("SKIPPED")
    elif self.dots:
        self.stream.write('S')
def printErrors(self):
    # Extend the standard error/failure report with the skipped-test list.
    super(SkipAwareTestResult, self).printErrors()
    self.printSkippedList()
def printSkippedList(self):
    # format (test, err) compatible with unittest2
    # One separator-delimited section per skipped test, with its reason.
    for test, err in self.skipped:
        descr = self.getDescription(test)
        self.stream.writeln(self.separator1)
        self.stream.writeln("%s: %s" % ('SKIPPED', descr))
        self.stream.writeln("\t%s" % err)
def printErrorList(self, flavour, errors):
    # Pair each recorded description with the corresponding (test, err)
    # entry. NOTE(review): this assumes descrs_for(flavour) and *errors*
    # were appended in the same order and have equal length -- confirm.
    for (_, descr), (test, err) in zip(self.descrs_for(flavour), errors):
        self.stream.writeln(self.separator1)
        self.stream.writeln("%s: %s" % (flavour, descr))
        self.stream.writeln(self.separator2)
        self.stream.writeln(err)
        # captured output is not wired in here, hence the placeholders
        self.stream.writeln('no stdout'.center(len(self.separator2)))
        self.stream.writeln('no stderr'.center(len(self.separator2)))
from .decorators import monkeypatch
# Keep a reference to the original TestCase.__call__ so the patched
# version below can delegate to it.
orig_call = testlib.TestCase.__call__

@monkeypatch(testlib.TestCase, '__call__')
def call(self, result=None, runcondition=None, options=None):
    # Run the test normally, then (under --exitfirst) append its dotted
    # name to the restart file so a later run can resume after it.
    orig_call(self, result=result, runcondition=runcondition, options=options)
    if hasattr(options, "exitfirst") and options.exitfirst:
        # add this test to restart file
        try:
            restartfile = open(FILE_RESTART, 'a')
            try:
                descr = '.'.join((self.__class__.__module__,
                                  self.__class__.__name__,
                                  self._testMethodName))
                restartfile.write(descr+os.linesep)
            finally:
                restartfile.close()
        except Exception:
            print("Error while saving succeeded test into",
                  osp.join(os.getcwd(), FILE_RESTART),
                  file=sys.__stderr__)
            raise
@monkeypatch(testlib.TestCase)
def defaultTestResult(self):
    """return a new instance of the defaultTestResult"""
    # Patched so plain TestCase runs use the skip-aware result class.
    return SkipAwareTestResult()
class NonStrictTestLoader(unittest.TestLoader):
    """
    Overrides default testloader to be able to omit classname when
    specifying tests to run on command line.

    For example, if the file test_foo.py contains ::

        class FooTC(TestCase):
            def test_foo1(self): # ...
            def test_foo2(self): # ...
            def test_bar1(self): # ...

        class BarTC(TestCase):
            def test_bar2(self): # ...

    'python test_foo.py' will run the 3 tests in FooTC
    'python test_foo.py FooTC' will run the 3 tests in FooTC
    'python test_foo.py test_foo' will run test_foo1 and test_foo2
    'python test_foo.py test_bar' will run FooTC.test_bar1 and BarTC.test_bar2
    """

    def __init__(self):
        # patterns whose matching classes/methods are excluded from loading
        self.skipped_patterns = ()

    # some magic here to accept empty list by extending
    # and to provide callable capability
    def loadTestsFromNames(self, names, module=None):
        # Flatten the per-name results into a single suite.
        suites = []
        for name in names:
            suites.extend(self.loadTestsFromName(name, module))
        return self.suiteClass(suites)

    def _collect_tests(self, module):
        # Map classname -> (class, [test method names]) for every public,
        # non-skipped TestCase subclass defined in *module*.
        tests = {}
        for obj in vars(module).values():
            if isclass(obj) and issubclass(obj, unittest.TestCase):
                classname = obj.__name__
                if classname[0] == '_' or self._this_is_skipped(classname):
                    continue
                methodnames = []
                # obj is a TestCase class
                for attrname in dir(obj):
                    if attrname.startswith(self.testMethodPrefix):
                        attr = getattr(obj, attrname)
                        if callable(attr):
                            methodnames.append(attrname)
                # keep track of class (obj) for convenience
                tests[classname] = (obj, methodnames)
        return tests

    def loadTestsFromSuite(self, module, suitename):
        # Call the zero-argument suite factory *suitename* on *module*
        # and return its tests; missing factory means "no tests".
        try:
            suite = getattr(module, suitename)()
        except AttributeError:
            return []
        assert hasattr(suite, '_tests'), \
               "%s.%s is not a valid TestSuite" % (module.__name__, suitename)
        # python2.3 does not implement __iter__ on suites, we need to return
        # _tests explicitly
        return suite._tests

    def loadTestsFromName(self, name, module=None):
        parts = name.split('.')
        if module is None or len(parts) > 2:
            # let the base class do its job here
            return [super(NonStrictTestLoader, self).loadTestsFromName(name)]
        tests = self._collect_tests(module)
        collected = []
        if len(parts) == 1:
            pattern = parts[0]
            if callable(getattr(module, pattern, None)
                        ) and pattern not in tests:
                # consider it as a suite
                return self.loadTestsFromSuite(module, pattern)
            if pattern in tests:
                # case python unittest_foo.py MyTestTC
                klass, methodnames = tests[pattern]
                # NOTE(review): the outer loop is redundant -- the list
                # comprehension already covers all methodnames and is
                # rebuilt identically on every iteration.
                for methodname in methodnames:
                    collected = [klass(methodname)
                                 for methodname in methodnames]
            else:
                # case python unittest_foo.py something
                for klass, methodnames in tests.values():
                    # skip methodname if matched by skipped_patterns
                    for skip_pattern in self.skipped_patterns:
                        methodnames = [methodname
                                       for methodname in methodnames
                                       if skip_pattern not in methodname]
                    collected += [klass(methodname)
                                  for methodname in methodnames
                                  if pattern in methodname]
        elif len(parts) == 2:
            # case "MyClass.test_1"
            classname, pattern = parts
            klass, methodnames = tests.get(classname, (None, []))
            # NOTE(review): same redundant outer loop as above; harmless
            # because the comprehension result is identical each time.
            for methodname in methodnames:
                collected = [klass(methodname) for methodname in methodnames
                             if pattern in methodname]
        return collected

    def _this_is_skipped(self, testedname):
        # True when any configured skip pattern occurs in *testedname*.
        return any([(pat in testedname) for pat in self.skipped_patterns])

    def getTestCaseNames(self, testCaseClass):
        """Return a sorted sequence of method names found within testCaseClass
        """
        is_skipped = self._this_is_skipped
        classname = testCaseClass.__name__
        if classname[0] == '_' or is_skipped(classname):
            return []
        testnames = super(NonStrictTestLoader, self).getTestCaseNames(
            testCaseClass)
        return [testname for testname in testnames if not is_skipped(testname)]
# The 2 functions below are modified versions of the TestSuite.run method
# that is provided with unittest2 for python 2.6, in unittest2/suite.py
# It is used to monkeypatch the original implementation to support
# extra runcondition and options arguments (see in testlib.py)
def _ts_run(self, result, runcondition=None, options=None):
    # Delegate to the patched _wrapped_run, then perform the final
    # class/module teardown exactly like the stock implementation.
    self._wrapped_run(result, runcondition=runcondition, options=options)
    self._tearDownPreviousClass(None, result)
    self._handleModuleTearDown(result)
    return result
def _ts_wrapped_run(self, result, debug=False, runcondition=None, options=None):
    # Replacement for TestSuite._wrapped_run that forwards the extra
    # runcondition/options arguments when the callee accepts them, and
    # falls back to the stock call signature otherwise.
    for test in self:
        if result.shouldStop:
            break
        if unittest_suite._isnotsuite(test):
            # individual test: run the class/module fixture machinery
            self._tearDownPreviousClass(test, result)
            self._handleModuleFixture(test, result)
            self._handleClassSetUp(test, result)
            result._previousTestClass = test.__class__
            if (getattr(test.__class__, '_classSetupFailed', False) or
                    getattr(result, '_moduleSetUpFailed', False)):
                continue
        # --- modifications to deal with _wrapped_run ---
        # original code is:
        #
        # if not debug:
        #     test(result)
        # else:
        #     test.debug()
        if hasattr(test, '_wrapped_run'):
            try:
                test._wrapped_run(result, debug, runcondition=runcondition, options=options)
            except TypeError:
                # callee does not accept the extra keyword arguments
                test._wrapped_run(result, debug)
        elif not debug:
            try:
                test(result, runcondition, options)
            except TypeError:
                test(result)
        else:
            test.debug()
        # --- end of modifications to deal with _wrapped_run ---
    return result
if sys.version_info >= (2, 7):
    # The function below implements a modified version of the
    # TestSuite.run method that is provided with python 2.7, in
    # unittest/suite.py
    def _ts_run(self, result, debug=False, runcondition=None, options=None):
        # Track whether this is the outermost suite so teardown and the
        # _testRunEntered flag are only handled once per run.
        topLevel = False
        if getattr(result, '_testRunEntered', False) is False:
            result._testRunEntered = topLevel = True
        self._wrapped_run(result, debug, runcondition, options)
        if topLevel:
            self._tearDownPreviousClass(None, result)
            self._handleModuleTearDown(result)
            result._testRunEntered = False
        return result
def enable_dbc(*args):
    """Enable design-by-contract support when possible.

    Without arguments, return True if contracts can be enabled and should
    be enabled (see option -d), return False otherwise.  With arguments,
    return False if contracts can't or shouldn't be enabled, otherwise
    weave ContractAspect around each item passed and return True.
    """
    if not ENABLE_DBC:
        return False
    try:
        from logilab.aspects.weaver import weaver
        from logilab.aspects.lib.contracts import ContractAspect
    except ImportError:
        sys.stderr.write(
            'Warning: logilab.aspects is not available. Contracts disabled.')
        return False
    for module in args:
        weaver.weave_module(module, ContractAspect)
    return True
# monkeypatch unittest and doctest (ouch !)
# Replace the stock unittest machinery with the skip-aware variants
# defined above so plain `unittest` scripts benefit transparently.
unittest._TextTestResult = SkipAwareTestResult
unittest.TextTestRunner = SkipAwareTextTestRunner
unittest.TestLoader = NonStrictTestLoader
unittest.TestProgram = SkipAwareTestProgram
if sys.version_info >= (2, 4):
    # rebase doctest cases on our TestCase so they share its features
    doctest.DocTestCase.__bases__ = (testlib.TestCase,)
    # XXX check python2.6 compatibility
    #doctest.DocTestCase._cleanups = []
    #doctest.DocTestCase._out = []
else:
    unittest.FunctionTestCase.__bases__ = (testlib.TestCase,)
unittest.TestSuite.run = _ts_run
unittest.TestSuite._wrapped_run = _ts_wrapped_run
if __name__ == '__main__':
    run()
| fmv1992/python-mode | pymode/libs/logilab-common-1.4.1/logilab/common/pytest.py | Python | lgpl-3.0 | 51,738 | [
"VisIt"
] | 9165725803f048ee2ee462f8c5ee4465bae3f928271d63028f2b2f207738ecac |
#!/usr/bin/env python
import os, sys, logging, string, textwrap

# Put the bundled ``lib`` directory first on sys.path so Galaxy's own
# packages win over any system-wide installation.
new_path = [ os.path.join( os.getcwd(), "lib" ) ]
new_path.extend( sys.path[1:] )
sys.path = new_path

# Log everything (level 10 == DEBUG) to stdout.
log = logging.getLogger()
log.setLevel( 10 )
log.addHandler( logging.StreamHandler( sys.stdout ) )

from galaxy import eggs
import pkg_resources
pkg_resources.require( "SQLAlchemy >= 0.4" )

import time, ConfigParser, shutil
from datetime import datetime, timedelta
from time import strftime
from optparse import OptionParser
from galaxy.tools import parameters
from tool_shed.util.common_util import url_join
import galaxy.webapps.tool_shed.config as tool_shed_config
import galaxy.webapps.tool_shed.model.mapping
import sqlalchemy as sa
from galaxy.model.orm import and_, not_, distinct
from galaxy.util import send_mail as galaxy_send_mail

# Python 2 only script (print statements, ConfigParser module name).
assert sys.version_info[:2] >= ( 2, 4 )
def build_citable_url( host, repository ):
    """Return the shareable 'view' URL for *repository* hosted on *host*."""
    owner_name = repository.user.username
    return url_join( host, 'view', owner_name, repository.name )
def main():
    '''
    Script to deprecate any repositories that are older than n days, and have been empty since creation.

    Expects the tool shed .ini file as the first positional argument.
    '''
    parser = OptionParser()
    parser.add_option( "-d", "--days", dest="days", action="store", type="int", help="number of days (14)", default=14 )
    parser.add_option( "-i", "--info_only", action="store_true", dest="info_only", help="info about the requested action", default=False )
    parser.add_option( "-v", "--verbose", action="store_true", dest="verbose", help="verbose mode, print the name of each repository", default=False )
    ( options, args ) = parser.parse_args()
    ini_file = args[0]
    # Build the app configuration from the [app:main] section of the ini.
    config_parser = ConfigParser.ConfigParser( {'here':os.getcwd()} )
    config_parser.read( ini_file )
    config_dict = {}
    for key, value in config_parser.items( "app:main" ):
        config_dict[key] = value
    config = tool_shed_config.Configuration( **config_dict )
    app = DeprecateRepositoriesApplication( config )
    # Repositories created before this instant are candidates.
    cutoff_time = datetime.utcnow() - timedelta( days=options.days )
    now = strftime( "%Y-%m-%d %H:%M:%S" )
    print "\n####################################################################################"
    print "# %s - Handling stuff older than %i days" % ( now, options.days )
    if options.info_only:
        print "# Displaying info only ( --info_only )"
    deprecate_repositories( app, cutoff_time, days=options.days, info_only=options.info_only, verbose=options.verbose )
def send_mail_to_owner( app, name, owner, email, repositories_deprecated, days=14 ):
    '''
    Sends an email to the owner of the provided repository.

    NOTE(review): the *name*, *owner* and *email* parameters are never
    used -- the recipient address is taken from ``repository.user.email``
    below, which relies on the Python 2 list-comprehension variable leak
    from the ``body`` construction to be bound at all.  Verify this was
    intended before changing behavior.
    '''
    smtp_server = app.config.get( 'smtp_server', None )
    from_address = app.config.get( 'email_from', None )
    # Since there is no way to programmatically determine the URL for the tool shed from the .ini file, this method requires that
    # an environment variable named TOOL_SHED_CANONICAL_URL be set, pointing to the tool shed that is being checked.
    url = os.environ.get( 'TOOL_SHED_CANONICAL_URL', None )
    if None in [ smtp_server, from_address ]:
        print '# Mail not configured, not sending email to repository owner.'
        return
    elif url is None:
        print '# Environment variable TOOL_SHED_CANONICAL_URL not set, not sending email to repository owner.'
        return
    subject = "Regarding your tool shed repositories at %s" % url
    message_body_template = 'The tool shed automated repository checker has discovered that one or more of your repositories hosted ' + \
        'at this tool shed url ${url} have remained empty for over ${days} days, so they have been marked as deprecated. If you have plans ' + \
        'for these repositories, you can mark them as un-deprecated at any time.'
    message_template = string.Template( message_body_template )
    body = '\n'.join( textwrap.wrap( message_template.safe_substitute( days=days, url=url ), width=95 ) )
    body += '\n\n'
    body += 'Repositories that were deprecated:\n'
    body += '\n'.join( [ build_citable_url( url, repository ) for repository in repositories_deprecated ] )
    try:
        # ``repository`` here is the comprehension leak mentioned above.
        galaxy_send_mail( from_address, repository.user.email, subject, body, app.config )
        print "# An email has been sent to %s, the owner of %s." % ( repository.user.username, ', '.join( [ repository.name for repository in repositories_deprecated ] ) )
        return True
    except Exception, e:
        print "# An error occurred attempting to send email: %s" % str( e )
        return False
def deprecate_repositories( app, cutoff_time, days=14, info_only=False, verbose=False ):
    # This method will get a list of repositories that were created on or before cutoff_time, but have never
    # had any metadata records associated with them. Then it will iterate through that list and deprecate the
    # repositories, sending an email to each repository owner.
    #
    # NOTE(review): dataset_count and disk_space are initialised but never
    # used below -- likely leftovers from a copied script.
    dataset_count = 0
    disk_space = 0
    start = time.time()
    repository_ids_to_not_check = []
    # Get a unique list of repository ids from the repository_metadata table. Any repository ID found in this table is not
    # empty, and will not be checked.
    metadata_records = sa.select( [ distinct( app.model.RepositoryMetadata.table.c.repository_id ) ],
                                  from_obj=app.model.RepositoryMetadata.table ) \
                         .execute()
    for metadata_record in metadata_records:
        repository_ids_to_not_check.append( metadata_record.repository_id )
    # Get the repositories that are A) not present in the above list, and b) older than the specified time.
    # This will yield a list of repositories that have been created more than n days ago, but never populated.
    repository_query = sa.select( [ app.model.Repository.table.c.id ],
                                  whereclause = and_( app.model.Repository.table.c.create_time < cutoff_time,
                                                      app.model.Repository.table.c.deprecated == False,
                                                      app.model.Repository.table.c.deleted == False,
                                                      not_( app.model.Repository.table.c.id.in_( repository_ids_to_not_check ) ) ),
                                  from_obj = [ app.model.Repository.table ] )
    query_result = repository_query.execute()
    repositories = []
    repositories_by_owner = {}
    repository_ids = [ row.id for row in query_result ]
    # Iterate through the list of repository ids for empty repositories and deprecate them unless info_only is set.
    for repository_id in repository_ids:
        repository = app.sa_session.query( app.model.Repository ) \
                                   .filter( app.model.Repository.table.c.id == repository_id ) \
                                   .one()
        owner = repository.user
        if info_only:
            print '# Repository %s owned by %s would have been deprecated, but info_only was set.' % ( repository.name, repository.user.username )
        else:
            if verbose:
                print '# Deprecating repository %s owned by %s.' % ( repository.name, owner.username )
            # Group repositories per owner so each owner gets one email.
            if owner.username not in repositories_by_owner:
                repositories_by_owner[ owner.username ] = dict( owner=owner, repositories=[] )
            repositories_by_owner[ owner.username ][ 'repositories' ].append( repository )
            repositories.append( repository )
    # Send an email to each repository owner, listing the repositories that were deprecated.
    for repository_owner in repositories_by_owner:
        for repository in repositories_by_owner[ repository_owner ][ 'repositories' ]:
            repository.deprecated = True
            app.sa_session.add( repository )
            app.sa_session.flush()
        owner = repositories_by_owner[ repository_owner ][ 'owner' ]
        send_mail_to_owner( app, repository.name, owner.username, owner.email, repositories_by_owner[ repository_owner ][ 'repositories' ], days )
    stop = time.time()
    print '# Deprecated %d repositories.' % len( repositories )
    print "# Elapsed time: ", stop - start
    print "####################################################################################"
class DeprecateRepositoriesApplication( object ):
    """Encapsulates the state of a Universe application"""
    def __init__( self, config ):
        # Fall back to a local SQLite database when no explicit connection
        # string is configured.
        if config.database_connection is False:
            config.database_connection = "sqlite:///%s?isolation_level=IMMEDIATE" % config.database
        # Setup the database engine and ORM
        self.model = galaxy.webapps.tool_shed.model.mapping.init( config.file_path, config.database_connection, engine_options={}, create_tables=False )
        self.config = config
    @property
    def sa_session( self ):
        """
        Returns a SQLAlchemy session -- currently just gets the current
        session from the threadlocal session context, but this is provided
        to allow migration toward a more SQLAlchemy 0.4 style of use.
        """
        return self.model.context.current
    def shutdown( self ):
        # Nothing to release; present for interface compatibility.
        pass
if __name__ == "__main__": main()
| mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/lib/tool_shed/scripts/deprecate_repositories_without_metadata.py | Python | gpl-3.0 | 9,139 | [
"Galaxy"
] | 64b1805115dd9ea88064179736a4ebfd8035a62f6e6ae7f9b739cfcd9557365e |
"""
This python script encodes a video for Samsung Galaxy S5 Mini
Copyright 2016 Markus Kastner
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import subprocess
import os
import sys
import argparse
import re
import xml.etree.ElementTree as ET
import tkinter as tk
from tkinter import filedialog
#-------------------------------------------------------------------------------
# CONFIGURABLE SETTINGS
#-------------------------------------------------------------------------------
# controls the quality of the encode
CRF = '21'
# h.264 profile
PROFILE = 'baseline'
# encoding speed:compression ratio
PRESET = 'slow'
# path to ffmpeg bin
FFMPEG_PATH = 'D:\\tmp\\ffmpeg.exe'
# path to ffprobe bin
FFPROBE_PATH = 'D:\\tmp\\ffprobe.exe'
# path to temp directory
TEMPDIR = 'tmp'
#-------------------------------------------------------------------------------
# encoding script
#-------------------------------------------------------------------------------
def encode(input, output, start=None, duration=None, volume=None, resolution=None, device='default.xml'):
    """Run ffmpeg to encode *input* into *output* using a device profile.

    :param input: path of the source media file
    :param output: path of the encoded file to create
    :param start: optional seek offset, passed to ffmpeg ``-ss``
    :param duration: optional clip length, passed to ffmpeg ``-t``
    :param volume: measured max volume in dB; when set, the audio gain is
        shifted by its negation to normalize loudness
    :param resolution: ``[width, height]`` of the source, or None when
        unknown; scaling is applied when unknown or taller than the profile
    :param device: XML file with the device encoding settings
    """
    if device is None:
        device = 'default.xml'
    try:
        xml = ET.ElementTree(file=device)
    except FileNotFoundError:
        print ("File",device,"not found")
        sys.exit(1)
    # Video settings from the device profile (with sane defaults).
    video = xml.find('video')
    crf = video.findtext('crf', default=CRF)
    profile = video.findtext('profile', default=PROFILE)
    vcodec = video.findtext('codec', default='libx264')
    lines = int(video.findtext('lines', default='720'))
    # Audio settings from the device profile.
    audio = xml.find('audio')
    acodec = audio.findtext('codec', default='libmp3lame')
    abitrate = audio.findtext('bitrate', default='192k')
    channels = int(audio.findtext('channels', default='2'))
    try:
        command = [FFMPEG_PATH]
        # Input file
        command += ['-i', input]
        if start is not None:
            command += ['-ss', str(start)]
        if duration is not None:
            command += ['-t', str(duration)]
        # Video codec
        command += ['-y', '-c:v', vcodec, '-preset', PRESET, '-profile:v', profile, '-crf', crf]
        # Video filters: always deinterlace; downscale when the source is
        # larger than the device profile or its size is unknown.
        filters = ['yadif']
        if resolution is None or resolution[1] > lines:
            scaleFilter = 'scale=' + str(lines) + '*dar:' + str(lines)
            filters += [scaleFilter]
        command += ["-vf", ','.join(filters)]
        command += ['-c:a', acodec, '-b:a', abitrate, '-ac', str(channels)]
        # Audio filter: shift gain by the negated measured max volume.
        if volume:
            volume = volume * -1
            filters = ['volume=' + str(volume) + 'dB']
            command += ["-af", ','.join(filters)]
        command += [output]
        print(command)
        subprocess.call(command)  # encode the video!
    except Exception as exc:
        # Was a bare ``except:`` that also caught KeyboardInterrupt/SystemExit
        # and hid the failure reason; report the actual error instead.
        print("Error encoding:", input, "-", exc)
def encodeCopyConcat(output):
    """Losslessly concatenate the temp segments listed in ``tmp/concat.txt``
    into *output* using ffmpeg's concat demuxer (stream copy for audio and
    video, subtitles dropped via ``-sn``)."""
    try:
        command = [FFMPEG_PATH]
        command += ['-f', 'concat', '-i', os.path.join(TEMPDIR, "concat.txt"),
                    '-y', '-c:v', 'copy', '-c:a', 'copy', '-map', '0', '-sn']
        command += [output]
        print(command)
        subprocess.call(command)  # concatenate the segments
    except Exception as exc:
        # Fixes two defects: the bare ``except:`` swallowed every exception
        # (including KeyboardInterrupt), and the old message printed the
        # *builtin* ``input`` function instead of the file being produced.
        print("Error encoding:", output, "-", exc)
if __name__ == "__main__":
    summary="Summary:\n"
    # Parsing arguments
    parser = argparse.ArgumentParser(description='Encode a video')
    parser.add_argument('-i', help='xml-file describing input file')
    parser.add_argument('-o', help='output video file')
    parser.add_argument('--device', help='XML file specifying device settings')
    args = parser.parse_args()
    # Hidden Tk root so the file dialogs below can be shown standalone.
    root = tk.Tk()
    root.withdraw()
    if args.i:
        xmlFile = args.i
    else:
        xmlFile = filedialog.askopenfilename(title='Input file (xml)',filetypes=[('xml files', '.xml')])
    try:
        xml = ET.ElementTree(file=xmlFile)
    except FileNotFoundError:
        # NOTE(review): ``args.input`` does not exist (the option is ``-i``,
        # so the attribute is ``args.i``) -- this line would raise
        # AttributeError instead of printing. Confirm and fix.
        print ("File",args.input,"not found")
        sys.exit(1)
    # NOTE(review): this condition checks ``args.i`` but selects the
    # *output*; it should presumably test ``args.o`` -- confirm.
    if args.i:
        output = args.o
    else:
        output = filedialog.asksaveasfilename(title='output video file',filetypes=[('MKV files', '.mkv'), ('MP4 files', '.mp4'), ('MPG files', '.mpg')])
    path = xml.find('path').text
    basename = xml.find('basename').text
    # Build [start, stop] pairs from the flat <uncutlist> element list;
    # a missing trailing stop means "until end of file".
    uncutlist_xml = xml.find('uncutlist')
    startstoplist = list(uncutlist_xml.iter())
    uncutlist = []
    start = startstoplist[1].text
    stop = None
    for x in range(1,len(startstoplist),2):
        if x+1 < len(startstoplist):
            uncutlist.append([startstoplist[x].text,startstoplist[x+1].text])
            stop = startstoplist[x+1].text
        else:
            uncutlist.append([startstoplist[x].text,None])
            stop = None
    input = os.path.join(path,basename)
    summary+="Input: "+input+"\n"
    summary+="Output: "+output+"\n"
    print(input, output)
    # Probing input file
    command= [FFMPEG_PATH, '-i', input]
    if start is not None:
        command += [ '-ss', str(start)]
    if stop is not None:
        # NOTE(review): start/stop are strings here, so ``stop-start``
        # raises TypeError whenever stop is set -- confirm and fix.
        command += [ '-t', str(stop-start)]
    command += [ '-vn', '-sn', '-af', 'volumedetect', '-f', 'null', '/dev/null']
    result=subprocess.run(command,stderr=subprocess.PIPE)
    probe=result.stderr.decode("utf-8")
    # getting max volume
    try:
        match=re.findall("max_volume: (.*) dB",probe)
        volume=[float(x) for x in match]
        volume.sort()
        summary+="Volume: "+str(volume[0])+"\n"
    except:
        # NOTE(review): when this path is taken, volume is None, and the
        # ``volume[0]`` calls below raise TypeError -- confirm and fix.
        volume=None
    # getting resolution
    match=re.search("(\d{3,4})x(\d{3,4})",probe)
    if match:
        resolution=[int(match.group(1)),int(match.group(2))]
        summary+="Resolution: "+match.group(0)+"\n"
    else:
        resolution=None
    if len(uncutlist) == 0:
        # No cut list: encode the whole file in one pass.
        encode(input,output,resolution=resolution,device=args.device,volume=volume[0])
    else:
        # Encode each kept segment to a temp file, then losslessly
        # concatenate them via ffmpeg's concat demuxer.
        concat_file = open(os.path.join(TEMPDIR,"concat.txt"), "w", encoding="utf-8")
        for x in range(len(uncutlist)):
            tempname = "temp_"+str(x)+".mkv"
            tempname = os.path.join(TEMPDIR, tempname)
            concat_string="file '"+tempname+"'\n"
            concat_file.write(concat_string)
            if uncutlist[x][1] == None:
                length = None
            else:
                length = int(uncutlist[x][1]) - int(uncutlist[x][0])
            encode(input,tempname,resolution=resolution,start=uncutlist[x][0],duration=length,device=args.device,volume=volume[0])
        concat_file.close()
        encodeCopyConcat(output)
    print(summary)
    # NOTE(review): raises TypeError when resolution is None (no match).
    print(resolution[1])
| lomion0815/transcodeVideo | transcodeVideo.py | Python | gpl-3.0 | 7,207 | [
"Galaxy"
] | 15f44024953a4a959873d0eba4ded3e77420cd4d4522a0307c575a86e3c62150 |
"""
Simplest example to generate a .fit, .mod and .dat file to feed in MrMoose for
demonstration. The model consists of a double power-law with a break frequency
and six data points from a source at z=0
"""
import models as md
import numpy as np
import mm_utilities as mm
import read_files as rd
#def fake_sync_source():
# define the parameters of the sync law and create
# (Python 2 script: files below are opened 'wb' but written with str.)
norm = 1.0
nu_break = 9.0
alpha1 = 2.
alpha2 = -1.5
nu = 10**np.linspace(6, 11, 10000)
redshift = 0.
# Model SED: double power-law synchrotron spectrum over the nu grid.
fnu = md.double_sync_law(nu, [norm, nu_break, alpha1, alpha2], redshift)

# Six radio filters and their per-band signal-to-noise / position / beam.
filter_name = np.array(['74MHz(VLA)', '178MHz', '408MHz', '1.4GHz', '4.85GHz', '8.4GHz'])
sn_mod = [15., 15., 15., 15., 15., 15.]
RA_list = ['12h00m00s', ]*6
Dec_list = ['-40d00m00s', ]*6
res_list = [12., ]*6
fnu_mod = np.zeros(filter_name.size)
fnu_err = np.zeros(filter_name.size)
lambda0 = np.zeros(filter_name.size)
# run through the filters
for i_filter, name_filter in enumerate(filter_name):
    # read the filter transmission
    nu_filter, trans_filter = rd.read_single_filter('filters/'+name_filter+'.fil')
    # calculate the lambda0
    lambda0[i_filter] = np.average(nu_filter, weights=trans_filter)
    # perform the integration
    tmp = mm.integrate_filter(nu, fnu, nu_filter, trans_filter)
    # add a gaussian noise (depending on the signal to noise defined previously)
    fnu_err[i_filter] = tmp/sn_mod[i_filter]
    fnu_mod[i_filter] = np.random.normal(tmp, fnu_err[i_filter])

# create the data file
with open('data/fake_source_ex1c.dat', 'wb') as fake:
    fake.writelines("# filter RA Dec resolution lambda0 det_type flux "
                    "flux_error arrangement component component_number \n")
    # all rows but the last end with a bare component number; the final
    # row carries a trailing comma in its component_number field
    for i_filter in range(filter_name.size-1):
        fake.write('{:15} {:15} {:15} {:5.1f} {:10e} {:5} {:10e} {:10e} {:10} {:10} {:10} \n'.format(
            filter_name[i_filter], RA_list[i_filter], Dec_list[i_filter], res_list[i_filter],
            lambda0[i_filter], "d", fnu_mod[i_filter], fnu_err[i_filter], "1", "note", "0"))
    fake.write('{:15} {:15} {:15} {:5.1f} {:10e} {:5} {:10e} {:10e} {:10} {:10} {:10}'.format(
        filter_name[i_filter+1], RA_list[i_filter+1], Dec_list[i_filter+1], res_list[i_filter+1],
        lambda0[i_filter+1], "d", fnu_mod[i_filter+1], fnu_err[i_filter+1], "1", "note", "0,"))

# create the fit file
with open('fake_source_ex1c.fit', 'wb') as fake:
    fake.write('source_file: data/fake_source_ex1c.dat \n')
    fake.write('model_file: models/fake_source_ex1c.mod \n')
    fake.write('all_same_redshift: True \n')
    fake.write('redshift: ['+str(redshift)+'] \n')
    fake.write('nwalkers: 20 \n')
    fake.write('nsteps: 60 \n')
    fake.write('nsteps_cut: 58 \n')
    fake.write('percentiles: [10., 25., 50., 75., 90.] \n')
    fake.write('skip_imaging: False \n')
    fake.write('skip_fit: False \n')
    fake.write('skip_MCChains: False \n')
    fake.write('skip_triangle: False \n')
    fake.write('skip_SED: False \n')
    fake.write("unit_obs: 'Hz' \n")
    fake.write("unit_flux: 'Jy' \n")

# create the model file (parameter name, lower bound, upper bound)
with open('models/fake_source_ex1c.mod', 'wb') as fake:
    fake.write('double_sync_law 4 \n')
    fake.write('$N$ -25 -15 \n')
    fake.write('$\\nu_{break}$ 7.0 10.0 \n')
    fake.write('$\\alpha_1$ 0.0 2.5 \n')
    fake.write('$\\alpha_2$ -2.5 0.0 \n')
| gdrouart/MrMoose | examples/example_1c.py | Python | gpl-3.0 | 3,358 | [
"Gaussian"
] | 1c57af21395b70a9af1a68cf988d77cb52bddff18b543bf3936f16177f86fba6 |
from collections import defaultdict
from rdkit.Chem import BondType, rdMolDescriptors
from ._base import Descriptor
__all__ = ("TopoPSA",)
class TopoPSA(Descriptor):
    r"""topological polar surface area descriptor(NO only: rdkit wrapper).

    :type no_only: bool
    :param no_only:
        * True: N,O only TPSA
        * False: all(N,O,S,P) TPSA

    References
        * :doi:`10.1021/jm000942e`

    """

    since = "1.0.0"

    __slots__ = ("_no_only",)

    def description(self):
        # Human-readable descriptor label used in reports.
        return "topological polar surface area{}".format(
            " (use only nitrogen and oxygen)" if self._no_only else ""
        )

    @classmethod
    def preset(cls, version):
        # Both variants are part of the default descriptor set.
        yield cls(True)
        yield cls(False)

    def __str__(self):
        return "TopoPSA(NO)" if self._no_only else "TopoPSA"

    def parameters(self):
        return (self._no_only,)

    def __init__(self, no_only=True):
        self._no_only = no_only

    def calculate(self):
        # RDKit's CalcTPSA covers the N/O contributions; when the full
        # variant is requested, add S and P contributions by hand.
        tpsa = rdMolDescriptors.CalcTPSA(self.mol)
        if self._no_only:
            return tpsa

        for atom in self.mol.GetAtoms():
            atomic_num = atom.GetAtomicNum()
            if atomic_num == 15:  # phosphorus
                tpsa += self._get_phosphorus_contrib(atom)
            elif atomic_num == 16:  # sulfur
                tpsa += self._get_sulfur_contrib(atom)

        return tpsa

    @staticmethod
    def _hydrogen_count(atom):
        # Implicit hydrogens plus explicit H neighbor atoms.
        return atom.GetTotalNumHs() + sum(
            1 for a in atom.GetNeighbors() if a.GetAtomicNum() == 1
        )

    @staticmethod
    def _bond_type_count(atom):
        # Histogram of this atom's bond types; aromatic bonds are counted
        # under AROMATIC regardless of their nominal order.
        cnt = defaultdict(int)
        for bond in atom.GetBonds():
            if bond.GetIsAromatic():
                cnt[BondType.AROMATIC] += 1
            else:
                cnt[bond.GetBondType()] += 1

        return dict(cnt)

    @classmethod
    def _get_phosphorus_contrib(cls, atom):
        # Per-environment P surface contributions; the constants match the
        # fragment values published for TPSA (Ertl et al.) -- presumably,
        # verify against the paper before changing.
        nH = cls._hydrogen_count(atom)
        cnt = cls._bond_type_count(atom)
        if atom.GetFormalCharge() != 0 or atom.GetIsAromatic():
            return 0.0
        if nH == 1 and cnt == {BondType.SINGLE: 3, BondType.DOUBLE: 1}:
            return 23.47
        elif nH == 0:
            if cnt == {BondType.SINGLE: 3}:
                return 13.59
            elif cnt == {BondType.SINGLE: 1, BondType.DOUBLE: 1}:
                return 34.14
            elif cnt == {BondType.SINGLE: 3, BondType.DOUBLE: 1}:
                return 9.81
        # unrecognized environment contributes nothing
        return 0.0

    @classmethod
    def _get_sulfur_contrib(cls, atom):
        # Per-environment S surface contributions (see note above).
        nH = cls._hydrogen_count(atom)
        cnt = cls._bond_type_count(atom)
        if atom.GetFormalCharge() != 0:
            return 0.0
        if atom.GetIsAromatic():
            if nH == 0:
                if cnt == {BondType.AROMATIC: 2}:
                    return 28.24
                elif cnt == {BondType.AROMATIC: 2, BondType.DOUBLE: 1}:
                    return 21.70
        else:
            if nH == 1 and cnt == {BondType.SINGLE: 2}:
                return 38.80
            elif nH == 0:
                if cnt == {BondType.SINGLE: 2}:
                    return 25.30
                elif cnt == {BondType.DOUBLE: 1}:
                    return 32.09
                elif cnt == {BondType.SINGLE: 2, BondType.DOUBLE: 1}:
                    return 19.21
                elif cnt == {BondType.SINGLE: 2, BondType.DOUBLE: 2}:
                    return 8.38
        # unrecognized environment contributes nothing
        return 0.0

    rtype = float
| mordred-descriptor/mordred | mordred/TopoPSA.py | Python | bsd-3-clause | 3,438 | [
"RDKit"
] | 52259f06c679e55fef29f6fba0031659161ac5233c586033cdb645344d40f0f4 |
#!/usr/bin/python
#
# portchecker.py
#
# Brian Bolander
#
#
import socket
import sys
import re
import pdb
import getopt
import os
"""
_______________________________________________________________________________
readportsfile
_______________________________________________________________________________
"""
def readportsfile(filename):
"""
Read the file that contains the information about the ports and return
a data structure with the following format.
ports = {
# hostname port name port
'codex': {
'telnet': 22,
'webserver': 80
},
'etmessrv01': {
'asadmin': 4848,
'webserver': 80
},
}
File format: One port per line...
<hostname> <port name (one word, non-whitespace characters)> <port number>
<hostname> <port name (one word, non-whitespace characters)> <port number>
.
.
.
filename: The name of the file to be read.
"""
debug = False
ports = dict()
hostname = ""
portnumber = ""
portname = ""
#pdb.set_trace()
try:
portsfile = open(portscpath + filename, 'r')
except IOError:
try:
portsfile = open(filename, 'r')
except IOError, e:
print "Error opening file: %s" % e
sys.exit(2)
for line in portsfile:
line = line.rstrip("\n")
#print line
#if re.match('^[^#]\w*\W*\w*\W*\d*$', line) and line:
if re.match('^[^#]\S*\W*\w*\W*\d*$', line) and line:
hostname, portname, portnumber = line.split()
if debug:
print
print "Host:\t%s" % hostname
print "Descr:\t%s" % portname
print "Port:\t%s" % portnumber
if ports.has_key(hostname):
ports[hostname][portname] = int(portnumber)
else:
ports[hostname] = {portname: int(portnumber)}
else:
if re.match('^[^#].*$', line) and line:
print "Format check failed for line:"
print line
return(ports)
"""
_______________________________________________________________________________
portping
_______________________________________________________________________________
"""
def portping(hostname, port):
"""
Ping a port and return the result in status string.
hostname: The hostname of the machine.
port: The port number.
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(5)
status = "success"
try:
s.connect((hostname, port))
except socket.gaierror, e:
status = "gaierror: %s" % e[1]
except socket.error, e:
status = "error: %s" % e[0]
except socket.timeout, e:
status = "timeout: %s" % e[1]
except:
status = "portping: Unknown error."
s.close()
return(status)
"""
_______________________________________________________________________________
usage
_______________________________________________________________________________
"""
def usage():
    # Print command-line help to stdout.  The trailing commas suppress the
    # newline so that each option flag and its description share one output
    # line (Python 2 print "softspace" supplies the separating space).
    print "portchecker -c <configuration file> [-f]"
    print " %-24s" % "-c <config file>",
    print "Name of the configuration file located in /usr/share/portchecker/conf"
    print " %-24s" % "-f",
    print "Firewall check; only throw an error if there is a timeout."
    print " %-24s" % "-l",
    print "List the configuration files located in /usr/share/portchecker/conf."
"""
_______________________________________________________________________________
printport
_______________________________________________________________________________
"""
def printport(hostname, portname):
    # Print one result row: hostname, port name, port number and ping status.
    # NOTE(review): relies on the module-level globals `ports` and `status`
    # that are set in the main loop at the bottom of this file -- confirm
    # before reusing this function elsewhere.  Trailing commas keep all
    # fields on a single output line.
    print "%-24s" % hostname,
    print "%-24s" % portname,
    print "%-6d" % ports[hostname][portname],
    print "\t",
    print "%s" % status
"""
_______________________________________________________________________________
listconfigs
_______________________________________________________________________________
"""
def listconfigs():
print "Config files located in %s:" % portscpath
filenames = os.listdir(portscpath)
for filename in filenames:
print "\t%s" % filename
"""
_______________________________________________________________________________
Main
_______________________________________________________________________________
"""
# ---- Command-line parsing ----
try:
    opts, args = getopt.getopt(sys.argv[1:], "c:fl")
except getopt.GetoptError, err:
    print str(err)
    usage()
    sys.exit(2)
# Defaults: configuration files live under portscpath; "ports.txt" is the
# fallback config name when -c is not given.
portscpath = "/usr/share/portchecker/"
portsfile = "ports.txt"
mode = "NORMAL"
for opt, arg in opts:
    if opt == "-c":
        portsfile = arg
    elif opt == "-f":
        # Firewall-check mode: only report ports whose ping timed out.
        mode = "FW_CHECK"
    elif opt == "-l":
        listconfigs()
        sys.exit()
# ---- Main loop: ping every configured host/port and print per mode ----
ports = readportsfile(portsfile)
for hostname in ports:
    for portname in ports[hostname]:
        status = portping(hostname, ports[hostname][portname])
        if mode == "NORMAL":
            printport(hostname, portname)
        # FW_CHECK matches the "timed out" text produced by portping.
        elif mode == "FW_CHECK" and re.match('^.*timed out.*$', status) :
            printport(hostname, portname)
| bbolander/codex | python/portchecker/portchecker.py | Python | apache-2.0 | 5,154 | [
"Brian"
] | 163ad5f69cf40ee9ba0f9728e93bd38b93510d407a279d8ec247a1c3da1e73d6 |
import datetime
from businesstime.holidays import Holidays
class USFederalHolidays(Holidays):
    """
    US federal holidays, taken from
    http://www.opm.gov/policy-data-oversight/snow-dismissal-procedures/federal-holidays/
    """

    rules = [
        dict(name="New Year's Day", month=1, day=1),
        dict(
            name="Birthday of Martin Luther King, Jr.",
            month=1,
            weekday=0,
            week=3),
        dict(name="Washington's Birthday", month=2, weekday=0, week=3),
        dict(name="Memorial Day", month=5, weekday=0, week=-1),
        dict(name="Independence Day", month=7, day=4),
        dict(name="Labor Day", month=9, weekday=0, week=1),
        dict(name="Columbus Day", month=10, weekday=0, week=2),
        dict(name="Veterans Day", month=11, day=11),
        dict(name="Thanksgiving Day", month=11, weekday=3, week=4),
        dict(name="Christmas Day", month=12, day=25),
    ]

    def _day_rule_matches(self, rule, dt):
        """
        Day-of-month-specific US federal holidays that fall on Sat or Sun are
        observed on Fri or Mon respectively.  Both the actual holiday and the
        day of observance are treated as holidays.
        """
        base_match = super(USFederalHolidays, self)._day_rule_matches
        one_day = datetime.timedelta(days=1)
        weekday = dt.weekday()
        # A Friday counts if the following Saturday is a holiday; a Monday
        # counts if the preceding Sunday is one.
        if weekday == 4 and base_match(rule, dt + one_day):
            return True
        if weekday == 0 and base_match(rule, dt - one_day):
            return True
        return base_match(rule, dt)
| seatgeek/businesstime | businesstime/holidays/usa.py | Python | bsd-2-clause | 1,651 | [
"COLUMBUS"
] | af61c9046a5aee41e855bd9cfc7f51d0922ec59e6c1f21c8cdd19e49657eff36 |
#
# Copyright (c) 2000 Autonomous Zone Industries
# This file is licensed under the
# GNU Lesser General Public License v2.1.
# See the file COPYING or visit http://www.gnu.org/ for details.
#
import evilcryptopp
__doc__ = evilcryptopp._randsource_doc
get = evilcryptopp._randsource_get
import time
import sys
import os
import sha
import string
if sys.platform == 'win32':
# this is part of the win32all python package, get it from:
# http://www.activestate.com/Products/ActivePython/win32all.html
import win32api
# our modules
def add(seedbytes, entropybits):
    """Mix seed material into the C-level random source.

    seedbytes: raw bytes to fold into the entropy pool.
    entropybits: estimated entropy (in bits) contained in seedbytes.
    """
    evilcryptopp._randsource_add(seedbytes, entropybits)
# TODO add entropy gathering for other OSes
if sys.platform == "win32" :
print 'WARNING: a better random entropy source is needed for this OS\n'
# Anyone know good ways to gather more starting entropy on windows?
shabits = sha.sha()
shabits.update(str(win32api.GetCursorPos()))
shabits.update(str(time.time()))
shabits.update(sys.exec_prefix)
shabits.update(str(time.time()))
shabits.update(str(win32api.GetCursorPos()))
shabits.update(str(os.environ))
shabits.update(str(win32api.GetCursorPos()))
shabits.update(str(time.time()))
shabits.update(str(win32api.GetCurrentProcessId()))
shabits.update(str(sys.dllhandle))
add(shabits.digest(), 160)
elif string.find(sys.platform, "linux") >= 0 :
urandomdata = open('/dev/urandom', 'rb').read(20)
add(urandomdata, len(urandomdata)*8)
elif string.find(string.lower(sys.platform), "bsd") >= 0 :
urandomdata = open('/dev/urandom', 'rb').read(20)
add(urandomdata, len(urandomdata)*8)
else :
print 'WARNING: a better random entropy source is needed for this OS\n'
add(sha.sha( sys.platform + sys.version + str(time.time()) ).digest(), 160)
| zooko/egtp | common/crypto/Python/randsource.py | Python | agpl-3.0 | 1,809 | [
"VisIt"
] | f6ead95fbd66770e6b49d28bb99cc660222b46b175d85f1aacd1fa68e41fcd78 |
# Copyright 2013 Velodyne Acoustics, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# if VeloView runs from a build directory then we need
# to add ParaView python modules to the sys.path.
import sys
import os
def getParaViewBuildDir():
appDir = os.path.dirname(sys.executable)
for searchDir in ['../../../../', '../']:
cmakeCache = os.path.join(appDir, searchDir, 'CMakeCache.txt')
if os.path.isfile(cmakeCache):
for line in open(cmakeCache, 'r'):
if line.startswith('ParaView_DIR'):
return line.strip().split('=')[1]
def addParaViewPath():
    """Append ParaView's python module directories to sys.path.

    Only takes effect when running from a build tree, i.e. when
    getParaViewBuildDir() locates a ParaView build.
    """
    buildDir = getParaViewBuildDir()
    if not buildDir:
        return
    for subdir in ('lib', 'lib/site-packages'):
        sys.path.append(os.path.join(buildDir, subdir))
| frizaro/Veloview | VelodyneHDL/python/veloview/__init__.py | Python | apache-2.0 | 1,353 | [
"ParaView"
] | 23cb3d8ab4a542f98b76dfcb2304520238b46c2c6952a590322fcb8f8672632d |
import contextlib
import collections
import pickle
import re
import sys
from unittest import TestCase, main, skipUnless, SkipTest
from copy import copy, deepcopy
from typing import Any
from typing import TypeVar, AnyStr
from typing import T, KT, VT # Not in __all__.
from typing import Union, Optional
from typing import Tuple, List, MutableMapping
from typing import Callable
from typing import Generic, ClassVar
from typing import cast
from typing import get_type_hints
from typing import no_type_check, no_type_check_decorator
from typing import Type
from typing import NewType
from typing import NamedTuple
from typing import IO, TextIO, BinaryIO
from typing import Pattern, Match
import abc
import typing
try:
import collections.abc as collections_abc
except ImportError:
import collections as collections_abc # Fallback for PY3.2.
class BaseTestCase(TestCase):
    """Shared helpers: subclass assertions with rich messages, cache reset."""

    def assertIsSubclass(self, cls, class_or_tuple, msg=None):
        """Fail unless ``cls`` is a subclass of ``class_or_tuple``."""
        if issubclass(cls, class_or_tuple):
            return
        detail = '%r is not a subclass of %r' % (cls, class_or_tuple)
        if msg is not None:
            detail += ' : %s' % msg
        raise self.failureException(detail)

    def assertNotIsSubclass(self, cls, class_or_tuple, msg=None):
        """Fail if ``cls`` is a subclass of ``class_or_tuple``."""
        if not issubclass(cls, class_or_tuple):
            return
        detail = '%r is a subclass of %r' % (cls, class_or_tuple)
        if msg is not None:
            detail += ' : %s' % msg
        raise self.failureException(detail)

    def clear_caches(self):
        """Run typing's registered cleanup hooks to flush internal caches."""
        for cleanup in typing._cleanups:
            cleanup()
# Small class hierarchy used as subclass-relationship fixtures by the tests.
class Employee:
    pass
class Manager(Employee):
    pass
class Founder(Employee):
    pass
# Diamond: reaches Employee through both Manager and Founder.
class ManagingFounder(Manager, Founder):
    pass
class AnyTests(BaseTestCase):
    """Behaviour of the typing.Any special form.

    NOTE(review): these expectations match the typing module of the era this
    file was written for; later Python versions changed some of them (e.g.
    subclassing Any is permitted from 3.11).
    """
    def test_any_instance_type_error(self):
        # Any cannot be used with isinstance().
        with self.assertRaises(TypeError):
            isinstance(42, Any)
    def test_any_subclass_type_error(self):
        # Any cannot be used with issubclass() in either position.
        with self.assertRaises(TypeError):
            issubclass(Employee, Any)
        with self.assertRaises(TypeError):
            issubclass(Any, Employee)
    def test_repr(self):
        self.assertEqual(repr(Any), 'typing.Any')
    def test_errors(self):
        with self.assertRaises(TypeError):
            issubclass(42, Any)
        with self.assertRaises(TypeError):
            Any[int]  # Any is not a generic type.
    def test_cannot_subclass(self):
        with self.assertRaises(TypeError):
            class A(Any):
                pass
        with self.assertRaises(TypeError):
            class A(type(Any)):
                pass
    def test_cannot_instantiate(self):
        with self.assertRaises(TypeError):
            Any()
        with self.assertRaises(TypeError):
            type(Any)()
    def test_cannot_subscript(self):
        with self.assertRaises(TypeError):
            Any[int]
    def test_any_works_with_alias(self):
        # These expressions must simply not fail.
        typing.Match[Any]
        typing.Pattern[Any]
        typing.IO[Any]
class TypeVarTests(BaseTestCase):
    """TypeVar construction, identity, repr and restrictions."""
    def test_basic_plain(self):
        T = TypeVar('T')
        # T equals itself.
        self.assertEqual(T, T)
        # T is an instance of TypeVar
        self.assertIsInstance(T, TypeVar)
    def test_typevar_instance_type_error(self):
        T = TypeVar('T')
        with self.assertRaises(TypeError):
            isinstance(42, T)
    def test_typevar_subclass_type_error(self):
        T = TypeVar('T')
        with self.assertRaises(TypeError):
            issubclass(int, T)
        with self.assertRaises(TypeError):
            issubclass(T, int)
    def test_constrained_error(self):
        # A constrained TypeVar needs at least two constraints.
        with self.assertRaises(TypeError):
            X = TypeVar('X', int)
            X
    def test_union_unique(self):
        X = TypeVar('X')
        Y = TypeVar('Y')
        self.assertNotEqual(X, Y)
        self.assertEqual(Union[X], X)
        self.assertNotEqual(Union[X], Union[X, Y])
        self.assertEqual(Union[X, X], X)
        self.assertNotEqual(Union[X, int], Union[X])
        self.assertNotEqual(Union[X, int], Union[int])
        self.assertEqual(Union[X, int].__args__, (X, int))
        self.assertEqual(Union[X, int].__parameters__, (X,))
        self.assertIs(Union[X, int].__origin__, Union)
    def test_union_constrained(self):
        A = TypeVar('A', str, bytes)
        self.assertNotEqual(Union[A, str], Union[A])
    def test_repr(self):
        # Prefix encodes variance: ~ invariant, + covariant, - contravariant.
        self.assertEqual(repr(T), '~T')
        self.assertEqual(repr(KT), '~KT')
        self.assertEqual(repr(VT), '~VT')
        self.assertEqual(repr(AnyStr), '~AnyStr')
        T_co = TypeVar('T_co', covariant=True)
        self.assertEqual(repr(T_co), '+T_co')
        T_contra = TypeVar('T_contra', contravariant=True)
        self.assertEqual(repr(T_contra), '-T_contra')
    def test_no_redefinition(self):
        # Each TypeVar() call produces a distinct object, even for same args.
        self.assertNotEqual(TypeVar('T'), TypeVar('T'))
        self.assertNotEqual(TypeVar('T', int, str), TypeVar('T', int, str))
    def test_cannot_subclass_vars(self):
        with self.assertRaises(TypeError):
            class V(TypeVar('T')):
                pass
    def test_cannot_subclass_var_itself(self):
        with self.assertRaises(TypeError):
            class V(TypeVar):
                pass
    def test_cannot_instantiate_vars(self):
        with self.assertRaises(TypeError):
            TypeVar('A')()
    def test_bound_errors(self):
        # bound must be a type, and cannot be combined with constraints.
        with self.assertRaises(TypeError):
            TypeVar('X', bound=42)
        with self.assertRaises(TypeError):
            TypeVar('X', str, float, bound=Employee)
class UnionTests(BaseTestCase):
    """Union simplification, ordering, repr and usage restrictions.

    NOTE(review): several cases (e.g. collapsing a subclass into its base)
    reflect the historical typing implementation this suite targets.
    """
    def test_basics(self):
        u = Union[int, float]
        self.assertNotEqual(u, Union)
    def test_subclass_error(self):
        with self.assertRaises(TypeError):
            issubclass(int, Union)
        with self.assertRaises(TypeError):
            issubclass(Union, int)
        with self.assertRaises(TypeError):
            issubclass(int, Union[int, str])
        with self.assertRaises(TypeError):
            issubclass(Union[int, str], int)
    def test_union_any(self):
        # Any absorbs everything else in a Union.
        u = Union[Any]
        self.assertEqual(u, Any)
        u1 = Union[int, Any]
        u2 = Union[Any, int]
        u3 = Union[Any, object]
        self.assertEqual(u1, u2)
        self.assertNotEqual(u1, Any)
        self.assertNotEqual(u2, Any)
        self.assertNotEqual(u3, Any)
    def test_union_object(self):
        # object absorbs every other type in a Union.
        u = Union[object]
        self.assertEqual(u, object)
        u = Union[int, object]
        self.assertEqual(u, object)
        u = Union[object, int]
        self.assertEqual(u, object)
    def test_unordered(self):
        # Argument order is irrelevant for Union equality.
        u1 = Union[int, float]
        u2 = Union[float, int]
        self.assertEqual(u1, u2)
    def test_single_class_disappears(self):
        # Union of one type collapses to that type.
        t = Union[Employee]
        self.assertIs(t, Employee)
    def test_base_class_disappears(self):
        # A subclass is redundant next to its base class.
        u = Union[Employee, Manager, int]
        self.assertEqual(u, Union[int, Employee])
        u = Union[Manager, int, Employee]
        self.assertEqual(u, Union[int, Employee])
        u = Union[Employee, Manager]
        self.assertIs(u, Employee)
    def test_union_union(self):
        # Nested unions flatten.
        u = Union[int, float]
        v = Union[u, Employee]
        self.assertEqual(v, Union[int, float, Employee])
    def test_repr(self):
        self.assertEqual(repr(Union), 'typing.Union')
        u = Union[Employee, int]
        self.assertEqual(repr(u), 'typing.Union[%s.Employee, int]' % __name__)
        u = Union[int, Employee]
        self.assertEqual(repr(u), 'typing.Union[int, %s.Employee]' % __name__)
    def test_cannot_subclass(self):
        with self.assertRaises(TypeError):
            class C(Union):
                pass
        with self.assertRaises(TypeError):
            class C(type(Union)):
                pass
        with self.assertRaises(TypeError):
            class C(Union[int, str]):
                pass
    def test_cannot_instantiate(self):
        with self.assertRaises(TypeError):
            Union()
        with self.assertRaises(TypeError):
            type(Union)()
        u = Union[int, float]
        with self.assertRaises(TypeError):
            u()
        with self.assertRaises(TypeError):
            type(u)()
    def test_union_generalization(self):
        self.assertFalse(Union[str, typing.Iterable[int]] == str)
        self.assertFalse(Union[str, typing.Iterable[int]] == typing.Iterable[int])
        self.assertTrue(Union[str, typing.Iterable] == typing.Iterable)
    def test_optional(self):
        # Optional[X] is sugar for Union[X, None].
        o = Optional[int]
        u = Union[int, None]
        self.assertEqual(o, u)
    def test_empty(self):
        with self.assertRaises(TypeError):
            Union[()]
    def test_union_instance_type_error(self):
        with self.assertRaises(TypeError):
            isinstance(42, Union[int, str])
    def test_union_str_pattern(self):
        # Shouldn't crash; see http://bugs.python.org/issue25390
        A = Union[str, Pattern]
        A
    def test_etree(self):
        # See https://github.com/python/typing/issues/229
        # (Only relevant for Python 2.)
        try:
            from xml.etree.cElementTree import Element
        except ImportError:
            raise SkipTest("cElementTree not found")
        Union[Element, str]  # Shouldn't crash
        def Elem(*args):
            return Element(*args)
        Union[Elem, str]  # Nor should this
class TupleTests(BaseTestCase):
    """typing.Tuple: subclassing, equality, repr and runtime restrictions."""
    def test_basics(self):
        # Parameterized Tuple cannot be used with issubclass()...
        with self.assertRaises(TypeError):
            issubclass(Tuple, Tuple[int, str])
        with self.assertRaises(TypeError):
            issubclass(tuple, Tuple[int, str])
        # ...but the bare Tuple can.
        class TP(tuple): ...
        self.assertTrue(issubclass(tuple, Tuple))
        self.assertTrue(issubclass(TP, Tuple))
    def test_equality(self):
        self.assertEqual(Tuple[int], Tuple[int])
        self.assertEqual(Tuple[int, ...], Tuple[int, ...])
        self.assertNotEqual(Tuple[int], Tuple[int, int])
        self.assertNotEqual(Tuple[int], Tuple[int, ...])
    def test_tuple_subclass(self):
        class MyTuple(tuple):
            pass
        self.assertTrue(issubclass(MyTuple, Tuple))
    def test_tuple_instance_type_error(self):
        # isinstance() only works against the unparameterized Tuple.
        with self.assertRaises(TypeError):
            isinstance((0, 0), Tuple[int, int])
        self.assertIsInstance((0, 0), Tuple)
    def test_repr(self):
        self.assertEqual(repr(Tuple), 'typing.Tuple')
        self.assertEqual(repr(Tuple[()]), 'typing.Tuple[()]')
        self.assertEqual(repr(Tuple[int, float]), 'typing.Tuple[int, float]')
        self.assertEqual(repr(Tuple[int, ...]), 'typing.Tuple[int, ...]')
    def test_errors(self):
        with self.assertRaises(TypeError):
            issubclass(42, Tuple)
        with self.assertRaises(TypeError):
            issubclass(42, Tuple[int])
class CallableTests(BaseTestCase):
    """typing.Callable: equality, invalid forms, repr and runtime checks."""
    def test_self_subclass(self):
        # issubclass() works only against the unparameterized Callable.
        with self.assertRaises(TypeError):
            self.assertTrue(issubclass(type(lambda x: x), Callable[[int], int]))
        self.assertTrue(issubclass(type(lambda x: x), Callable))
    def test_eq_hash(self):
        self.assertEqual(Callable[[int], int], Callable[[int], int])
        self.assertEqual(len({Callable[[int], int], Callable[[int], int]}), 1)
        self.assertNotEqual(Callable[[int], int], Callable[[int], str])
        self.assertNotEqual(Callable[[int], int], Callable[[str], int])
        self.assertNotEqual(Callable[[int], int], Callable[[int, int], int])
        self.assertNotEqual(Callable[[int], int], Callable[[], int])
        self.assertNotEqual(Callable[[int], int], Callable)
    def test_cannot_instantiate(self):
        with self.assertRaises(TypeError):
            Callable()
        with self.assertRaises(TypeError):
            type(Callable)()
        c = Callable[[int], str]
        with self.assertRaises(TypeError):
            c()
        with self.assertRaises(TypeError):
            type(c)()
    def test_callable_wrong_forms(self):
        # Args must be a list of types (or a literal ellipsis).
        with self.assertRaises(TypeError):
            Callable[[...], int]
        with self.assertRaises(TypeError):
            Callable[(), int]
        with self.assertRaises(TypeError):
            Callable[[()], int]
        with self.assertRaises(TypeError):
            Callable[[int, 1], 2]
    def test_callable_instance_works(self):
        def f():
            pass
        self.assertIsInstance(f, Callable)
        self.assertNotIsInstance(None, Callable)
    def test_callable_instance_type_error(self):
        # isinstance() rejects parameterized Callable forms.
        def f():
            pass
        with self.assertRaises(TypeError):
            self.assertIsInstance(f, Callable[[], None])
        with self.assertRaises(TypeError):
            self.assertIsInstance(f, Callable[[], Any])
        with self.assertRaises(TypeError):
            self.assertNotIsInstance(None, Callable[[], None])
        with self.assertRaises(TypeError):
            self.assertNotIsInstance(None, Callable[[], Any])
    def test_repr(self):
        ct0 = Callable[[], bool]
        self.assertEqual(repr(ct0), 'typing.Callable[[], bool]')
        ct2 = Callable[[str, float], int]
        self.assertEqual(repr(ct2), 'typing.Callable[[str, float], int]')
        ctv = Callable[..., str]
        self.assertEqual(repr(ctv), 'typing.Callable[..., str]')
    def test_callable_with_ellipsis(self):
        def foo(a: Callable[..., T]):
            pass
        self.assertEqual(get_type_hints(foo, globals(), locals()),
                         {'a': Callable[..., T]})
    def test_ellipsis_in_generic(self):
        # Shouldn't crash; see https://github.com/python/typing/issues/259
        typing.List[Callable[..., str]]
# Shared type variables for the mapping fixtures: XK is constrained to
# str/bytes, XV is unconstrained.
XK = TypeVar('XK', str, bytes)
XV = TypeVar('XV')
class SimpleMapping(Generic[XK, XV]):
    """Minimal generic mapping interface used as a fixture by GenericTests."""
    def __getitem__(self, key: XK) -> XV:
        ...
    def __setitem__(self, key: XK, value: XV):
        ...
    def get(self, key: XK, default: XV = None) -> XV:
        ...
class MySimpleMapping(SimpleMapping[XK, XV]):
    """Concrete SimpleMapping backed by a plain dict."""

    def __init__(self):
        self.store = {}

    def __getitem__(self, key: str):
        return self.store[key]

    def __setitem__(self, key: str, value):
        self.store[key] = value

    def get(self, key: str, default=None):
        # dict.get already implements the lookup-or-default contract.
        return self.store.get(key, default)
class ProtocolTests(BaseTestCase):
    """Structural Supports* protocols (SupportsInt, SupportsAbs, ...)."""
    def test_supports_int(self):
        self.assertIsSubclass(int, typing.SupportsInt)
        self.assertNotIsSubclass(str, typing.SupportsInt)
    def test_supports_float(self):
        self.assertIsSubclass(float, typing.SupportsFloat)
        self.assertNotIsSubclass(str, typing.SupportsFloat)
    def test_supports_complex(self):
        # Note: complex itself doesn't have __complex__.
        class C:
            def __complex__(self):
                return 0j
        self.assertIsSubclass(C, typing.SupportsComplex)
        self.assertNotIsSubclass(str, typing.SupportsComplex)
    def test_supports_bytes(self):
        # Note: bytes itself doesn't have __bytes__.
        class B:
            def __bytes__(self):
                return b''
        self.assertIsSubclass(B, typing.SupportsBytes)
        self.assertNotIsSubclass(str, typing.SupportsBytes)
    def test_supports_abs(self):
        self.assertIsSubclass(float, typing.SupportsAbs)
        self.assertIsSubclass(int, typing.SupportsAbs)
        self.assertNotIsSubclass(str, typing.SupportsAbs)
    def test_supports_round(self):
        issubclass(float, typing.SupportsRound)
        self.assertIsSubclass(float, typing.SupportsRound)
        self.assertIsSubclass(int, typing.SupportsRound)
        self.assertNotIsSubclass(str, typing.SupportsRound)
    def test_reversible(self):
        self.assertIsSubclass(list, typing.Reversible)
        self.assertNotIsSubclass(int, typing.Reversible)
    def test_protocol_instance_type_error(self):
        # Protocols reject isinstance(), but subclassing them still works.
        with self.assertRaises(TypeError):
            isinstance(0, typing.SupportsAbs)
        class C1(typing.SupportsInt):
            def __int__(self) -> int:
                return 42
        class C2(C1):
            pass
        c = C2()
        self.assertIsInstance(c, C1)
class GenericTests(BaseTestCase):
def test_basics(self):
X = SimpleMapping[str, Any]
self.assertEqual(X.__parameters__, ())
with self.assertRaises(TypeError):
X[str]
with self.assertRaises(TypeError):
X[str, str]
Y = SimpleMapping[XK, str]
self.assertEqual(Y.__parameters__, (XK,))
Y[str]
with self.assertRaises(TypeError):
Y[str, str]
def test_generic_errors(self):
T = TypeVar('T')
with self.assertRaises(TypeError):
Generic[T]()
with self.assertRaises(TypeError):
isinstance([], List[int])
with self.assertRaises(TypeError):
issubclass(list, List[int])
def test_init(self):
T = TypeVar('T')
S = TypeVar('S')
with self.assertRaises(TypeError):
Generic[T, T]
with self.assertRaises(TypeError):
Generic[T, S, T]
def test_repr(self):
self.assertEqual(repr(SimpleMapping),
__name__ + '.' + 'SimpleMapping')
self.assertEqual(repr(MySimpleMapping),
__name__ + '.' + 'MySimpleMapping')
def test_chain_repr(self):
T = TypeVar('T')
S = TypeVar('S')
class C(Generic[T]):
pass
X = C[Tuple[S, T]]
self.assertEqual(X, C[Tuple[S, T]])
self.assertNotEqual(X, C[Tuple[T, S]])
Y = X[T, int]
self.assertEqual(Y, X[T, int])
self.assertNotEqual(Y, X[S, int])
self.assertNotEqual(Y, X[T, str])
Z = Y[str]
self.assertEqual(Z, Y[str])
self.assertNotEqual(Z, Y[int])
self.assertNotEqual(Z, Y[T])
self.assertTrue(str(Z).endswith(
'.C[typing.Tuple[str, int]]'))
def test_new_repr(self):
T = TypeVar('T')
U = TypeVar('U', covariant=True)
S = TypeVar('S')
self.assertEqual(repr(List), 'typing.List')
self.assertEqual(repr(List[T]), 'typing.List[~T]')
self.assertEqual(repr(List[U]), 'typing.List[+U]')
self.assertEqual(repr(List[S][T][int]), 'typing.List[int]')
self.assertEqual(repr(List[int]), 'typing.List[int]')
def test_new_repr_complex(self):
T = TypeVar('T')
TS = TypeVar('TS')
self.assertEqual(repr(typing.Mapping[T, TS][TS, T]), 'typing.Mapping[~TS, ~T]')
self.assertEqual(repr(List[Tuple[T, TS]][int, T]),
'typing.List[typing.Tuple[int, ~T]]')
self.assertEqual(repr(List[Tuple[T, T]][List[int]]),
'typing.List[typing.Tuple[typing.List[int], typing.List[int]]]')
def test_new_repr_bare(self):
T = TypeVar('T')
self.assertEqual(repr(Generic[T]), 'typing.Generic[~T]')
self.assertEqual(repr(typing._Protocol[T]), 'typing.Protocol[~T]')
class C(typing.Dict[Any, Any]): ...
# this line should just work
repr(C.__mro__)
def test_dict(self):
T = TypeVar('T')
class B(Generic[T]):
pass
b = B()
b.foo = 42
self.assertEqual(b.__dict__, {'foo': 42})
class C(B[int]):
pass
c = C()
c.bar = 'abc'
self.assertEqual(c.__dict__, {'bar': 'abc'})
def test_false_subclasses(self):
class MyMapping(MutableMapping[str, str]): pass
self.assertNotIsInstance({}, MyMapping)
self.assertNotIsSubclass(dict, MyMapping)
def test_abc_bases(self):
class MM(MutableMapping[str, str]):
def __getitem__(self, k):
return None
def __setitem__(self, k, v):
pass
def __delitem__(self, k):
pass
def __iter__(self):
return iter(())
def __len__(self):
return 0
# this should just work
MM().update()
self.assertIsInstance(MM(), collections_abc.MutableMapping)
self.assertIsInstance(MM(), MutableMapping)
self.assertNotIsInstance(MM(), List)
self.assertNotIsInstance({}, MM)
def test_multiple_bases(self):
class MM1(MutableMapping[str, str], collections_abc.MutableMapping):
pass
with self.assertRaises(TypeError):
# consistent MRO not possible
class MM2(collections_abc.MutableMapping, MutableMapping[str, str]):
pass
def test_orig_bases(self):
T = TypeVar('T')
class C(typing.Dict[str, T]): ...
self.assertEqual(C.__orig_bases__, (typing.Dict[str, T],))
def test_naive_runtime_checks(self):
def naive_dict_check(obj, tp):
# Check if a dictionary conforms to Dict type
if len(tp.__parameters__) > 0:
raise NotImplementedError
if tp.__args__:
KT, VT = tp.__args__
return all(isinstance(k, KT) and isinstance(v, VT)
for k, v in obj.items())
self.assertTrue(naive_dict_check({'x': 1}, typing.Dict[str, int]))
self.assertFalse(naive_dict_check({1: 'x'}, typing.Dict[str, int]))
with self.assertRaises(NotImplementedError):
naive_dict_check({1: 'x'}, typing.Dict[str, T])
def naive_generic_check(obj, tp):
# Check if an instance conforms to the generic class
if not hasattr(obj, '__orig_class__'):
raise NotImplementedError
return obj.__orig_class__ == tp
class Node(Generic[T]): ...
self.assertTrue(naive_generic_check(Node[int](), Node[int]))
self.assertFalse(naive_generic_check(Node[str](), Node[int]))
self.assertFalse(naive_generic_check(Node[str](), List))
with self.assertRaises(NotImplementedError):
naive_generic_check([1,2,3], Node[int])
def naive_list_base_check(obj, tp):
# Check if list conforms to a List subclass
return all(isinstance(x, tp.__orig_bases__[0].__args__[0])
for x in obj)
class C(List[int]): ...
self.assertTrue(naive_list_base_check([1, 2, 3], C))
self.assertFalse(naive_list_base_check(['a', 'b'], C))
def test_multi_subscr_base(self):
T = TypeVar('T')
U = TypeVar('U')
V = TypeVar('V')
class C(List[T][U][V]): ...
class D(C, List[T][U][V]): ...
self.assertEqual(C.__parameters__, (V,))
self.assertEqual(D.__parameters__, (V,))
self.assertEqual(C[int].__parameters__, ())
self.assertEqual(D[int].__parameters__, ())
self.assertEqual(C[int].__args__, (int,))
self.assertEqual(D[int].__args__, (int,))
self.assertEqual(C.__bases__, (List,))
self.assertEqual(D.__bases__, (C, List))
self.assertEqual(C.__orig_bases__, (List[T][U][V],))
self.assertEqual(D.__orig_bases__, (C, List[T][U][V]))
def test_extended_generic_rules_eq(self):
T = TypeVar('T')
U = TypeVar('U')
self.assertEqual(Tuple[T, T][int], Tuple[int, int])
self.assertEqual(typing.Iterable[Tuple[T, T]][T], typing.Iterable[Tuple[T, T]])
with self.assertRaises(TypeError):
Tuple[T, int][()]
with self.assertRaises(TypeError):
Tuple[T, U][T, ...]
self.assertEqual(Union[T, int][int], int)
self.assertEqual(Union[T, U][int, Union[int, str]], Union[int, str])
class Base: ...
class Derived(Base): ...
self.assertEqual(Union[T, Base][Derived], Base)
with self.assertRaises(TypeError):
Union[T, int][1]
self.assertEqual(Callable[[T], T][KT], Callable[[KT], KT])
self.assertEqual(Callable[..., List[T]][int], Callable[..., List[int]])
with self.assertRaises(TypeError):
Callable[[T], U][..., int]
with self.assertRaises(TypeError):
Callable[[T], U][[], int]
def test_extended_generic_rules_repr(self):
T = TypeVar('T')
self.assertEqual(repr(Union[Tuple, Callable]).replace('typing.', ''),
'Union[Tuple, Callable]')
self.assertEqual(repr(Union[Tuple, Tuple[int]]).replace('typing.', ''),
'Tuple')
self.assertEqual(repr(Callable[..., Optional[T]][int]).replace('typing.', ''),
'Callable[..., Union[int, NoneType]]')
self.assertEqual(repr(Callable[[], List[T]][int]).replace('typing.', ''),
'Callable[[], List[int]]')
def test_generic_forward_ref(self):
def foobar(x: List[List['CC']]): ...
class CC: ...
self.assertEqual(get_type_hints(foobar, globals(), locals()), {'x': List[List[CC]]})
T = TypeVar('T')
AT = Tuple[T, ...]
def barfoo(x: AT): ...
self.assertIs(get_type_hints(barfoo, globals(), locals())['x'], AT)
CT = Callable[..., List[T]]
def barfoo2(x: CT): ...
self.assertIs(get_type_hints(barfoo2, globals(), locals())['x'], CT)
def test_extended_generic_rules_subclassing(self):
class T1(Tuple[T, KT]): ...
class T2(Tuple[T, ...]): ...
class C1(Callable[[T], T]): ...
class C2(Callable[..., int]):
def __call__(self):
return None
self.assertEqual(T1.__parameters__, (T, KT))
self.assertEqual(T1[int, str].__args__, (int, str))
self.assertEqual(T1[int, T].__origin__, T1)
self.assertEqual(T2.__parameters__, (T,))
with self.assertRaises(TypeError):
T1[int]
with self.assertRaises(TypeError):
T2[int, str]
self.assertEqual(repr(C1[int]).split('.')[-1], 'C1[int]')
self.assertEqual(C2.__parameters__, ())
self.assertIsInstance(C2(), collections_abc.Callable)
self.assertIsSubclass(C2, collections_abc.Callable)
self.assertIsSubclass(C1, collections_abc.Callable)
self.assertIsInstance(T1(), tuple)
self.assertIsSubclass(T2, tuple)
self.assertIsSubclass(Tuple[int, ...], typing.Sequence)
self.assertIsSubclass(Tuple[int, ...], typing.Iterable)
def test_fail_with_bare_union(self):
with self.assertRaises(TypeError):
List[Union]
with self.assertRaises(TypeError):
Tuple[Optional]
with self.assertRaises(TypeError):
ClassVar[ClassVar]
with self.assertRaises(TypeError):
List[ClassVar[int]]
def test_fail_with_bare_generic(self):
T = TypeVar('T')
with self.assertRaises(TypeError):
List[Generic]
with self.assertRaises(TypeError):
Tuple[Generic[T]]
with self.assertRaises(TypeError):
List[typing._Protocol]
def test_type_erasure_special(self):
T = TypeVar('T')
# this is the only test that checks type caching
self.clear_caches()
class MyTup(Tuple[T, T]): ...
self.assertIs(MyTup[int]().__class__, MyTup)
self.assertIs(MyTup[int]().__orig_class__, MyTup[int])
class MyCall(Callable[..., T]):
def __call__(self): return None
self.assertIs(MyCall[T]().__class__, MyCall)
self.assertIs(MyCall[T]().__orig_class__, MyCall[T])
class MyDict(typing.Dict[T, T]): ...
self.assertIs(MyDict[int]().__class__, MyDict)
self.assertIs(MyDict[int]().__orig_class__, MyDict[int])
class MyDef(typing.DefaultDict[str, T]): ...
self.assertIs(MyDef[int]().__class__, MyDef)
self.assertIs(MyDef[int]().__orig_class__, MyDef[int])
def test_all_repr_eq_any(self):
objs = (getattr(typing, el) for el in typing.__all__)
for obj in objs:
self.assertNotEqual(repr(obj), '')
self.assertEqual(obj, obj)
if getattr(obj, '__parameters__', None) and len(obj.__parameters__) == 1:
self.assertEqual(obj[Any].__args__, (Any,))
if isinstance(obj, type):
for base in obj.__mro__:
self.assertNotEqual(repr(base), '')
self.assertEqual(base, base)
def test_substitution_helper(self):
T = TypeVar('T')
KT = TypeVar('KT')
VT = TypeVar('VT')
class Map(Generic[KT, VT]):
def meth(self, k: KT, v: VT): ...
StrMap = Map[str, T]
obj = StrMap[int]()
new_args = typing._subs_tree(obj.__orig_class__)
new_annots = {k: typing._replace_arg(v, type(obj).__parameters__, new_args)
for k, v in obj.meth.__annotations__.items()}
self.assertEqual(new_annots, {'k': str, 'v': int})
def test_pickle(self):
global C # pickle wants to reference the class by name
T = TypeVar('T')
class B(Generic[T]):
pass
class C(B[int]):
pass
c = C()
c.foo = 42
c.bar = 'abc'
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
z = pickle.dumps(c, proto)
x = pickle.loads(z)
self.assertEqual(x.foo, 42)
self.assertEqual(x.bar, 'abc')
self.assertEqual(x.__dict__, {'foo': 42, 'bar': 'abc'})
simples = [Any, Union, Tuple, Callable, ClassVar, List, typing.Iterable]
for s in simples:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
z = pickle.dumps(s, proto)
x = pickle.loads(z)
self.assertEqual(s, x)
def test_copy_and_deepcopy(self):
T = TypeVar('T')
class Node(Generic[T]): ...
things = [Union[T, int], Tuple[T, int], Callable[..., T], Callable[[int], int],
Tuple[Any, Any], Node[T], Node[int], Node[Any], typing.Iterable[T],
typing.Iterable[Any], typing.Iterable[int], typing.Dict[int, str],
typing.Dict[T, Any], ClassVar[int], ClassVar[List[T]], Tuple['T', 'T'],
Union['T', int], List['T'], typing.Mapping['T', int]]
for t in things + [Any]:
self.assertEqual(t, copy(t))
self.assertEqual(t, deepcopy(t))
def test_parameterized_slots(self):
T = TypeVar('T')
class C(Generic[T]):
__slots__ = ('potato',)
c = C()
c_int = C[int]()
self.assertEqual(C.__slots__, C[str].__slots__)
c.potato = 0
c_int.potato = 0
with self.assertRaises(AttributeError):
c.tomato = 0
with self.assertRaises(AttributeError):
c_int.tomato = 0
def foo(x: C['C']): ...
self.assertEqual(get_type_hints(foo, globals(), locals())['x'], C[C])
self.assertEqual(get_type_hints(foo, globals(), locals())['x'].__slots__,
C.__slots__)
self.assertEqual(copy(C[int]), deepcopy(C[int]))
def test_parameterized_slots_dict(self):
T = TypeVar('T')
class D(Generic[T]):
__slots__ = {'banana': 42}
d = D()
d_int = D[int]()
self.assertEqual(D.__slots__, D[str].__slots__)
d.banana = 'yes'
d_int.banana = 'yes'
with self.assertRaises(AttributeError):
d.foobar = 'no'
with self.assertRaises(AttributeError):
d_int.foobar = 'no'
def test_errors(self):
with self.assertRaises(TypeError):
B = SimpleMapping[XK, Any]
class C(Generic[B]):
pass
    def test_repr_2(self):
        # repr()/__qualname__ of generic classes, parameterized aliases
        # and their subclasses.  __qualname__ only exists on Python >= 3.3.
        PY32 = sys.version_info[:2] < (3, 3)

        class C(Generic[T]):
            pass

        self.assertEqual(C.__module__, __name__)
        if not PY32:
            self.assertEqual(C.__qualname__,
                             'GenericTests.test_repr_2.<locals>.C')
        self.assertEqual(repr(C).split('.')[-1], 'C')
        X = C[int]
        self.assertEqual(X.__module__, __name__)
        if not PY32:
            self.assertTrue(X.__qualname__.endswith('.<locals>.C'))
        self.assertEqual(repr(X).split('.')[-1], 'C[int]')

        class Y(C[int]):
            pass

        self.assertEqual(Y.__module__, __name__)
        if not PY32:
            self.assertEqual(Y.__qualname__,
                             'GenericTests.test_repr_2.<locals>.Y')
        self.assertEqual(repr(Y).split('.')[-1], 'Y')
    def test_eq_1(self):
        # Generic and identically-parameterized Generic compare equal;
        # different type variables make them unequal.
        self.assertEqual(Generic, Generic)
        self.assertEqual(Generic[T], Generic[T])
        self.assertNotEqual(Generic[KT], Generic[VT])
    def test_eq_2(self):
        # Distinct user generic classes are unequal even with identical
        # definitions, and so are their parameterized forms.
        class A(Generic[T]):
            pass
        class B(Generic[T]):
            pass

        self.assertEqual(A, A)
        self.assertNotEqual(A, B)
        self.assertEqual(A[T], A[T])
        self.assertNotEqual(A[T], B[T])
    def test_multiple_inheritance(self):
        # With multiple generic bases, an explicit Generic[...] base fixes
        # the order of __parameters__.
        class A(Generic[T, VT]):
            pass
        class B(Generic[KT, T]):
            pass
        class C(A[T, VT], Generic[VT, T, KT], B[KT, T]):
            pass

        self.assertEqual(C.__parameters__, (VT, T, KT))
    def test_nested(self):
        # Subclassing a parameterized alias of a user generic class; the
        # subclass behaves like a normal (non-generic) class.
        G = Generic
        class Visitor(G[T]):
            a = None
            def set(self, a: T):
                self.a = a
            def get(self):
                return self.a
            def visit(self) -> T:
                return self.a
        V = Visitor[typing.List[int]]
        class IntListVisitor(V):
            def append(self, x: int):
                self.a.append(x)
        a = IntListVisitor()
        a.set([])
        a.append(1)
        a.append(42)
        self.assertEqual(a.get(), [1, 42])
    def test_type_erasure(self):
        # Instantiating Node, Node[T] or Node[Any] must all produce plain
        # Node instances: type arguments are erased at runtime.
        T = TypeVar('T')
        class Node(Generic[T]):
            def __init__(self, label: T,
                         left: 'Node[T]' = None,
                         right: 'Node[T]' = None):
                self.label = label  # type: T
                self.left = left  # type: Optional[Node[T]]
                self.right = right  # type: Optional[Node[T]]

        def foo(x: T):
            a = Node(x)
            b = Node[T](x)
            c = Node[Any](x)
            self.assertIs(type(a), Node)
            self.assertIs(type(b), Node)
            self.assertIs(type(c), Node)
            self.assertEqual(a.label, x)
            self.assertEqual(b.label, x)
            self.assertEqual(c.label, x)

        foo(42)
    def test_implicit_any(self):
        # Subclassing a generic class without parameters implicitly uses
        # Any: the subclass has no __parameters__ and is not subscriptable.
        T = TypeVar('T')
        class C(Generic[T]):
            pass
        class D(C):
            pass

        self.assertEqual(D.__parameters__, ())

        with self.assertRaises(Exception):
            D[int]
        with self.assertRaises(Exception):
            D[Any]
        with self.assertRaises(Exception):
            D[T]
class ClassVarTests(BaseTestCase):
    """Tests for the typing.ClassVar special form."""

    def test_basics(self):
        # ClassVar takes exactly one type argument and cannot be
        # re-subscripted.
        with self.assertRaises(TypeError):
            ClassVar[1]
        with self.assertRaises(TypeError):
            ClassVar[int, str]
        with self.assertRaises(TypeError):
            ClassVar[int][str]

    def test_repr(self):
        self.assertEqual(repr(ClassVar), 'typing.ClassVar')
        cv = ClassVar[int]
        self.assertEqual(repr(cv), 'typing.ClassVar[int]')
        cv = ClassVar[Employee]
        self.assertEqual(repr(cv), 'typing.ClassVar[%s.Employee]' % __name__)

    def test_cannot_subclass(self):
        # Neither ClassVar itself nor a parameterized form may be used as
        # a base class.
        with self.assertRaises(TypeError):
            class C(type(ClassVar)):
                pass
        with self.assertRaises(TypeError):
            class C(type(ClassVar[int])):
                pass

    def test_cannot_init(self):
        with self.assertRaises(TypeError):
            ClassVar()
        with self.assertRaises(TypeError):
            type(ClassVar)()
        with self.assertRaises(TypeError):
            type(ClassVar[Optional[int]])()

    def test_no_isinstance(self):
        # ClassVar is an annotation-only construct; runtime checks fail.
        with self.assertRaises(TypeError):
            isinstance(1, ClassVar[int])
        with self.assertRaises(TypeError):
            issubclass(int, ClassVar)
class CastTests(BaseTestCase):
    """Tests for typing.cast(), which is a runtime no-op."""

    def test_basics(self):
        # cast() returns its second argument unchanged, regardless of the
        # target type.
        self.assertEqual(cast(int, 42), 42)
        self.assertEqual(cast(float, 42), 42)
        self.assertIs(type(cast(float, 42)), int)
        self.assertEqual(cast(Any, 42), 42)
        self.assertEqual(cast(list, 42), 42)
        self.assertEqual(cast(Union[str, float], 42), 42)
        self.assertEqual(cast(AnyStr, 42), 42)
        self.assertEqual(cast(None, 42), 42)

    def test_errors(self):
        # Bogus calls are not expected to fail.
        cast(42, 42)
        cast('hello', 42)
class ForwardRefTests(BaseTestCase):
    """Tests for string (forward) references in annotations and their
    resolution by get_type_hints()."""

    def test_basics(self):

        class Node(Generic[T]):

            def __init__(self, label: T):
                self.label = label
                self.left = self.right = None

            def add_both(self,
                         left: 'Optional[Node[T]]',
                         right: 'Node[T]' = None,
                         stuff: int = None,
                         blah=None):
                self.left = left
                self.right = right

            def add_left(self, node: Optional['Node[T]']):
                self.add_both(node, None)

            def add_right(self, node: 'Node[T]' = None):
                self.add_both(None, node)

        t = Node[int]
        both_hints = get_type_hints(t.add_both, globals(), locals())
        # A None default turns the hint into Optional[...]; quoted and
        # unquoted spellings must resolve identically.
        self.assertEqual(both_hints['left'], Optional[Node[T]])
        self.assertEqual(both_hints['right'], Optional[Node[T]])
        self.assertEqual(both_hints['left'], both_hints['right'])
        self.assertEqual(both_hints['stuff'], Optional[int])
        # Unannotated parameters don't appear in the hints at all.
        self.assertNotIn('blah', both_hints)

        left_hints = get_type_hints(t.add_left, globals(), locals())
        self.assertEqual(left_hints['node'], Optional[Node[T]])

        right_hints = get_type_hints(t.add_right, globals(), locals())
        self.assertEqual(right_hints['node'], Optional[Node[T]])

    def test_forwardref_instance_type_error(self):
        # An unresolved forward reference cannot be used with isinstance().
        fr = typing._ForwardRef('int')
        with self.assertRaises(TypeError):
            isinstance(42, fr)

    def test_union_forward(self):

        def foo(a: Union['T']):
            pass

        self.assertEqual(get_type_hints(foo, globals(), locals()),
                         {'a': Union[T]})

    def test_tuple_forward(self):

        def foo(a: Tuple['T']):
            pass

        self.assertEqual(get_type_hints(foo, globals(), locals()),
                         {'a': Tuple[T]})

    def test_callable_forward(self):

        def foo(a: Callable[['T'], 'T']):
            pass

        self.assertEqual(get_type_hints(foo, globals(), locals()),
                         {'a': Callable[[T], T]})

    def test_callable_with_ellipsis_forward(self):

        def foo(a: 'Callable[..., T]'):
            pass

        self.assertEqual(get_type_hints(foo, globals(), locals()),
                         {'a': Callable[..., T]})

    def test_syntax_error(self):
        # A malformed forward reference raises immediately on subscription.
        with self.assertRaises(SyntaxError):
            Generic['/T']

    def test_delayed_syntax_error(self):
        # ...but inside an annotation, the error surfaces only when
        # get_type_hints() tries to evaluate it.
        def foo(a: 'Node[T'):
            pass

        with self.assertRaises(SyntaxError):
            get_type_hints(foo)

    def test_type_error(self):

        def foo(a: Tuple['42']):
            pass

        with self.assertRaises(TypeError):
            get_type_hints(foo)

    def test_name_error(self):
        # A reference to an undefined name fails at resolution time.
        def foo(a: 'Noode[T]'):
            pass

        with self.assertRaises(NameError):
            get_type_hints(foo, locals())

    def test_no_type_check(self):
        # @no_type_check suppresses all hints on the function.
        @no_type_check
        def foo(a: 'whatevers') -> {}:
            pass

        th = get_type_hints(foo)
        self.assertEqual(th, {})

    def test_no_type_check_class(self):
        # @no_type_check on a class suppresses hints on its methods, both
        # via the class and via an instance.
        @no_type_check
        class C:
            def foo(a: 'whatevers') -> {}:
                pass

        cth = get_type_hints(C.foo)
        self.assertEqual(cth, {})
        ith = get_type_hints(C().foo)
        self.assertEqual(ith, {})

    def test_meta_no_type_check(self):
        # no_type_check_decorator makes a decorator that applies
        # no_type_check to whatever it decorates.
        @no_type_check_decorator
        def magic_decorator(deco):
            return deco

        self.assertEqual(magic_decorator.__name__, 'magic_decorator')

        @magic_decorator
        def foo(a: 'whatevers') -> {}:
            pass

        @magic_decorator
        class C:
            def foo(a: 'whatevers') -> {}:
                pass

        self.assertEqual(foo.__name__, 'foo')
        th = get_type_hints(foo)
        self.assertEqual(th, {})
        cth = get_type_hints(C.foo)
        self.assertEqual(cth, {})
        ith = get_type_hints(C().foo)
        self.assertEqual(ith, {})

    def test_default_globals(self):
        # When no globalns is passed, hints resolve against the module the
        # function was defined in (here, the exec() namespace).
        code = ("class C:\n"
                "    def foo(self, a: 'C') -> 'D': pass\n"
                "class D:\n"
                "    def bar(self, b: 'D') -> C: pass\n"
                )
        ns = {}
        exec(code, ns)
        hints = get_type_hints(ns['C'].foo)
        self.assertEqual(hints, {'a': ns['C'], 'return': ns['D']})
class OverloadTests(BaseTestCase):
    """Tests for @typing.overload."""

    def test_overload_exists(self):
        from typing import overload

    def test_overload_fails(self):
        from typing import overload

        # Calling an @overload stub directly must raise at runtime.
        with self.assertRaises(RuntimeError):

            @overload
            def blah():
                pass

            blah()

    def test_overload_succeeds(self):
        from typing import overload

        @overload
        def blah():
            pass

        # The real implementation shadows the stub and is callable.
        def blah():
            pass

        blah()
# async/await syntax only exists on Python >= 3.5, so the helper classes
# that use it are kept in a string and exec()-ed conditionally; this keeps
# the file importable on older versions.
ASYNCIO = sys.version_info[:2] >= (3, 5)

ASYNCIO_TESTS = """
import asyncio

T_a = TypeVar('T_a')

class AwaitableWrapper(typing.Awaitable[T_a]):

    def __init__(self, value):
        self.value = value

    def __await__(self) -> typing.Iterator[T_a]:
        yield
        return self.value

class AsyncIteratorWrapper(typing.AsyncIterator[T_a]):

    def __init__(self, value: typing.Iterable[T_a]):
        self.value = value

    def __aiter__(self) -> typing.AsyncIterator[T_a]:
        return self

    @asyncio.coroutine
    def __anext__(self) -> T_a:
        data = yield from self.value
        if data:
            return data
        else:
            raise StopAsyncIteration
"""

if ASYNCIO:
    try:
        exec(ASYNCIO_TESTS)
    except ImportError:
        ASYNCIO = False  # asyncio itself is unavailable; skip those tests
# Variable annotations (PEP 526) only parse on Python >= 3.6, so these
# classes are likewise defined via a conditionally exec()-ed string.
PY36 = sys.version_info[:2] >= (3, 6)

PY36_TESTS = """
from test import ann_module, ann_module2, ann_module3

class A:
    y: float
class B(A):
    x: ClassVar[Optional['B']] = None
    y: int
class CSub(B):
    z: ClassVar['CSub'] = B()
class G(Generic[T]):
    lst: ClassVar[List[T]] = []

class CoolEmployee(NamedTuple):
    name: str
    cool: int
"""

if PY36:
    exec(PY36_TESTS)

# Short alias used throughout the get_type_hints tests below.
gth = get_type_hints
class GetTypeHintTests(BaseTestCase):
    """Tests for get_type_hints() on modules, classes and functions."""

    def test_get_type_hints_from_various_objects(self):
        # For invalid objects should fail with TypeError (not AttributeError etc).
        with self.assertRaises(TypeError):
            gth(123)
        with self.assertRaises(TypeError):
            gth('abc')
        with self.assertRaises(TypeError):
            gth(None)

    @skipUnless(PY36, 'Python 3.6 required')
    def test_get_type_hints_modules(self):
        self.assertEqual(gth(ann_module), {1: 2, 'f': Tuple[int, int], 'x': int, 'y': str})
        self.assertEqual(gth(ann_module2), {})
        self.assertEqual(gth(ann_module3), {})

    @skipUnless(PY36, 'Python 3.6 required')
    def test_get_type_hints_classes(self):
        self.assertEqual(gth(ann_module.C, ann_module.__dict__),
                         {'y': Optional[ann_module.C]})
        self.assertIsInstance(gth(ann_module.j_class), dict)
        self.assertEqual(gth(ann_module.M), {'123': 123, 'o': type})
        self.assertEqual(gth(ann_module.D),
                         {'j': str, 'k': str, 'y': Optional[ann_module.C]})
        self.assertEqual(gth(ann_module.Y), {'z': int})
        self.assertEqual(gth(ann_module.h_class),
                         {'y': Optional[ann_module.C]})
        self.assertEqual(gth(ann_module.S), {'x': str, 'y': str})
        self.assertEqual(gth(ann_module.foo), {'x': int})

    @skipUnless(PY36, 'Python 3.6 required')
    def test_respect_no_type_check(self):
        # @no_type_check must propagate to nested classes and must not
        # leak onto unrelated base classes.
        @no_type_check
        class NoTpCheck:
            class Inn:
                def __init__(self, x: 'not a type'): ...

        self.assertTrue(NoTpCheck.__no_type_check__)
        self.assertTrue(NoTpCheck.Inn.__init__.__no_type_check__)
        self.assertEqual(gth(ann_module2.NTC.meth), {})

        class ABase(Generic[T]):
            def meth(x: int): ...
        @no_type_check
        class Der(ABase): ...
        self.assertEqual(gth(ABase.meth), {'x': int})

    def test_get_type_hints_for_builins(self):
        # Should not fail for built-in classes and functions.
        self.assertEqual(gth(int), {})
        self.assertEqual(gth(type), {})
        self.assertEqual(gth(dir), {})
        self.assertEqual(gth(len), {})

    def test_previous_behavior(self):
        # String annotations injected after the fact are still evaluated.
        def testf(x, y): ...
        testf.__annotations__['x'] = 'int'
        self.assertEqual(gth(testf), {'x': int})

    def test_get_type_hints_for_object_with_annotations(self):
        # Any object carrying __annotations__ is accepted.
        class A: ...
        class B: ...
        b = B()
        b.__annotations__ = {'x': 'A'}
        self.assertEqual(gth(b, locals()), {'x': A})

    @skipUnless(PY36, 'Python 3.6 required')
    def test_get_type_hints_ClassVar(self):
        # Hints from all bases are merged; ClassVar wrappers are preserved.
        self.assertEqual(gth(ann_module2.CV, ann_module2.__dict__),
                         {'var': typing.ClassVar[ann_module2.CV]})
        self.assertEqual(gth(B, globals()),
                         {'y': int, 'x': ClassVar[Optional[B]]})
        self.assertEqual(gth(CSub, globals()),
                         {'z': ClassVar[CSub], 'y': int, 'x': ClassVar[Optional[B]]})
        self.assertEqual(gth(G), {'lst': ClassVar[List[T]]})
class CollectionsAbcTests(BaseTestCase):
    """Tests that the typing aliases for collections.abc behave correctly
    with isinstance()/issubclass(), forbid direct instantiation, and can
    be subclassed (both bare and parameterized)."""

    def test_hashable(self):
        self.assertIsInstance(42, typing.Hashable)
        self.assertNotIsInstance([], typing.Hashable)

    def test_iterable(self):
        self.assertIsInstance([], typing.Iterable)
        # Due to ABC caching, the second time takes a separate code
        # path and could fail.  So call this a few times.
        self.assertIsInstance([], typing.Iterable)
        self.assertIsInstance([], typing.Iterable)
        self.assertNotIsInstance(42, typing.Iterable)
        # Just in case, also test issubclass() a few times.
        self.assertIsSubclass(list, typing.Iterable)
        self.assertIsSubclass(list, typing.Iterable)

    def test_iterator(self):
        it = iter([])
        self.assertIsInstance(it, typing.Iterator)
        self.assertNotIsInstance(42, typing.Iterator)

    @skipUnless(ASYNCIO, 'Python 3.5 and multithreading required')
    def test_awaitable(self):
        # async syntax is exec()-ed so the file parses on Python < 3.5.
        ns = {}
        exec(
            "async def foo() -> typing.Awaitable[int]:\n"
            "    return await AwaitableWrapper(42)\n",
            globals(), ns)
        foo = ns['foo']
        g = foo()
        self.assertIsInstance(g, typing.Awaitable)
        self.assertNotIsInstance(foo, typing.Awaitable)
        g.send(None)  # Run foo() till completion, to avoid warning.

    @skipUnless(ASYNCIO, 'Python 3.5 and multithreading required')
    def test_coroutine(self):
        ns = {}
        exec(
            "async def foo():\n"
            "    return\n",
            globals(), ns)
        foo = ns['foo']
        g = foo()
        self.assertIsInstance(g, typing.Coroutine)
        with self.assertRaises(TypeError):
            isinstance(g, typing.Coroutine[int])
        self.assertNotIsInstance(foo, typing.Coroutine)
        try:
            g.send(None)
        except StopIteration:
            pass

    @skipUnless(ASYNCIO, 'Python 3.5 and multithreading required')
    def test_async_iterable(self):
        base_it = range(10)  # type: Iterator[int]
        it = AsyncIteratorWrapper(base_it)
        self.assertIsInstance(it, typing.AsyncIterable)
        self.assertIsInstance(it, typing.AsyncIterable)
        self.assertNotIsInstance(42, typing.AsyncIterable)

    @skipUnless(ASYNCIO, 'Python 3.5 and multithreading required')
    def test_async_iterator(self):
        base_it = range(10)  # type: Iterator[int]
        it = AsyncIteratorWrapper(base_it)
        self.assertIsInstance(it, typing.AsyncIterator)
        self.assertNotIsInstance(42, typing.AsyncIterator)

    def test_sized(self):
        self.assertIsInstance([], typing.Sized)
        self.assertNotIsInstance(42, typing.Sized)

    def test_container(self):
        self.assertIsInstance([], typing.Container)
        self.assertNotIsInstance(42, typing.Container)

    def test_collection(self):
        # typing.Collection only exists on newer versions (3.6+).
        if hasattr(typing, 'Collection'):
            self.assertIsInstance(tuple(), typing.Collection)
            self.assertIsInstance(frozenset(), typing.Collection)
            self.assertIsSubclass(dict, typing.Collection)
            self.assertNotIsInstance(42, typing.Collection)

    def test_abstractset(self):
        self.assertIsInstance(set(), typing.AbstractSet)
        self.assertNotIsInstance(42, typing.AbstractSet)

    def test_mutableset(self):
        self.assertIsInstance(set(), typing.MutableSet)
        self.assertNotIsInstance(frozenset(), typing.MutableSet)

    def test_mapping(self):
        self.assertIsInstance({}, typing.Mapping)
        self.assertNotIsInstance(42, typing.Mapping)

    def test_mutablemapping(self):
        self.assertIsInstance({}, typing.MutableMapping)
        self.assertNotIsInstance(42, typing.MutableMapping)

    def test_sequence(self):
        self.assertIsInstance([], typing.Sequence)
        self.assertNotIsInstance(42, typing.Sequence)

    def test_mutablesequence(self):
        self.assertIsInstance([], typing.MutableSequence)
        self.assertNotIsInstance((), typing.MutableSequence)

    def test_bytestring(self):
        self.assertIsInstance(b'', typing.ByteString)
        self.assertIsInstance(bytearray(b''), typing.ByteString)

    def test_list(self):
        self.assertIsSubclass(list, typing.List)

    def test_set(self):
        self.assertIsSubclass(set, typing.Set)
        self.assertNotIsSubclass(frozenset, typing.Set)

    def test_frozenset(self):
        self.assertIsSubclass(frozenset, typing.FrozenSet)
        self.assertNotIsSubclass(set, typing.FrozenSet)

    def test_dict(self):
        self.assertIsSubclass(dict, typing.Dict)

    def test_no_list_instantiation(self):
        # The typing aliases themselves are not instantiable...
        with self.assertRaises(TypeError):
            typing.List()
        with self.assertRaises(TypeError):
            typing.List[T]()
        with self.assertRaises(TypeError):
            typing.List[int]()

    def test_list_subclass(self):
        # ...but concrete user subclasses of them are.
        class MyList(typing.List[int]):
            pass

        a = MyList()
        self.assertIsInstance(a, MyList)
        self.assertIsInstance(a, typing.Sequence)
        self.assertIsSubclass(MyList, list)
        self.assertNotIsSubclass(list, MyList)

    def test_no_dict_instantiation(self):
        with self.assertRaises(TypeError):
            typing.Dict()
        with self.assertRaises(TypeError):
            typing.Dict[KT, VT]()
        with self.assertRaises(TypeError):
            typing.Dict[str, int]()

    def test_dict_subclass(self):
        class MyDict(typing.Dict[str, int]):
            pass

        d = MyDict()
        self.assertIsInstance(d, MyDict)
        self.assertIsInstance(d, typing.MutableMapping)
        self.assertIsSubclass(MyDict, dict)
        self.assertNotIsSubclass(dict, MyDict)

    def test_no_defaultdict_instantiation(self):
        with self.assertRaises(TypeError):
            typing.DefaultDict()
        with self.assertRaises(TypeError):
            typing.DefaultDict[KT, VT]()
        with self.assertRaises(TypeError):
            typing.DefaultDict[str, int]()

    def test_defaultdict_subclass(self):
        class MyDefDict(typing.DefaultDict[str, int]):
            pass

        dd = MyDefDict()
        self.assertIsInstance(dd, MyDefDict)
        self.assertIsSubclass(MyDefDict, collections.defaultdict)
        self.assertNotIsSubclass(collections.defaultdict, MyDefDict)

    def test_no_set_instantiation(self):
        with self.assertRaises(TypeError):
            typing.Set()
        with self.assertRaises(TypeError):
            typing.Set[T]()
        with self.assertRaises(TypeError):
            typing.Set[int]()

    def test_set_subclass_instantiation(self):
        class MySet(typing.Set[int]):
            pass

        d = MySet()
        self.assertIsInstance(d, MySet)

    def test_no_frozenset_instantiation(self):
        with self.assertRaises(TypeError):
            typing.FrozenSet()
        with self.assertRaises(TypeError):
            typing.FrozenSet[T]()
        with self.assertRaises(TypeError):
            typing.FrozenSet[int]()

    def test_frozenset_subclass_instantiation(self):
        class MyFrozenSet(typing.FrozenSet[int]):
            pass

        d = MyFrozenSet()
        self.assertIsInstance(d, MyFrozenSet)

    def test_no_tuple_instantiation(self):
        with self.assertRaises(TypeError):
            Tuple()
        with self.assertRaises(TypeError):
            Tuple[T]()
        with self.assertRaises(TypeError):
            Tuple[int]()

    def test_generator(self):
        def foo():
            yield 42
        g = foo()
        self.assertIsSubclass(type(g), typing.Generator)

    def test_no_generator_instantiation(self):
        with self.assertRaises(TypeError):
            typing.Generator()
        with self.assertRaises(TypeError):
            typing.Generator[T, T, T]()
        with self.assertRaises(TypeError):
            typing.Generator[int, int, int]()

    def test_subclassing(self):

        class MMA(typing.MutableMapping):
            pass

        with self.assertRaises(TypeError):  # It's abstract
            MMA()

        class MMC(MMA):
            def __getitem__(self, k):
                return None
            def __setitem__(self, k, v):
                pass
            def __delitem__(self, k):
                pass
            def __iter__(self):
                return iter(())
            def __len__(self):
                return 0

        self.assertEqual(len(MMC()), 0)
        assert callable(MMC.update)
        self.assertIsInstance(MMC(), typing.Mapping)

        class MMB(typing.MutableMapping[KT, VT]):
            def __getitem__(self, k):
                return None
            def __setitem__(self, k, v):
                pass
            def __delitem__(self, k):
                pass
            def __iter__(self):
                return iter(())
            def __len__(self):
                return 0

        self.assertEqual(len(MMB()), 0)
        self.assertEqual(len(MMB[str, str]()), 0)
        self.assertEqual(len(MMB[KT, VT]()), 0)

        self.assertNotIsSubclass(dict, MMA)
        self.assertNotIsSubclass(dict, MMB)

        self.assertIsSubclass(MMA, typing.Mapping)
        self.assertIsSubclass(MMB, typing.Mapping)
        self.assertIsSubclass(MMC, typing.Mapping)

        self.assertIsInstance(MMB[KT, VT](), typing.Mapping)
        self.assertIsInstance(MMB[KT, VT](), collections.Mapping)

        self.assertIsSubclass(MMA, collections.Mapping)
        self.assertIsSubclass(MMB, collections.Mapping)
        self.assertIsSubclass(MMC, collections.Mapping)

        self.assertIsSubclass(MMB[str, str], typing.Mapping)
        self.assertIsSubclass(MMC, MMA)

        class I(typing.Iterable): ...
        self.assertNotIsSubclass(list, I)

        class G(typing.Generator[int, int, int]): ...
        def g(): yield 0
        self.assertIsSubclass(G, typing.Generator)
        self.assertIsSubclass(G, typing.Iterable)
        if hasattr(collections, 'Generator'):
            self.assertIsSubclass(G, collections.Generator)
        self.assertIsSubclass(G, collections.Iterable)
        self.assertNotIsSubclass(type(g), G)

    def test_subclassing_subclasshook(self):
        # A user __subclasshook__ on a typing-derived ABC is honored.
        class Base(typing.Iterable):
            @classmethod
            def __subclasshook__(cls, other):
                if other.__name__ == 'Foo':
                    return True
                else:
                    return False

        class C(Base): ...
        class Foo: ...
        class Bar: ...
        self.assertIsSubclass(Foo, Base)
        self.assertIsSubclass(Foo, C)
        self.assertNotIsSubclass(Bar, C)

    def test_subclassing_register(self):
        # ABC .register() works through typing-derived classes.
        class A(typing.Container): ...
        class B(A): ...

        class C: ...
        A.register(C)
        self.assertIsSubclass(C, A)
        self.assertNotIsSubclass(C, B)

        class D: ...
        B.register(D)
        self.assertIsSubclass(D, A)
        self.assertIsSubclass(D, B)

        class M(): ...
        collections.MutableMapping.register(M)
        self.assertIsSubclass(M, typing.Mapping)

    def test_collections_as_base(self):
        # Subclasses of the collections.abc classes are recognized by the
        # corresponding typing aliases.
        class M(collections.Mapping): ...
        self.assertIsSubclass(M, typing.Mapping)
        self.assertIsSubclass(M, typing.Iterable)

        class S(collections.MutableSequence): ...
        self.assertIsSubclass(S, typing.MutableSequence)
        self.assertIsSubclass(S, typing.Iterable)

        class I(collections.Iterable): ...
        self.assertIsSubclass(I, typing.Iterable)

        class A(collections.Mapping, metaclass=abc.ABCMeta): ...
        class B: ...
        A.register(B)
        self.assertIsSubclass(B, typing.Mapping)
class OtherABCTests(BaseTestCase):
    """Tests for misc ABC aliases (currently just ContextManager)."""

    @skipUnless(hasattr(typing, 'ContextManager'),
                'requires typing.ContextManager')
    def test_contextmanager(self):
        @contextlib.contextmanager
        def manager():
            yield 42

        cm = manager()
        self.assertIsInstance(cm, typing.ContextManager)
        self.assertNotIsInstance(42, typing.ContextManager)
class TypeTests(BaseTestCase):
    """Tests for typing.Type[...] used in annotations (smoke tests; the
    annotations are not checked at runtime)."""

    def test_type_basic(self):

        class User: pass
        class BasicUser(User): pass
        class ProUser(User): pass

        def new_user(user_class: Type[User]) -> User:
            return user_class()

        joe = new_user(BasicUser)

    def test_type_typevar(self):

        class User: pass
        class BasicUser(User): pass
        class ProUser(User): pass

        U = TypeVar('U', bound=User)

        def new_user(user_class: Type[U]) -> U:
            return user_class()

        joe = new_user(BasicUser)

    def test_type_optional(self):
        A = Optional[Type[BaseException]]

        def foo(a: A) -> Optional[BaseException]:
            if a is None:
                return None
            else:
                return a()

        assert isinstance(foo(KeyboardInterrupt), KeyboardInterrupt)
        assert foo(None) is None
class NewTypeTests(BaseTestCase):
    """Tests for typing.NewType: identity function at runtime, opaque to
    issubclass() and subclassing."""

    def test_basic(self):
        UserId = NewType('UserId', int)
        UserName = NewType('UserName', str)
        self.assertIsInstance(UserId(5), int)
        self.assertIsInstance(UserName('Joe'), str)
        self.assertEqual(UserId(5) + 1, 6)

    def test_errors(self):
        UserId = NewType('UserId', int)
        UserName = NewType('UserName', str)
        # NewType objects are not classes: no issubclass(), no subclassing.
        with self.assertRaises(TypeError):
            issubclass(UserId, int)
        with self.assertRaises(TypeError):
            class D(UserName):
                pass
class NamedTupleTests(BaseTestCase):
    """Tests for typing.NamedTuple (functional, keyword and class-syntax
    forms) including _field_types and pickling."""

    def test_basics(self):
        Emp = NamedTuple('Emp', [('name', str), ('id', int)])
        self.assertIsSubclass(Emp, tuple)
        joe = Emp('Joe', 42)
        jim = Emp(name='Jim', id=1)
        self.assertIsInstance(joe, Emp)
        self.assertIsInstance(joe, tuple)
        self.assertEqual(joe.name, 'Joe')
        self.assertEqual(joe.id, 42)
        self.assertEqual(jim.name, 'Jim')
        self.assertEqual(jim.id, 1)
        self.assertEqual(Emp.__name__, 'Emp')
        self.assertEqual(Emp._fields, ('name', 'id'))
        self.assertEqual(Emp._field_types, dict(name=str, id=int))

    @skipUnless(PY36, 'Python 3.6 required')
    def test_annotation_usage(self):
        # CoolEmployee is defined via PEP 526 class syntax in PY36_TESTS.
        tim = CoolEmployee('Tim', 9000)
        self.assertIsInstance(tim, CoolEmployee)
        self.assertIsInstance(tim, tuple)
        self.assertEqual(tim.name, 'Tim')
        self.assertEqual(tim.cool, 9000)
        self.assertEqual(CoolEmployee.__name__, 'CoolEmployee')
        self.assertEqual(CoolEmployee._fields, ('name', 'cool'))
        self.assertEqual(CoolEmployee._field_types, dict(name=str, cool=int))

    @skipUnless(PY36, 'Python 3.6 required')
    def test_namedtuple_keyword_usage(self):
        LocalEmployee = NamedTuple("LocalEmployee", name=str, age=int)
        nick = LocalEmployee('Nick', 25)
        self.assertIsInstance(nick, tuple)
        self.assertEqual(nick.name, 'Nick')
        self.assertEqual(LocalEmployee.__name__, 'LocalEmployee')
        self.assertEqual(LocalEmployee._fields, ('name', 'age'))
        self.assertEqual(LocalEmployee._field_types, dict(name=str, age=int))
        # Mixing the list form with keyword fields, or passing non-type
        # keyword values, is rejected.
        with self.assertRaises(TypeError):
            NamedTuple('Name', [('x', int)], y=str)
        with self.assertRaises(TypeError):
            NamedTuple('Name', x=1, y='a')

    def test_pickle(self):
        global Emp  # pickle wants to reference the class by name
        Emp = NamedTuple('Emp', [('name', str), ('id', int)])
        jane = Emp('jane', 37)
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            z = pickle.dumps(jane, proto)
            jane2 = pickle.loads(z)
            self.assertEqual(jane2, jane)
class IOTests(BaseTestCase):
    """Tests for the IO / TextIO / BinaryIO aliases and typing.io."""

    def test_io(self):

        def stuff(a: IO) -> AnyStr:
            return a.readline()

        a = stuff.__annotations__['a']
        # Bare IO is still generic in AnyStr.
        self.assertEqual(a.__parameters__, (AnyStr,))

    def test_textio(self):

        def stuff(a: TextIO) -> str:
            return a.readline()

        a = stuff.__annotations__['a']
        # TextIO/BinaryIO are fully concrete: no free parameters.
        self.assertEqual(a.__parameters__, ())

    def test_binaryio(self):

        def stuff(a: BinaryIO) -> bytes:
            return a.readline()

        a = stuff.__annotations__['a']
        self.assertEqual(a.__parameters__, ())

    def test_io_submodule(self):
        from typing.io import IO, TextIO, BinaryIO, __all__, __name__
        self.assertIs(IO, typing.IO)
        self.assertIs(TextIO, typing.TextIO)
        self.assertIs(BinaryIO, typing.BinaryIO)
        self.assertEqual(set(__all__), set(['IO', 'TextIO', 'BinaryIO']))
        self.assertEqual(__name__, 'typing.io')
class RETests(BaseTestCase):
    # Much of this is really testing _TypeAlias.

    def test_basics(self):
        pat = re.compile('[a-z]+', re.I)
        self.assertIsSubclass(pat.__class__, Pattern)
        self.assertIsSubclass(type(pat), Pattern)
        self.assertIsInstance(pat, Pattern)

        mat = pat.search('12345abcde.....')
        self.assertIsSubclass(mat.__class__, Match)
        self.assertIsSubclass(type(mat), Match)
        self.assertIsInstance(mat, Match)

        # these should just work
        p = Pattern[Union[str, bytes]]
        m = Match[Union[bytes, str]]

    def test_errors(self):
        with self.assertRaises(TypeError):
            # Doesn't fit AnyStr.
            Pattern[int]
        with self.assertRaises(TypeError):
            # Can't change type vars?
            Match[T]
        m = Match[Union[str, bytes]]
        with self.assertRaises(TypeError):
            # Too complicated?
            m[str]
        with self.assertRaises(TypeError):
            # We don't support isinstance().
            isinstance(42, Pattern[str])

    def test_repr(self):
        self.assertEqual(repr(Pattern), 'Pattern[~AnyStr]')
        self.assertEqual(repr(Pattern[str]), 'Pattern[str]')
        self.assertEqual(repr(Pattern[bytes]), 'Pattern[bytes]')
        self.assertEqual(repr(Match), 'Match[~AnyStr]')
        self.assertEqual(repr(Match[str]), 'Match[str]')
        self.assertEqual(repr(Match[bytes]), 'Match[bytes]')

    def test_re_submodule(self):
        from typing.re import Match, Pattern, __all__, __name__
        self.assertIs(Match, typing.Match)
        self.assertIs(Pattern, typing.Pattern)
        self.assertEqual(set(__all__), set(['Match', 'Pattern']))
        self.assertEqual(__name__, 'typing.re')

    def test_cannot_subclass(self):
        with self.assertRaises(TypeError) as ex:

            class A(typing.Match):
                pass

        # The error message is part of the contract being tested.
        self.assertEqual(str(ex.exception),
                         "Cannot subclass typing._TypeAlias")
class AllTests(BaseTestCase):
    """Tests for __all__."""

    def test_all(self):
        from typing import __all__ as a
        # Just spot-check the first and last of every category.
        self.assertIn('AbstractSet', a)
        self.assertIn('ValuesView', a)
        self.assertIn('cast', a)
        self.assertIn('overload', a)
        # ContextManager is only exported when the stdlib ABC exists.
        if hasattr(contextlib, 'AbstractContextManager'):
            self.assertIn('ContextManager', a)
        # Check that io and re are not exported.
        self.assertNotIn('io', a)
        self.assertNotIn('re', a)
        # Spot-check that stdlib modules aren't exported.
        self.assertNotIn('os', a)
        self.assertNotIn('sys', a)
        # Check that Text is defined.
        self.assertIn('Text', a)
# Run the full suite when executed as a script.
if __name__ == '__main__':
    main()
| marcinkwiatkowski/buck | third-party/py/typing/src/test_typing.py | Python | apache-2.0 | 65,695 | [
"VisIt"
] | 55327879019aa730abd56334975d818f131d6d56dbdb54600033b37f282250c9 |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Package Setup script for TensorFlow GNN."""
import os
import platform
import subprocess
import sys
import setuptools
from setuptools import find_packages
from setuptools import setup
from setuptools.command.install import install
from setuptools.dist import Distribution
# pylint:disable=g-bad-import-order
# setuptools must be imported prior to distutils.
from distutils import spawn
from distutils.command import build
# pylint:enable=g-bad-import-order
class _BuildCommand(build.build):
  """Build everything needed to install.

  This overrides the original distutils "build" command to to run bazel_build
  command instead, before any sub_commands. This is convenient in order to
  generate protocol buffer files and eventually also build C++ extension
  modules.

  The build command is also invoked from bdist_wheel and install command,
  therefore this implementation covers the following commands:
  - pip install . (which invokes bdist_wheel)
  - python setup.py install (which invokes install command)
  - python setup.py bdist_wheel (which invokes bdist_wheel command)
  """

  def _build_cc_extensions(self):
    # Predicate for the sub_commands entry below; always build.
    return True

  # Add the "bazel_build" command as the first sub-command of "build". Each
  # sub_command of "build" (e.g. "build_py", "build_ext", etc.) is executed
  # sequentially when running a "build" command, if the second item in the tuple
  # (predicate method) is evaluated to true.
  sub_commands = [
      ('bazel_build', _build_cc_extensions)] + build.build.sub_commands
class _BazelBuildCommand(setuptools.Command):
  """Build C++ extensions and public protos with Bazel.

  Running this command will populate the *_pb2.py files next to your *.proto
  files.
  """

  def initialize_options(self):
    # Required by setuptools.Command; nothing to initialize here.
    pass

  def finalize_options(self):
    # Locate the bazel binary once; fail early with a clear message if it
    # is not installed.
    self._bazel_cmd = spawn.find_executable('bazel')
    if not self._bazel_cmd:
      raise RuntimeError(
          'Could not find "bazel" binary. Please visit '
          'https://docs.bazel.build/versions/master/install.html for '
          'installation instruction.')
    # Platform-specific flags passed to every bazel invocation.
    self._additional_build_options = []
    if platform.system() == 'Darwin':
      self._additional_build_options = ['--macos_minimum_os=10.9']
    elif platform.system() == 'Windows':
      self._additional_build_options = ['--copt=-DWIN32_LEAN_AND_MEAN']

  def run(self):
    # check_call raises CalledProcessError on a non-zero bazel exit code,
    # aborting the build.
    subprocess.check_call(
        [self._bazel_cmd,
         'run', '-c', 'opt', '--experimental_repo_remote_exec'] +
        self._additional_build_options +
        ['//package:move_generated_files'],
        # Bazel should be invoked in a directory containing bazel WORKSPACE
        # file, which is the root directory.
        cwd=os.path.dirname(os.path.realpath(__file__)),
        env=dict(os.environ, PYTHON_BIN_PATH=sys.executable))
# TFDV is not a purelib. However because of the extension module is not built
# by setuptools, it will be incorrectly treated as a purelib. The following
# works around that bug.
class _InstallPlatlibCommand(install):
  """Install command that forces platlib treatment of the package."""

  def finalize_options(self):
    install.finalize_options(self)
    # Redirect the install target from purelib to platlib.
    self.install_lib = self.install_platlib
class _SourceDistributionWithProtos(Distribution):
  """Proto-only deps don't require OS specific wheels."""

  def is_pure(self):
    # Pure Python: allows a universal (non-platform-tagged) wheel.
    return True

  def has_ext_modules(self):
    return False
def select_constraint(default, nightly=None, git_master=None):
  """Select dependency constraint based on TFX_DEPENDENCY_SELECTOR env var.

  Args:
    default: Constraint returned when the env var is unset/unrecognized, or
      when the selected alternative was not provided.
    nightly: Constraint returned for selector 'NIGHTLY', if given.
    git_master: Constraint returned for selector 'GIT_MASTER', if given.

  Returns:
    The chosen constraint string ('' for 'UNCONSTRAINED').
  """
  selector = os.environ.get('TFX_DEPENDENCY_SELECTOR')
  # Guard-clause style: first matching selector wins, otherwise default.
  if selector == 'UNCONSTRAINED':
    return ''
  if selector == 'NIGHTLY' and nightly is not None:
    return nightly
  if selector == 'GIT_MASTER' and git_master is not None:
    return git_master
  return default
def get_version():
  """Get version from version module."""
  version_path = os.path.join(os.path.dirname(__file__), 'tensorflow_gnn')
  # NOTE: mutates sys.path so the bare `version` module inside the package
  # directory becomes importable; the entry is intentionally left in place.
  sys.path.insert(0, version_path)
  # pytype: disable=import-error  # pylint: disable=g-import-not-at-top
  from version import __version__ as v
  return v
# Get the long description from the README file.
# BUG FIX: read with an explicit encoding — the platform default (e.g. cp1252
# on Windows) can fail to decode a UTF-8 README and break the build.
with open('README.md', encoding='utf-8') as fp:
  _LONG_DESCRIPTION = fp.read()

# Module paths exposed below as `tfgnn_<basename>` console entry points.
console_scripts = [
    'tensorflow_gnn.tools.generate_training_data',
    'tensorflow_gnn.tools.print_training_data',
    'tensorflow_gnn.tools.sampled_stats',
    'tensorflow_gnn.tools.validate_graph_schema',
]

setup(
    name='tensorflow-gnn',
    version=get_version(),
    author='Google LLC',
    author_email='tensorflow-gnn@googlegroups.com',
    license='Apache 2.0',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: POSIX :: Linux',
        'Operating System :: Microsoft :: Windows',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3 :: Only',
        'Topic :: Scientific/Engineering',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
        'Topic :: Scientific/Engineering :: Mathematics',
        'Topic :: Software Development',
        'Topic :: Software Development :: Libraries',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
    namespace_packages=[],
    # Make sure to sync the versions of common dependencies (absl-py, numpy,
    # six, and protobuf) with TF.
    install_requires=[
        'absl-py',
        'apache-beam[gcp]>=2.32',
        'grpcio',
        'matplotlib',
        'mock',
        'networkx',
        'numpy',
        'protobuf>=3.17',
        'pyarrow',
        'pygraphviz',
        'scipy',
        'six',
        'tensorflow-cpu>=2.7.0',
    ],
    python_requires='>=3.9,<4',
    packages=find_packages(),
    include_package_data=True,
    # Ship the .proto sources alongside the generated Python bindings.
    package_data={'': ['*.proto']},
    zip_safe=False,
    distclass=_SourceDistributionWithProtos,
    description='A library for building scalable graph neural networks in TensorFlow.',
    long_description=_LONG_DESCRIPTION,
    long_description_content_type='text/markdown',
    keywords='tensorflow gnn graph',
    download_url='https://github.com/tensorflow/gnn.git',
    requires=[],
    # Custom commands: force platlib install and route builds through bazel.
    cmdclass={
        'install': _InstallPlatlibCommand,
        'build': _BuildCommand,
        'bazel_build': _BazelBuildCommand,
    },
    entry_points={
        'console_scripts': [
            'tfgnn_{}={}:main'.format(libname.split('.')[-1], libname)
            for libname in console_scripts
        ],
    }
)
| tensorflow/gnn | setup.py | Python | apache-2.0 | 7,256 | [
"VisIt"
] | 6250f957e751a0459bf3d8132527fca230f5bcbad2dd10ac35fa3efcbc13c7ec |
#!/usr/bin/python3
import argparse
import os
import sys
import copy
import random
import pysam
import sys
import gffutils
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
import gzip
def build_gene_dict(annotation_filename):
    """Map every transcript ID in a GTF annotation to its parent gene ID."""
    db_filename = "{}.db".format(annotation_filename)
    # The sqlite-backed gffutils DB is expensive to create; build it once and
    # reuse it on subsequent runs.
    if not os.path.exists(db_filename):
        gffutils.create_db(annotation_filename, dbfn=db_filename)
    annotation_db = gffutils.FeatureDB(db_filename, keep_order=True)
    return {
        transcript.id: gene.id
        for gene in annotation_db.features_of_type('gene')
        for transcript in annotation_db.children(gene, featuretype="transcript")
    }
def split_bam(aln_filename, gene_chr_dict, gene_list, out_dir, unmapped_reads):
    """Split a Salmon BAM/SAM alignment into per-gene gzipped FASTA files.

    Args:
        aln_filename: Path to the Salmon alignment; '.bam' is opened binary,
            anything else as SAM text.
        gene_chr_dict: Dict mapping transcript name -> gene ID.
        gene_list: Collection of gene IDs whose reads should be written.
        out_dir: Directory receiving one '<gene>.fa.gz' file per gene.
        unmapped_reads: Dict mapping read ID -> SeqRecord of its unmapped
            mate, emitted next to the aligned read so both can be remapped.
    """
    out_files = {}
    input_mode = 'rb' if aln_filename.endswith('bam') else 'r'
    try:
        with pysam.AlignmentFile(aln_filename, input_mode) as aln_file:
            for aln in aln_file:
                tr_name = aln_file.get_reference_name(aln.reference_id)
                if tr_name not in gene_chr_dict:
                    continue
                if gene_chr_dict[tr_name] in gene_list:
                    outf = gene_chr_dict[tr_name]
                    # Lazily open one gzip output per gene on first use.
                    if gene_chr_dict[tr_name] not in out_files:
                        output_filename = "{}/{}.fa.gz".format(out_dir, outf)
                        file_handle = gzip.open(output_filename, "wt")
                        out_files[outf] = file_handle
                    fasta_seq = SeqRecord(Seq(aln.query_sequence), id=aln.query_name, name=aln.query_name, description="From Salmon alignment file")
                    SeqIO.write(fasta_seq, out_files[outf], "fasta")
                    if fasta_seq.id in unmapped_reads:
                        SeqIO.write(unmapped_reads[fasta_seq.id], out_files[outf], "fasta")
    finally:
        # BUG FIX: the original never closed these handles. An unclosed gzip
        # stream can lose buffered data and its footer, truncating output.
        for handle in out_files.values():
            handle.close()
def parse_unmapped_file(unmapped_filename, sample_filenames):
    """Recover unmapped mates listed by Salmon from the original FASTQ files.

    Args:
        unmapped_filename: Salmon 'unmapped' file with lines '<tag> <type>'.
        sample_filenames: The two gzipped FASTQ sample files (mate 1 and 2).

    Returns:
        Dict mapping read ID -> SeqRecord of the mate that failed to map,
        with '/1' or '/2' appended to the record ID.
    """
    unmapped_tags = set()
    with open(unmapped_filename) as unmapped_file:
        for line in unmapped_file:
            tag, unmap_type = line.strip('\n').split(' ')
            if unmap_type in ['m1', 'm2']:
                # 'm1' means mate 1 mapped, so the read to recover is mate 2
                # (and vice versa) — hence the swapped suffix.
                file_suff = "2" if unmap_type == 'm1' else "1"
                unmapped_tags.add((tag, file_suff))
    print("Parsing samples to retrieve reads")
    unmapped_reads = {}
    for sample_fn in sample_filenames:
        print("Parsing {}".format(sample_fn))
        file_name = os.path.basename(sample_fn)
        # Mate suffix ('1' or '2') encoded in the filename: '<name>_<suffix>.fastq.gz'.
        file_suff = file_name[file_name.find('_')+1:-len('.fastq.gz')]
        # BUG FIX: use a context manager — the original leaked one open gzip
        # handle per sample file.
        with gzip.open(sample_fn, "rt") as file_handle:
            for record in SeqIO.parse(file_handle, "fastq"):
                test_elem = (record.id, file_suff)
                if test_elem in unmapped_tags:
                    record_copy = copy.deepcopy(record)
                    record_copy.id = "{}/{}".format(record_copy.id, file_suff)
                    unmapped_reads[record.id] = record_copy
    return unmapped_reads
def main():
    """Split a Salmon alignment into per-gene FASTA files, batching genes.

    Genes are processed in fixed-size batches so the alignment file is
    re-scanned once per batch instead of holding every output file open.
    """
    batch_size = 930  # genes per split_bam pass; bounds open file handles
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('alignment_filename', metavar='BAM/SAM FILE', help='Salmon output BAM/SAM')
    parser.add_argument('annotation_filename', metavar='GTF FILE', help='Annotation file')
    parser.add_argument('unmapped_filename', metavar='UNMAPPED FILE', help='Unmapped file from Salmon')
    parser.add_argument('sample_filenames', metavar='SAMPLE FILES', nargs=2, help='Reads sample')
    # BUG FIX: required expects a bool, not the string 'true' (which only
    # worked because non-empty strings are truthy).
    parser.add_argument('-g', '--gene-list', metavar='GENE FILE', dest='genes',
                        help='File with a list of target genes', required=True)
    parser.add_argument('-o', '--output-dir', dest='output_dir', help='Output Directory',
                        default='.')
    args = parser.parse_args()
    out_dir = os.path.abspath(args.output_dir)
    if not os.path.isfile(args.alignment_filename):
        print("Error: {} is not a file.".format(args.alignment_filename))
        print("Aborting...")
        sys.exit(1)
    if os.path.exists(out_dir) and os.path.isfile(out_dir):
        print("Error: {} is a file.".format(out_dir))
        print("Aborting...")
        sys.exit(1)
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)
    print("Parsing GTF annotation")
    gene_chr_dict = build_gene_dict(args.annotation_filename)
    print("Retrieving unmapped reads")
    unmapped_reads = parse_unmapped_file(args.unmapped_filename, args.sample_filenames)
    print("unmapped reads that will be remapped: {}".format(len(unmapped_reads)))
    print("Split BAM")
    gene_list = list()
    tot_genes = 0
    with open(args.genes, "r") as gf:
        # Iterate lazily instead of readlines() to avoid loading the whole
        # gene file into memory.
        for g in gf:
            gene_list.append(g.rstrip())
            tot_genes += 1
            # BUG FIX: the original flushed before counting the current gene
            # and skipped the counter increment on the flush iteration, so
            # every batch after the first held 931 genes instead of 930.
            if len(gene_list) == batch_size:
                print("Parsing BAM alignments")
                split_bam(args.alignment_filename, gene_chr_dict, gene_list, out_dir, unmapped_reads)
                gene_list = list()
                print("Genes completed: {}".format(tot_genes))
    # Process the final partial batch, if any.
    if gene_list:
        print("Parsing BAM alignments")
        split_bam(args.alignment_filename, gene_chr_dict, gene_list, out_dir, unmapped_reads)
    return
# Run the splitter only when executed as a script (not on import).
if __name__ == "__main__":
    main()
| AlgoLab/galig | paper/experiments/SimulatedData/tools/splitSalmonBAM.py | Python | gpl-3.0 | 5,273 | [
"pysam"
] | 2d022d6f98d0512b99b19048eb431364a5b034379302a5e9f190aae5a15a81b9 |
stations = { 'acheng': 'ACB',
'aershan': 'ART',
'aershanbei': 'ARX',
'aihe': 'AHP',
'aijiacun': 'AJJ',
'ajin': 'AJD',
'akesu': 'ASR',
'aketao': 'AER',
'alashankou': 'AKR',
'alihe': 'AHX',
'alongshan': 'ASX',
'amuer': 'JTX',
'ananzhuang': 'AZM',
'anda': 'ADX',
'ande': 'ARW',
'anding': 'ADP',
'angangxi': 'AAX',
'anguang': 'AGT',
'anhua': 'PKQ',
'anjia': 'AJB',
'ankang': 'AKY',
'ankouyao': 'AYY',
'anlong': 'AUZ',
'anlu': 'ALN',
'anping': 'APT',
'anqing': 'AQH',
'anqingxi': 'APH',
'anren': 'ARG',
'anshan': 'AST',
'anshanxi': 'AXT',
'anshun': 'ASW',
'anshunxi': 'ASE',
'antang': 'ATV',
'antingbei': 'ASH',
'antu': 'ATL',
'antuxi': 'AXL',
'anxi': 'AXS',
'anyang': 'AYF',
'anyangdong': 'ADF',
'aojiang': 'ARH',
'aolibugao': 'ALD',
'atushi': 'ATR',
'babu': 'BBE',
'bachu': 'BCR',
'badaling': 'ILP',
'badong': 'BNN',
'baibiguan': 'BGV',
'baicheng': 'BCT',
'baigou': 'FEP',
'baiguo': 'BGM',
'baihe': 'BEL',
'baihedong': 'BIY',
'baihexian': 'BEY',
'baijian': 'BAP',
'baijigou': 'BJJ',
'baijipo': 'BBM',
'baikuipu': 'BKB',
'bailang': 'BRZ',
'bailixia': 'AAP',
'baimajing': 'BFQ',
'baiqi': 'BQP',
'baiquan': 'BQL',
'baise': 'BIZ',
'baisha': 'BSW',
'baishanshi': 'HJL',
'baishapo': 'BPM',
'baishishan': 'BAL',
'baishuijiang': 'BSY',
'baishuixian': 'BGY',
'baishuizhen': 'BUM',
'baiyangdian': 'FWP',
'baiyi': 'FHW',
'baiyinchagan': 'BYC',
'baiyinhuanan': 'FNC',
'baiyinhushuo': 'BCD',
'baiyinshi': 'BNJ',
'baiyintala': 'BID',
'baiyinxi': 'BXJ',
'baiyunebo': 'BEC',
'bajiaotai': 'BTD',
'balin': 'BLX',
'bamiancheng': 'BMD',
'bamiantong': 'BMB',
'bancheng': 'BUP',
'banmaoqing': 'BNM',
'bantian': 'BTQ',
'baodi': 'BPP',
'baoding': 'BDP',
'baodingdong': 'BMP',
'baohuashan': 'BWH',
'baoji': 'BJY',
'baojinan': 'BBY',
'baokang': 'BKD',
'baolage': 'BQC',
'baolin': 'BNB',
'baolongshan': 'BND',
'baoqing': 'BUB',
'baoquanling': 'BQB',
'baotou': 'BTC',
'baotoudong': 'BDC',
'bashan': 'BAY',
'baxiantong': 'VXD',
'bayangaole': 'BAC',
'bayuquan': 'BYT',
'bazhong': 'IEW',
'bazhongdong': 'BDE',
'bazhou': 'RMP',
'bazhouxi': 'FOP',
'beian': 'BAB',
'beibei': 'BPW',
'beidaihe': 'BEP',
'beihai': 'BHZ',
'beijiao': 'IBQ',
'beijing': 'BJP',
'beijingbei': 'VAP',
'beijingdong': 'BOP',
'beijingnan': 'VNP',
'beijingxi': 'BXP',
'beijingzi': 'BRT',
'beiliu': 'BOZ',
'beimaquanzi': 'BRP',
'beipiaonan': 'RPD',
'beitai': 'BTT',
'beitun': 'BYP',
'beitunshi': 'BXR',
'beiying': 'BIV',
'beiyinhe': 'BYB',
'beizhai': 'BVP',
'bencha': 'FWH',
'bengbu': 'BBH',
'bengbunan': 'BMH',
'benhong': 'BVC',
'benxi': 'BXT',
'benxihu': 'BHT',
'benxixincheng': 'BVT',
'bijiang': 'BLQ',
'bijiashan': 'BSB',
'bijiguan': 'BJM',
'binhai': 'FHP',
'binhaibei': 'FCP',
'binjiang': 'BJB',
'binxian': 'BXY',
'binyang': 'UKZ',
'binzhou': 'BIK',
'bishan': 'FZW',
'boao': 'BWQ',
'bobai': 'BBZ',
'boketu': 'BKX',
'bole': 'BOR',
'boli': 'BLB',
'botou': 'BZP',
'boxing': 'BXK',
'bozhou': 'BZH',
'buhai': 'BUT',
'buliekai': 'BLR',
'caijiagou': 'CJT',
'caijiapo': 'CJY',
'caishan': 'CON',
'cangnan': 'CEH',
'cangshi': 'CST',
'cangxi': 'CXE',
'cangzhou': 'COP',
'cangzhouxi': 'CBP',
'caohai': 'WBW',
'caohekou': 'CKT',
'caoshi': 'CSL',
'caoxian': 'CXK',
'caozili': 'CFP',
'ceheng': 'CHZ',
'cenxi': 'CNZ',
'chabuga': 'CBC',
'chaigang': 'CGT',
'chaigoupu': 'CGV',
'chaihe': 'CHB',
'chajiang': 'CAM',
'chaka': 'CVO',
'chaling': 'CDG',
'chalingnan': 'CNG',
'changcheng': 'CEJ',
'changchong': 'CCM',
'changchun': 'CCT',
'changchunnan': 'CET',
'changchunxi': 'CRT',
'changde': 'VGQ',
'changdian': 'CDT',
'changge': 'CEF',
'changle': 'CLK',
'changli': 'CLP',
'changlingzi': 'CLT',
'changlinhe': 'FVH',
'changnong': 'CNJ',
'changping': 'DAQ',
'changpingbei': 'VBP',
'changpingdong': 'FQQ',
'changpoling': 'CPM',
'changqingqiao': 'CQJ',
'changsha': 'CSQ',
'changshanan': 'CWQ',
'changshantun': 'CVT',
'changshou': 'EFW',
'changshoubei': 'COW',
'changshouhu': 'CSE',
'changting': 'CES',
'changtingnan': 'CNS',
'changtingzhen': 'CDB',
'changtu': 'CTT',
'changtuxi': 'CPT',
'changwu': 'CWY',
'changxing': 'CBH',
'changxingnan': 'CFH',
'changyang': 'CYN',
'changyuan': 'CYF',
'changzheng': 'CZJ',
'changzhi': 'CZF',
'changzhibei': 'CBF',
'changzhou': 'CZH',
'changzhoubei': 'ESH',
'changzhuang': 'CVK',
'chaohu': 'CIH',
'chaohudong': 'GUH',
'chaolianggou': 'CYP',
'chaoshan': 'CBQ',
'chaoyang': 'CYD',
'chaoyangchuan': 'CYL',
'chaoyangdi': 'CDD',
'chaoyangzhen': 'CZL',
'chaozhou': 'CKQ',
'chasuqi': 'CSC',
'chengcheng': 'CUY',
'chengde': 'CDP',
'chengdedong': 'CCP',
'chengdu': 'CDW',
'chengdudong': 'ICW',
'chengdunan': 'CNW',
'chenggaozi': 'CZB',
'chenggu': 'CGY',
'chengjisihan': 'CJX',
'chenguanying': 'CAJ',
'chengyang': 'CEK',
'chengzitan': 'CWT',
'chenming': 'CMB',
'chenqing': 'CQB',
'chenxi': 'CXQ',
'chenxiangtun': 'CXT',
'chenzhou': 'CZQ',
'chenzhouxi': 'ICQ',
'chezhuanwan': 'CWM',
'chibi': 'CBN',
'chibibei': 'CIN',
'chifeng': 'CFD',
'chifengxi': 'CID',
'chizhou': 'IYH',
'chongqing': 'CQW',
'chongqingbei': 'CUW',
'chongqingnan': 'CRW',
'chongren': 'CRG',
'chongzuo': 'CZZ',
'chuangyecun': 'CEX',
'chunwan': 'CQQ',
'chunyang': 'CAL',
'chushan': 'CSB',
'chuxiong': 'COM',
'chuzhou': 'CXH',
'chuzhoubei': 'CUH',
'cili': 'CUQ',
'cishan': 'CSP',
'cixi': 'CRP',
'cixian': 'CIP',
'ciyao': 'CYK',
'congjiang': 'KNW',
'cuihuangkou': 'CHP',
'cuogang': 'CAX',
'daan': 'RAT',
'daanbei': 'RNT',
'daba': 'DBJ',
'daban': 'DBC',
'dachaigou': 'DGJ',
'dacheng': 'DCT',
'dadenggou': 'DKJ',
'dafangnan': 'DNE',
'daguan': 'RGW',
'daguantun': 'DTT',
'dagushan': 'RMT',
'dahongqi': 'DQD',
'dahuichang': 'DHP',
'dahushan': 'DHD',
'dailing': 'DLB',
'daixian': 'DKV',
'daiyue': 'RYV',
'dajiagou': 'DJT',
'dajian': 'DFP',
'daju': 'DIM',
'dakoutun': 'DKP',
'dalateqi': 'DIC',
'dalatexi': 'DNC',
'dali': 'DKM',
'dalian': 'DLT',
'dalianbei': 'DFT',
'dalin': 'DLD',
'daluhao': 'DLC',
'dandong': 'DUT',
'dandongxi': 'RWT',
'danfeng': 'DGY',
'dangshan': 'DKH',
'dangshannan': 'PRH',
'dangtudong': 'OWH',
'dangyang': 'DYN',
'dani': 'DNZ',
'dantu': 'RUH',
'danxiashan': 'IRQ',
'danyang': 'DYH',
'danyangbei': 'EXH',
'daobao': 'RBT',
'daoerdeng': 'DRD',
'daoqing': 'DML',
'daozhou': 'DFZ',
'dapanshi': 'RPP',
'dapingfang': 'DPD',
'dapu': 'DPI',
'daqilaha': 'DQX',
'daqing': 'DZX',
'daqingdong': 'LFX',
'daqinggou': 'DSD',
'daqingxi': 'RHX',
'dashiqiao': 'DQT',
'dashitou': 'DSL',
'dashitounan': 'DAL',
'dashizhai': 'RZT',
'datianbian': 'DBM',
'datong': 'DTV',
'datongxi': 'DTO',
'datun': 'DNT',
'dawang': 'WWQ',
'dawangtan': 'DZZ',
'dawanzi': 'DFM',
'dawukou': 'DFJ',
'daxing': 'DXX',
'daxinggou': 'DXL',
'dayan': 'DYX',
'dayangshu': 'DUX',
'dayebei': 'DBN',
'daying': 'DYV',
'dayingdong': 'IAW',
'dayingzhen': 'DJP',
'dayingzi': 'DZD',
'dayu': 'DYG',
'dayuan': 'DYZ',
'dazhanchang': 'DTJ',
'dazhangzi': 'DAP',
'dazhou': 'RXW',
'dazhuyuan': 'DZY',
'dazunan': 'FQW',
'dean': 'DAG',
'debao': 'RBZ',
'debosi': 'RDT',
'dechang': 'DVW',
'deerbuer': 'DRX',
'dehui': 'DHT',
'dehuixi': 'DXT',
'delingha': 'DHO',
'dengshahe': 'DWT',
'dengta': 'DGT',
'dengzhou': 'DOF',
'deqing': 'DRH',
'deqingxi': 'MOH',
'dexing': 'DWG',
'deyang': 'DYW',
'dezhou': 'DZP',
'dezhoudong': 'DIP',
'dianjiang': 'DJE',
'dianxin': 'DXM',
'didao': 'DDB',
'dingbian': 'DYJ',
'dinghudong': 'UWQ',
'dinghushan': 'NVQ',
'dingnan': 'DNG',
'dingtao': 'DQK',
'dingxi': 'DSJ',
'dingxiang': 'DXV',
'dingyuan': 'EWH',
'dingzhou': 'DXP',
'dingzhoudong': 'DOP',
'diwopu': 'DWJ',
'dizhuang': 'DVQ',
'dongandong': 'DCZ',
'dongbianjing': 'DBB',
'dongdaihe': 'RDD',
'dongerdaohe': 'DRB',
'dongfang': 'UFQ',
'dongfanghong': 'DFB',
'dongfeng': 'DIL',
'donggangbei': 'RGT',
'dongguan': 'RTQ',
'dongguandong': 'DMQ',
'dongguang': 'DGP',
'donghai': 'DHB',
'donghaixian': 'DQH',
'dongjin': 'DKB',
'dongjingcheng': 'DJB',
'donglai': 'RVD',
'dongmiaohe': 'DEP',
'dongmingcun': 'DMD',
'dongmingxian': 'DNF',
'dongsheng': 'DOC',
'dongshengxi': 'DYC',
'dongtai': 'DBH',
'dongtonghua': 'DTL',
'dongwan': 'DRJ',
'dongxiang': 'DXG',
'dongxinzhuang': 'DXD',
'dongxu': 'RXP',
'dongying': 'DPK',
'dongyingnan': 'DOK',
'dongyudi': 'DBV',
'dongzhen': 'DNV',
'dongzhi': 'DCH',
'dongzhuang': 'DZV',
'douluo': 'DLV',
'douzhangzhuang': 'RZP',
'douzhuang': 'ROP',
'duanzhou': 'WZQ',
'duge': 'DMM',
'duiqingshan': 'DQB',
'duizhen': 'DWV',
'dujia': 'DJL',
'dujiangyan': 'DDW',
'dulitun': 'DTX',
'dunhua': 'DHL',
'dunhuang': 'DHJ',
'dushan': 'RWW',
'dushupu': 'DPM',
'duyun': 'RYW',
'duyundong': 'KJW',
'ebian': 'EBW',
'eerduosi': 'EEC',
'ejina': 'EJC',
'emei': 'EMW',
'emeishan': 'IXW',
'enshi': 'ESN',
'erdaogoumen': 'RDP',
'erdaowan': 'RDX',
'erlian': 'RLC',
'erlong': 'RLD',
'erlongshantun': 'ELA',
'ermihe': 'RML',
'erying': 'RYJ',
'ezhou': 'ECN',
'ezhoudong': 'EFN',
'faer': 'FEM',
'fanchangxi': 'PUH',
'fangchenggangbei': 'FBZ',
'fanjiatun': 'FTT',
'fanshi': 'FSV',
'fanzhen': 'VZK',
'faqi': 'FQE',
'feidong': 'FIH',
'feixian': 'FXK',
'fengcheng': 'FCG',
'fengchengdong': 'FDT',
'fengchengnan': 'FNG',
'fengdu': 'FUW',
'fenghua': 'FHH',
'fenghuangcheng': 'FHT',
'fenghuangjichang': 'FJQ',
'fenglezhen': 'FZB',
'fenglingdu': 'FLV',
'fengshuicun': 'FSJ',
'fengshun': 'FUQ',
'fengtun': 'FTX',
'fengxian': 'FXY',
'fengyang': 'FUH',
'fengzhen': 'FZC',
'fengzhou': 'FZY',
'fenhe': 'FEV',
'fenyang': 'FAV',
'fenyi': 'FYG',
'foshan': 'FSQ',
'fuan': 'FAS',
'fuchuan': 'FDZ',
'fuding': 'FES',
'fuhai': 'FHR',
'fujin': 'FIB',
'fulaerji': 'FRX',
'fuling': 'FLW',
'fulingbei': 'FEW',
'fuliqu': 'FLJ',
'fulitun': 'FTB',
'funan': 'FNH',
'funing': 'FNP',
'fuqing': 'FQS',
'fuquan': 'VMW',
'fushankou': 'FKP',
'fushanzhen': 'FZQ',
'fushun': 'FST',
'fushunbei': 'FET',
'fusong': 'FSL',
'fusui': 'FSZ',
'futian': 'NZQ',
'futuyu': 'FYP',
'fuxian': 'FEY',
'fuxiandong': 'FDY',
'fuxin': 'FXD',
'fuyang': 'FYH',
'fuyu': 'FYX',
'fuyuan': 'FYM',
'fuyubei': 'FBT',
'fuzhou': 'FZG',
'fuzhoubei': 'FBG',
'fuzhoudong': 'FDG',
'fuzhounan': 'FYS',
'gaizhou': 'GXT',
'gaizhouxi': 'GAT',
'gancaodian': 'GDJ',
'gangou': 'GGL',
'gangu': 'GGJ',
'ganhe': 'GAX',
'ganluo': 'VOW',
'ganqika': 'GQD',
'ganquan': 'GQY',
'ganquanbei': 'GEY',
'ganshui': 'GSW',
'gantang': 'GNJ',
'ganzhou': 'GZG',
'gaoan': 'GCG',
'gaobeidian': 'GBP',
'gaobeidiandong': 'GMP',
'gaocheng': 'GEP',
'gaocun': 'GCV',
'gaogezhuang': 'GGP',
'gaolan': 'GEJ',
'gaoloufang': 'GFM',
'gaomi': 'GMK',
'gaoping': 'GPF',
'gaoqiaozhen': 'GZD',
'gaoshanzi': 'GSD',
'gaotai': 'GTJ',
'gaotainan': 'GAJ',
'gaotan': 'GAY',
'gaoyi': 'GIP',
'gaoyixi': 'GNP',
'gaozhou': 'GSQ',
'gashidianzi': 'GXD',
'gediannan': 'GNN',
'geermu': 'GRO',
'gegenmiao': 'GGT',
'geju': 'GEM',
'genhe': 'GEX',
'gezhenpu': 'GZT',
'gongcheng': 'GCZ',
'gongmiaozi': 'GMC',
'gongnonghu': 'GRT',
'gongpengzi': 'GPT',
'gongqingcheng': 'GAG',
'gongyi': 'GXF',
'gongyinan': 'GYF',
'gongyingzi': 'GYD',
'gongzhuling': 'GLT',
'gongzhulingnan': 'GBT',
'goubangzi': 'GBD',
'guan': 'GFP',
'guangan': 'VJW',
'guangannan': 'VUW',
'guangao': 'GVP',
'guangde': 'GRH',
'guanghan': 'GHW',
'guanghanbei': 'GVW',
'guangmingcheng': 'IMQ',
'guangnanwei': 'GNM',
'guangning': 'FBQ',
'guangningsi': 'GQT',
'guangningsinan': 'GNT',
'guangshan': 'GUN',
'guangshui': 'GSN',
'guangtongbei': 'GPM',
'guangyuan': 'GYW',
'guangyuannan': 'GAW',
'guangze': 'GZS',
'guangzhou': 'GZQ',
'guangzhoubei': 'GBQ',
'guangzhoudong': 'GGQ',
'guangzhounan': 'IZQ',
'guangzhouxi': 'GXQ',
'guanlin': 'GLF',
'guanling': 'GLE',
'guanshui': 'GST',
'guanting': 'GTP',
'guantingxi': 'KEP',
'guanzhaishan': 'GSS',
'guanzijing': 'GOT',
'guazhou': 'GZJ',
'gucheng': 'GCN',
'guchengzhen': 'GZB',
'gudong': 'GDV',
'guian': 'GAE',
'guiding': 'GTW',
'guidingbei': 'FMW',
'guidingnan': 'IDW',
'guidingxian': 'KIW',
'guigang': 'GGZ',
'guilin': 'GLZ',
'guilinbei': 'GBZ',
'guilinxi': 'GEZ',
'guiliuhe': 'GHT',
'guiping': 'GAZ',
'guixi': 'GXG',
'guiyang': 'GIW',
'guiyangbei': 'KQW',
'gujiao': 'GJV',
'gujiazi': 'GKT',
'gulang': 'GLJ',
'gulian': 'GRX',
'guojiadian': 'GDT',
'guoleizhuang': 'GLP',
'guosong': 'GSL',
'guoyang': 'GYH',
'guozhen': 'GZY',
'gushankou': 'GSP',
'gushi': 'GXN',
'gutian': 'GTS',
'gutianbei': 'GBS',
'gutianhuizhi': 'STS',
'guyuan': 'GUJ',
'guzhen': 'GEH',
'haerbin': 'HBB',
'haerbinbei': 'HTB',
'haerbindong': 'VBB',
'haerbinxi': 'VAB',
'haianxian': 'HIH',
'haibei': 'HEB',
'haicheng': 'HCT',
'haichengxi': 'HXT',
'haidongxi': 'HDO',
'haikou': 'VUQ',
'haikoudong': 'HMQ',
'hailaer': 'HRX',
'hailin': 'HRB',
'hailong': 'HIL',
'hailun': 'HLB',
'haining': 'HNH',
'hainingxi': 'EUH',
'haishiwan': 'HSO',
'haituozi': 'HZT',
'haiwan': 'RWH',
'haiyang': 'HYK',
'haiyangbei': 'HEK',
'halahai': 'HIT',
'halasu': 'HAX',
'hami': 'HMR',
'hancheng': 'HCY',
'hanchuan': 'HCN',
'hanconggou': 'HKB',
'handan': 'HDP',
'handandong': 'HPP',
'hanfuwan': 'HXJ',
'hangjinhouqi': 'HDC',
'hangu': 'HGP',
'hangzhou': 'HZH',
'hangzhoudong': 'HGH',
'hangzhounan': 'XHH',
'hanjiang': 'HJS',
'hankou': 'HKN',
'hanling': 'HAT',
'hanmaying': 'HYP',
'hanshou': 'VSQ',
'hanyin': 'HQY',
'hanyuan': 'WHW',
'hanzhong': 'HOY',
'haolianghe': 'HHB',
'hebei': 'HMB',
'hebi': 'HAF',
'hebian': 'HBV',
'hebidong': 'HFF',
'hechuan': 'WKW',
'hechun': 'HCZ',
'hefei': 'HFH',
'hefeibeicheng': 'COH',
'hefeinan': 'ENH',
'hefeixi': 'HTH',
'hegang': 'HGB',
'heichongtan': 'HCJ',
'heihe': 'HJB',
'heijing': 'HIM',
'heishui': 'HOT',
'heitai': 'HQB',
'heiwang': 'HWK',
'hejiadian': 'HJJ',
'hejianxi': 'HXP',
'hejin': 'HJV',
'hejing': 'HJR',
'hekoubei': 'HBM',
'hekounan': 'HKJ',
'heli': 'HOB',
'helong': 'HLL',
'hengdaohezi': 'HDB',
'hengfeng': 'HFG',
'henggouqiaodong': 'HNN',
'hengnan': 'HNG',
'hengshan': 'HSQ',
'hengshanxi': 'HEQ',
'hengshui': 'HSP',
'hengyang': 'HYQ',
'hengyangdong': 'HVQ',
'heping': 'VAQ',
'hepu': 'HVZ',
'heqing': 'HQM',
'heshengqiaodong': 'HLN',
'heshituoluogai': 'VSR',
'heshuo': 'VUR',
'hetian': 'VTR',
'heyang': 'HAY',
'heyangbei': 'HTY',
'heyuan': 'VIQ',
'heze': 'HIK',
'hezhou': 'HXZ',
'hongan': 'HWN',
'honganxi': 'VXN',
'hongguangzhen': 'IGW',
'hongguo': 'HEM',
'honghe': 'HPB',
'honghuagou': 'VHD',
'hongjiang': 'HFM',
'hongqing': 'HEY',
'hongshan': 'VSB',
'hongshaxian': 'VSJ',
'hongsipu': 'HSJ',
'hongtong': 'HDV',
'hongtongxi': 'HTV',
'hongxiantai': 'HTJ',
'hongxing': 'VXB',
'hongxinglong': 'VHB',
'hongyan': 'VIX',
'houma': 'HMV',
'houmaxi': 'HPV',
'houmen': 'KMQ',
'huacheng': 'VCQ',
'huade': 'HGC',
'huahu': 'KHN',
'huaian': 'AUH',
'huaiannan': 'AMH',
'huaibei': 'HRH',
'huaibin': 'HVN',
'huaihua': 'HHQ',
'huaihuanan': 'KAQ',
'huaiji': 'FAQ',
'huainan': 'HAH',
'huainandong': 'HOH',
'huairen': 'HRV',
'huairendong': 'HFV',
'huairou': 'HRP',
'huairoubei': 'HBP',
'huajia': 'HJT',
'huajiazhuang': 'HJM',
'hualin': 'HIB',
'huanan': 'HNB',
'huangbai': 'HBL',
'huangchuan': 'KCN',
'huangcun': 'HCP',
'huanggang': 'KGN',
'huanggangdong': 'KAN',
'huanggangxi': 'KXN',
'huangguayuan': 'HYM',
'huanggutun': 'HTT',
'huanghejingqu': 'HCF',
'huanghuatong': 'HUD',
'huangkou': 'KOH',
'huangling': 'ULY',
'huanglingnan': 'VLY',
'huangliu': 'KLQ',
'huangmei': 'VEH',
'huangnihe': 'HHL',
'huangshan': 'HKH',
'huangshanbei': 'NYH',
'huangshi': 'HSN',
'huangshibei': 'KSN',
'huangshidong': 'OSN',
'huangsongdian': 'HDL',
'huangyangtan': 'HGJ',
'huangyangzhen': 'HYJ',
'huangyuan': 'HNO',
'huangzhou': 'VON',
'huantai': 'VTK',
'huanxintian': 'VTB',
'huapengzi': 'HZM',
'huaqiao': 'VQH',
'huarong': 'HRN',
'huarongdong': 'HPN',
'huarongnan': 'KRN',
'huashan': 'HSY',
'huashanbei': 'HDY',
'huashannan': 'KNN',
'huaying': 'HUW',
'huayuan': 'HUN',
'huayuankou': 'HYT',
'huazhou': 'HZZ',
'huhehaote': 'HHC',
'huhehaotedong': 'NDC',
'huian': 'HNS',
'huichangbei': 'XEG',
'huidong': 'KDQ',
'huihuan': 'KHQ',
'huinong': 'HMJ',
'huishan': 'VCH',
'huitong': 'VTQ',
'huixian': 'HYY',
'huizhou': 'HCQ',
'huizhounan': 'KNQ',
'huizhouxi': 'VXQ',
'hukou': 'HKG',
'hulan': 'HUB',
'hulin': 'VLB',
'huludao': 'HLD',
'huludaobei': 'HPD',
'hulusitai': 'VTJ',
'humen': 'IUQ',
'hunchun': 'HUL',
'hunhe': 'HHT',
'huoerguosi': 'HFR',
'huojia': 'HJF',
'huolianzhai': 'HLT',
'huolinguole': 'HWD',
'huoqiu': 'FBH',
'huozhou': 'HZV',
'huozhoudong': 'HWV',
'hushiha': 'HHP',
'hushitai': 'HUT',
'huzhou': 'VZH',
'jiafeng': 'JFF',
'jiagedaqi': 'JGX',
'jialuhe': 'JLF',
'jiamusi': 'JMB',
'jian': 'VAG',
'jianchang': 'JFD',
'jianfeng': 'PFQ',
'jiangbiancun': 'JBG',
'jiangdu': 'UDH',
'jianghua': 'JHZ',
'jiangjia': 'JJB',
'jiangjin': 'JJW',
'jiangle': 'JLS',
'jiangmen': 'JWQ',
'jiangning': 'JJH',
'jiangningxi': 'OKH',
'jiangqiao': 'JQX',
'jiangshan': 'JUH',
'jiangsuotian': 'JOM',
'jiangyan': 'UEH',
'jiangyong': 'JYZ',
'jiangyou': 'JFW',
'jiangyuan': 'SZL',
'jianhu': 'AJH',
'jianningxianbei': 'JCS',
'jianou': 'JVS',
'jianouxi': 'JUS',
'jiansanjiang': 'JIB',
'jianshe': 'JET',
'jianshi': 'JRN',
'jianshui': 'JSM',
'jianyang': 'JYW',
'jianyangnan': 'JOW',
'jiaocheng': 'JNV',
'jiaohe': 'JHL',
'jiaohexi': 'JOL',
'jiaomei': 'JES',
'jiaozhou': 'JXK',
'jiaozhoubei': 'JZK',
'jiaozuo': 'JOF',
'jiaozuodong': 'WEF',
'jiashan': 'JSH',
'jiashannan': 'EAH',
'jiaxiang': 'JUK',
'jiaxing': 'JXH',
'jiaxingnan': 'EPH',
'jiaxinzi': 'JXT',
'jiayuguan': 'JGJ',
'jiayuguannan': 'JBJ',
'jidong': 'JOB',
'jieshoushi': 'JUN',
'jiexiu': 'JXV',
'jiexiudong': 'JDV',
'jieyang': 'JRQ',
'jiguanshan': 'JST',
'jijiagou': 'VJD',
'jilin': 'JLL',
'jiling': 'JLJ',
'jimobei': 'JVK',
'jinan': 'JNK',
'jinandong': 'JAK',
'jinanxi': 'JGK',
'jinbaotun': 'JBD',
'jinchang': 'JCJ',
'jincheng': 'JCF',
'jinchengbei': 'JEF',
'jinchengjiang': 'JJZ',
'jingbian': 'JIY',
'jingchuan': 'JAJ',
'jingde': 'NSH',
'jingdezhen': 'JCG',
'jingdian': 'JFP',
'jinggangshan': 'JGG',
'jinghai': 'JHP',
'jinghe': 'JHR',
'jinghenan': 'JIR',
'jingmen': 'JMN',
'jingnan': 'JNP',
'jingoutun': 'VGP',
'jingpeng': 'JPC',
'jingshan': 'JCN',
'jingtai': 'JTJ',
'jingtieshan': 'JVJ',
'jingxi': 'JMZ',
'jingxian': 'LOH',
'jingxing': 'JJP',
'jingyu': 'JYL',
'jingyuan': 'JYJ',
'jingyuanxi': 'JXJ',
'jingzhou': 'JBN',
'jinhe': 'JHB',
'jinhua': 'JBH',
'jinhuanan': 'RNH',
'jining': 'JIK',
'jiningnan': 'JAC',
'jinjiang': 'JJS',
'jinkeng': 'JKT',
'jinmacun': 'JMM',
'jinshanbei': 'EGH',
'jinshantun': 'JTB',
'jinxian': 'JUG',
'jinxiannan': 'JXG',
'jinyuewan': 'PYQ',
'jinyun': 'JYH',
'jinyunxi': 'PYH',
'jinzhai': 'JZH',
'jinzhangzi': 'JYD',
'jinzhong': 'JZV',
'jinzhou': 'JZD',
'jinzhounan': 'JOD',
'jishan': 'JVV',
'jishou': 'JIQ',
'jishu': 'JSL',
'jiujiang': 'JJG',
'jiuquan': 'JQJ',
'jiuquannan': 'JNJ',
'jiusan': 'SSX',
'jiutai': 'JTL',
'jiutainan': 'JNL',
'jiuzhuangwo': 'JVP',
'jiwen': 'JWX',
'jixi': 'JXB',
'jixian': 'JKP',
'jixibei': 'NRH',
'jixixian': 'JRH',
'jiyuan': 'JYF',
'juancheng': 'JCK',
'jubao': 'JRT',
'junan': 'JOK',
'junde': 'JDB',
'junliangchengbei': 'JMP',
'jurongxi': 'JWH',
'juxian': 'JKK',
'juye': 'JYK',
'kaian': 'KAT',
'kaifeng': 'KFF',
'kaifengbei': 'KBF',
'kaijiang': 'KAW',
'kaili': 'KLW',
'kailinan': 'QKW',
'kailu': 'KLC',
'kaitong': 'KTT',
'kaiyang': 'KVW',
'kaiyuan': 'KYT',
'kaiyuanxi': 'KXT',
'kalaqi': 'KQX',
'kangcheng': 'KCP',
'kangjinjing': 'KJB',
'kangxiling': 'KXZ',
'kangzhuang': 'KZP',
'kashi': 'KSR',
'kedong': 'KOB',
'kelamayi': 'KHR',
'kelan': 'KLV',
'keshan': 'KSB',
'keyihe': 'KHX',
'kouqian': 'KQL',
'kuandian': 'KDT',
'kuche': 'KCR',
'kuduer': 'KDX',
'kuerle': 'KLR',
'kuishan': 'KAB',
'kuitan': 'KTQ',
'kuitun': 'KTR',
'kulun': 'KLD',
'kunming': 'KMM',
'kunmingxi': 'KXM',
'kunshan': 'KSH',
'kunshannan': 'KNH',
'kunyang': 'KAM',
'lagu': 'LGB',
'laha': 'LHX',
'laibin': 'UBZ',
'laibinbei': 'UCZ',
'laituan': 'LVZ',
'laiwudong': 'LWK',
'laiwuxi': 'UXK',
'laixi': 'LXK',
'laixibei': 'LBK',
'laiyang': 'LYK',
'laiyuan': 'LYP',
'laizhou': 'LZS',
'lalin': 'LAB',
'lamadian': 'LMX',
'lancun': 'LCK',
'langang': 'LNB',
'langfang': 'LJP',
'langfangbei': 'LFP',
'langweishan': 'LRJ',
'langxiang': 'LXB',
'langzhong': 'LZE',
'lankao': 'LKF',
'lankaonan': 'LUF',
'lanling': 'LLB',
'lanlingbei': 'COK',
'lanxi': 'LWH',
'lanzhou': 'LZJ',
'lanzhoudong': 'LVJ',
'lanzhouxi': 'LAJ',
'lanzhouxinqu': 'LQJ',
'laobian': 'LLT',
'laochengzhen': 'ACQ',
'laofu': 'UFD',
'laolai': 'LAX',
'laoying': 'LXL',
'lasa': 'LSO',
'lazha': 'LEM',
'lechang': 'LCQ',
'ledong': 'UQQ',
'ledu': 'LDO',
'ledunan': 'LVO',
'leiyang': 'LYQ',
'leiyangxi': 'LPQ',
'leizhou': 'UAQ',
'lengshuijiangdong': 'UDQ',
'lepingshi': 'LPG',
'leshan': 'IVW',
'leshanbei': 'UTW',
'leshancun': 'LUM',
'liangdang': 'LDY',
'liangdixia': 'LDP',
'lianggezhuang': 'LGP',
'liangjia': 'UJT',
'liangjiadian': 'LRT',
'liangping': 'UQW',
'liangpingnan': 'LPE',
'liangshan': 'LMK',
'lianjiang': 'LJZ',
'lianjiangkou': 'LHB',
'lianshanguan': 'LGT',
'lianyuan': 'LAQ',
'lianyungang': 'UIH',
'lianyungangdong': 'UKH',
'liaocheng': 'UCK',
'liaoyang': 'LYT',
'liaoyuan': 'LYL',
'liaozhong': 'LZD',
'licheng': 'UCP',
'lichuan': 'LCN',
'liduigongyuan': 'INW',
'lijia': 'LJB',
'lijiang': 'LHM',
'lijiaping': 'LIJ',
'lijinnan': 'LNK',
'lilinbei': 'KBQ',
'liling': 'LLG',
'lilingdong': 'UKQ',
'limudian': 'LMB',
'lincheng': 'UUP',
'linchuan': 'LCG',
'lindong': 'LRC',
'linfen': 'LFV',
'linfenxi': 'LXV',
'lingaonan': 'KGQ',
'lingbao': 'LBF',
'lingbaoxi': 'LPF',
'lingbi': 'GMH',
'lingcheng': 'LGK',
'linghai': 'JID',
'lingling': 'UWZ',
'lingqiu': 'LVV',
'lingshi': 'LSV',
'lingshidong': 'UDV',
'lingshui': 'LIQ',
'lingwu': 'LNJ',
'lingyuan': 'LYD',
'lingyuandong': 'LDD',
'linhai': 'UFH',
'linhe': 'LHC',
'linjialou': 'ULK',
'linjiang': 'LQL',
'linkou': 'LKB',
'linli': 'LWQ',
'linqing': 'UQK',
'linshengpu': 'LBT',
'linxi': 'LXC',
'linxiang': 'LXQ',
'linyi': 'LVK',
'linyibei': 'UYK',
'linying': 'LNF',
'linyuan': 'LYX',
'linze': 'LEJ',
'linzenan': 'LDJ',
'liquan': 'LGY',
'lishizhai': 'LET',
'lishui': 'LDH',
'lishuzhen': 'LSB',
'litang': 'LTZ',
'liudaohezi': 'LVP',
'liuhe': 'LNL',
'liuhezhen': 'LEX',
'liujiadian': 'UDT',
'liujiahe': 'LVT',
'liulinnan': 'LKV',
'liupanshan': 'UPJ',
'liupanshui': 'UMW',
'liushuigou': 'USP',
'liushutun': 'LSD',
'liuyuan': 'DHR',
'liuyuannan': 'LNR',
'liuzhi': 'LIW',
'liuzhou': 'LZZ',
'liwang': 'VLJ',
'lixian': 'LEQ',
'liyang': 'LEH',
'lizhi': 'LZX',
'longandong': 'IDZ',
'longchang': 'LCW',
'longchangbei': 'NWW',
'longchuan': 'LUQ',
'longdongbao': 'FVW',
'longfeng': 'KFQ',
'longgou': 'LGJ',
'longgudian': 'LGM',
'longhua': 'UHP',
'longjia': 'UJL',
'longjiang': 'LJX',
'longjing': 'LJL',
'longli': 'LLW',
'longlibei': 'KFW',
'longnan': 'UNG',
'longquansi': 'UQJ',
'longshanzhen': 'LAS',
'longshi': 'LAG',
'longtangba': 'LBM',
'longxi': 'LXJ',
'longxian': 'LXY',
'longyan': 'LYS',
'longyou': 'LMH',
'longzhen': 'LZA',
'longzhuagou': 'LZT',
'loudi': 'LDQ',
'loudinan': 'UOQ',
'luan': 'UAH',
'luanhe': 'UDP',
'luanheyan': 'UNP',
'luanping': 'UPP',
'luanxian': 'UXP',
'luchaogang': 'UCH',
'lucheng': 'UTP',
'luchuan': 'LKZ',
'ludao': 'LDL',
'lueyang': 'LYY',
'lufan': 'LVM',
'lufeng': 'LLQ',
'lufengnan': 'LQM',
'lugou': 'LOM',
'lujiang': 'UJH',
'lukoupu': 'LKQ',
'luliang': 'LRM',
'lulong': 'UAP',
'luntai': 'LAR',
'luocheng': 'VCZ',
'luofa': 'LOP',
'luohe': 'LON',
'luohexi': 'LBN',
'luojiang': 'LJW',
'luojiangdong': 'IKW',
'luomen': 'LMJ',
'luoping': 'LPM',
'luopoling': 'LPP',
'luoshan': 'LRN',
'luotuoxiang': 'LTJ',
'luowansanjiang': 'KRW',
'luoyang': 'LYF',
'luoyangdong': 'LDF',
'luoyanglongmen': 'LLF',
'luoyuan': 'LVS',
'lushan': 'LSG',
'lushuihe': 'LUL',
'lutai': 'LTP',
'luxi': 'LUG',
'luzhai': 'LIZ',
'luzhaibei': 'LSZ',
'lvboyuan': 'LCF',
'lvhua': 'LWJ',
'lvliang': 'LHV',
'lvshun': 'LST',
'maanshan': 'MAH',
'maanshandong': 'OMH',
'macheng': 'MCN',
'machengbei': 'MBN',
'mahuang': 'MHZ',
'maiyuan': 'MYS',
'malan': 'MLR',
'malianhe': 'MHB',
'malin': 'MID',
'malong': 'MGM',
'manasi': 'MSR',
'manasihu': 'MNR',
'mangui': 'MHX',
'manshuiwan': 'MKW',
'manzhouli': 'MLX',
'maoba': 'MBY',
'maobaguan': 'MGY',
'maocaoping': 'KPM',
'maoershan': 'MRB',
'maolin': 'MLD',
'maoling': 'MLZ',
'maoming': 'MDQ',
'maomingxi': 'MMZ',
'maoshezu': 'MOM',
'maqiaohe': 'MQB',
'masanjia': 'MJT',
'mashan': 'MAB',
'mawei': 'VAW',
'mayang': 'MVQ',
'meihekou': 'MHL',
'meilan': 'MHQ',
'meishan': 'MSW',
'meishandong': 'IUW',
'meixi': 'MEB',
'meizhou': 'MOQ',
'mengdonghe': 'MUQ',
'mengjiagang': 'MGB',
'mengzhuang': 'MZF',
'mengzi': 'MZM',
'mengzibei': 'MBM',
'menyuan': 'MYO',
'mianchi': 'MCF',
'mianchinan': 'MNF',
'mianduhe': 'MDX',
'mianning': 'UGW',
'mianxian': 'MVY',
'mianyang': 'MYW',
'miaocheng': 'MAP',
'miaoling': 'MLL',
'miaoshan': 'MSN',
'miaozhuang': 'MZJ',
'midu': 'MDF',
'miluo': 'MLQ',
'miluodong': 'MQQ',
'mingcheng': 'MCL',
'minggang': 'MGN',
'minggangdong': 'MDN',
'mingguang': 'MGH',
'mingshuihe': 'MUT',
'mingzhu': 'MFQ',
'minhenan': 'MNO',
'minle': 'MBJ',
'minqing': 'MQS',
'minqingbei': 'MBS',
'minquan': 'MQF',
'minquanbei': 'MIF',
'mishan': 'MSB',
'mishazi': 'MST',
'miyi': 'MMW',
'miyunbei': 'MUP',
'mizhi': 'MEY',
'modaoshi': 'MOB',
'moerdaoga': 'MRX',
'mohe': 'MVX',
'moyu': 'MUR',
'mudanjiang': 'MDB',
'muling': 'MLB',
'mulitu': 'MUD',
'mupang': 'MPQ',
'muping': 'MBK',
'nailin': 'NLD',
'naiman': 'NMD',
'naluo': 'ULZ',
'nanboshan': 'NBK',
'nanbu': 'NBE',
'nancao': 'NEF',
'nancha': 'NCB',
'nanchang': 'NCG',
'nanchangxi': 'NXG',
'nancheng': 'NDG',
'nanchengsi': 'NSP',
'nanchong': 'NCW',
'nanchongbei': 'NCE',
'nandamiao': 'NMP',
'nandan': 'NDZ',
'nanfen': 'NFT',
'nanfenbei': 'NUT',
'nanfeng': 'NFG',
'nangongdong': 'NFP',
'nanguancun': 'NGP',
'nanguanling': 'NLT',
'nanhechuan': 'NHJ',
'nanhua': 'NHS',
'nanhudong': 'NDN',
'nanjiang': 'FIW',
'nanjiangkou': 'NDQ',
'nanjing': 'NJS',
'nanjingnan': 'NKH',
'nankou': 'NKP',
'nankouqian': 'NKT',
'nanlang': 'NNQ',
'nanling': 'LLH',
'nanmu': 'NMX',
'nanning': 'NNZ',
'nanningdong': 'NFZ',
'nanningxi': 'NXZ',
'nanping': 'NPS',
'nanpingbei': 'NBS',
'nanpingnan': 'NNS',
'nanqiao': 'NQD',
'nanqiu': 'NCK',
'nantai': 'NTT',
'nantong': 'NUH',
'nantou': 'NOQ',
'nanwanzi': 'NWP',
'nanxiangbei': 'NEH',
'nanxiong': 'NCQ',
'nanyang': 'NFF',
'nanyangzhai': 'NYF',
'nanyu': 'NUP',
'nanzamu': 'NZT',
'nanzhao': 'NAF',
'napu': 'NPZ',
'naqu': 'NQO',
'nayong': 'NYE',
'nehe': 'NHX',
'neijiang': 'NJW',
'neijiangbei': 'NKW',
'neixiang': 'NXF',
'nengjia': 'NJD',
'nenjiang': 'NGX',
'niangziguan': 'NIP',
'nianzishan': 'NZX',
'nihezi': 'NHD',
'nileke': 'NIR',
'nimu': 'NMO',
'ningan': 'NAB',
'ningbo': 'NGH',
'ningbodong': 'NVH',
'ningcun': 'NCZ',
'ningde': 'NES',
'ningdong': 'NOJ',
'ningdongnan': 'NDJ',
'ningguo': 'NNH',
'ninghai': 'NHH',
'ningjia': 'NVT',
'ninglingxian': 'NLF',
'ningming': 'NMZ',
'ningwu': 'NWV',
'ningxiang': 'NXQ',
'niujia': 'NJB',
'niuxintai': 'NXT',
'nongan': 'NAT',
'nuanquan': 'NQJ',
'paihuaibei': 'PHP',
'pananzhen': 'PAJ',
'panguan': 'PAM',
'panjiadian': 'PDP',
'panjin': 'PVD',
'panjinbei': 'PBD',
'panshi': 'PSL',
'panzhihua': 'PRW',
'panzhou': 'PAE',
'paozi': 'POD',
'peide': 'PDB',
'pengan': 'PAW',
'pengshan': 'PSW',
'pengshanbei': 'PPW',
'pengshui': 'PHW',
'pengyang': 'PYJ',
'pengze': 'PZG',
'pengzhou': 'PMW',
'piandian': 'PRP',
'pianling': 'PNT',
'piaoertun': 'PRT',
'pikou': 'PUT',
'pikounan': 'PKT',
'pingan': 'PAL',
'pinganyi': 'PNO',
'pinganzhen': 'PZT',
'pingbanan': 'PBE',
'pingbian': 'PBM',
'pingchang': 'PCE',
'pingdingshan': 'PEN',
'pingdingshanxi': 'BFF',
'pingdu': 'PAK',
'pingfang': 'PFB',
'pinggang': 'PGL',
'pingguan': 'PGM',
'pingguo': 'PGZ',
'pinghekou': 'PHM',
'pinghu': 'PHQ',
'pingliang': 'PIJ',
'pingliangnan': 'POJ',
'pingnannan': 'PAZ',
'pingquan': 'PQP',
'pingshan': 'PSB',
'pingshang': 'PSK',
'pingshe': 'PSV',
'pingshi': 'PSQ',
'pingtai': 'PVT',
'pingtian': 'PTM',
'pingwang': 'PWV',
'pingxiang': 'PXZ',
'pingxiangbei': 'PBG',
'pingxingguan': 'PGV',
'pingyang': 'PYX',
'pingyao': 'PYV',
'pingyaogucheng': 'PDV',
'pingyi': 'PIK',
'pingyu': 'PYP',
'pingyuan': 'PYK',
'pingyuanpu': 'PPJ',
'pingzhuang': 'PZD',
'pingzhuangnan': 'PND',
'pishan': 'PSR',
'pixian': 'PWW',
'pixianxi': 'PCW',
'pizhou': 'PJH',
'podixia': 'PXJ',
'puan': 'PAN',
'puanxian': 'PUE',
'pucheng': 'PCY',
'puchengdong': 'PEY',
'puding': 'PGW',
'pulandian': 'PLT',
'puning': 'PEQ',
'putaojing': 'PTW',
'putian': 'PTS',
'puwan': 'PWT',
'puxiong': 'POW',
'puyang': 'PYF',
'qianan': 'QOT',
'qianfeng': 'QFB',
'qianhe': 'QUY',
'qianjiang': 'QNW',
'qianjinzhen': 'QEB',
'qianmotou': 'QMP',
'qianshan': 'QXQ',
'qianwei': 'QWD',
'qianweitang': 'QWP',
'qianxian': 'QBY',
'qianyang': 'QOY',
'qiaotou': 'QAT',
'qiaoxi': 'QXJ',
'qichun': 'QRN',
'qidian': 'QDM',
'qidong': 'QMQ',
'qidongbei': 'QRQ',
'qifengta': 'QVP',
'qijiang': 'QJW',
'qijiapu': 'QBT',
'qilihe': 'QLD',
'qimen': 'QIH',
'qingan': 'QAB',
'qingbaijiangdong': 'QFW',
'qingchengshan': 'QSW',
'qingdao': 'QDK',
'qingdaobei': 'QHK',
'qingdui': 'QET',
'qingfeng': 'QFT',
'qinghe': 'QIP',
'qinghecheng': 'QYP',
'qinghemen': 'QHD',
'qinghuayuan': 'QHP',
'qingjianxian': 'QNY',
'qinglian': 'QEW',
'qinglong': 'QIB',
'qinglongshan': 'QGH',
'qingshan': 'QSB',
'qingshen': 'QVW',
'qingsheng': 'QSQ',
'qingshui': 'QUJ',
'qingshuibei': 'QEJ',
'qingtian': 'QVH',
'qingtongxia': 'QTJ',
'qingxian': 'QXP',
'qingxu': 'QUV',
'qingyangshan': 'QSJ',
'qingyuan': 'QYT',
'qingzhoushi': 'QZK',
'qinhuangdao': 'QTP',
'qinjia': 'QJB',
'qinjiazhuang': 'QZV',
'qinling': 'QLY',
'qinxian': 'QVV',
'qinyang': 'QYF',
'qinzhou': 'QRZ',
'qinzhoudong': 'QDZ',
'qionghai': 'QYQ',
'qiqihaer': 'QHX',
'qiqihaernan': 'QNB',
'qishan': 'QAY',
'qishuyan': 'QYH',
'qitaihe': 'QTB',
'qixian': 'QXV',
'qixiandong': 'QGV',
'qixiaying': 'QXC',
'qiyang': 'QWQ',
'qiyangbei': 'QVQ',
'qiying': 'QYJ',
'qiziwan': 'QZQ',
'quanjiao': 'INH',
'quanyang': 'QYL',
'quanzhou': 'QYS',
'quanzhoudong': 'QRS',
'quanzhounan': 'QNZ',
'queshan': 'QSN',
'qufu': 'QFK',
'qufudong': 'QAK',
'qujiang': 'QIM',
'qujing': 'QJM',
'qujiu': 'QJZ',
'quli': 'QLZ',
'qushuixian': 'QSO',
'quxian': 'QRW',
'quzhou': 'QEH',
'raoping': 'RVQ',
'raoyang': 'RVP',
'raoyanghe': 'RHD',
'renbu': 'RUO',
'renqiu': 'RQP',
'reshui': 'RSD',
'rikaze': 'RKO',
'rizhao': 'RZK',
'rongan': 'RAZ',
'rongchang': 'RCW',
'rongchangbei': 'RQW',
'rongcheng': 'RCK',
'ronggui': 'RUQ',
'rongjiang': 'RVW',
'rongshui': 'RSZ',
'rongxian': 'RXZ',
'rudong': 'RIH',
'rugao': 'RBH',
'ruian': 'RAH',
'ruichang': 'RCG',
'ruijin': 'RJG',
'rujigou': 'RQJ',
'rushan': 'ROK',
'ruyang': 'RYF',
'ruzhou': 'ROF',
'saihantala': 'SHC',
'salaqi': 'SLC',
'sandaohu': 'SDL',
'sanduxian': 'KKW',
'sanggendalai': 'OGC',
'sanguankou': 'OKJ',
'sangyuanzi': 'SAJ',
'sanhexian': 'OXP',
'sanhezhuang': 'SVP',
'sanhuizhen': 'OZW',
'sanjiadian': 'ODP',
'sanjianfang': 'SFX',
'sanjiangkou': 'SKD',
'sanjiangnan': 'SWZ',
'sanjiangxian': 'SOZ',
'sanjiazhai': 'SMM',
'sanjingzi': 'OJT',
'sanmenxia': 'SMF',
'sanmenxian': 'OQH',
'sanmenxianan': 'SCF',
'sanmenxiaxi': 'SXF',
'sanming': 'SMS',
'sanmingbei': 'SHS',
'sanshijia': 'SRD',
'sanshilipu': 'SST',
'sanshui': 'SJQ',
'sanshuibei': 'ARQ',
'sanshuinan': 'RNQ',
'sansui': 'QHW',
'santangji': 'SDH',
'sanya': 'JUQ',
'sanyangchuan': 'SYJ',
'sanyijing': 'OYD',
'sanying': 'OEJ',
'sanyuan': 'SAY',
'sanyuanpu': 'SYL',
'shache': 'SCR',
'shacheng': 'SCP',
'shahai': 'SED',
'shahe': 'SHP',
'shahekou': 'SKT',
'shaheshi': 'VOP',
'shahousuo': 'SSD',
'shalingzi': 'SLP',
'shanchengzhen': 'SCL',
'shandan': 'SDJ',
'shangbancheng': 'SBP',
'shangbanchengnan': 'OBP',
'shangcheng': 'SWN',
'shangdu': 'SXC',
'shanggaozhen': 'SVK',
'shanghai': 'SHH',
'shanghaihongqiao': 'AOH',
'shanghainan': 'SNH',
'shanghaixi': 'SXH',
'shanghang': 'JBS',
'shanghe': 'SOK',
'shangjia': 'SJB',
'shangluo': 'OLY',
'shangnan': 'ONY',
'shangqiu': 'SQF',
'shangqiunan': 'SPF',
'shangrao': 'SRG',
'shangwan': 'SWP',
'shangxipu': 'SXM',
'shangyaodun': 'SPJ',
'shangyu': 'BDH',
'shangyuan': 'SUD',
'shangyubei': 'SSH',
'shangzhi': 'SZB',
'shanhaiguan': 'SHD',
'shanhetun': 'SHL',
'shanpodong': 'SBN',
'shanshan': 'SSR',
'shanshanbei': 'SMR',
'shanshi': 'SQB',
'shantou': 'OTQ',
'shanwei': 'OGQ',
'shanyin': 'SNV',
'shaodong': 'FIQ',
'shaoguan': 'SNQ',
'shaoguandong': 'SGQ',
'shaojiatang': 'SJJ',
'shaoshan': 'SSQ',
'shaoshannan': 'INQ',
'shaowu': 'SWS',
'shaoxing': 'SOH',
'shaoxingbei': 'SLH',
'shaoyang': 'SYQ',
'shaoyangbei': 'OVQ',
'shapotou': 'SFJ',
'shaqiao': 'SQM',
'shatuo': 'SFM',
'shawanxian': 'SXR',
'shaxian': 'SAS',
'shelihu': 'VLD',
'shenchi': 'SMV',
'shenfang': 'OLH',
'shengfang': 'SUP',
'shenjia': 'OJB',
'shenjiahe': 'OJJ',
'shenjingzi': 'SWT',
'shenmu': 'OMY',
'shenqiu': 'SQN',
'shenshu': 'SWB',
'shentou': 'SEV',
'shenyang': 'SYT',
'shenyangbei': 'SBT',
'shenyangdong': 'SDT',
'shenyangnan': 'SOT',
'shenzhen': 'SZQ',
'shenzhenbei': 'IOQ',
'shenzhendong': 'BJQ',
'shenzhenpingshan': 'IFQ',
'shenzhenxi': 'OSQ',
'shenzhou': 'OZP',
'shexian': 'OEP',
'shexianbei': 'NPH',
'shiba': 'OBJ',
'shibing': 'AQW',
'shiboyuan': 'ZWT',
'shicheng': 'SCT',
'shidu': 'SEP',
'shihezi': 'SZR',
'shijiazhuang': 'SJP',
'shijiazhuangbei': 'VVP',
'shijiazi': 'SJD',
'shijiazui': 'SHM',
'shijingshannan': 'SRP',
'shilidian': 'OMP',
'shilin': 'SPB',
'shiling': 'SOL',
'shilinnan': 'LNM',
'shilong': 'SLQ',
'shimenxian': 'OMQ',
'shimenxianbei': 'VFQ',
'shiqiao': 'SQE',
'shiqiaozi': 'SQT',
'shiquanxian': 'SXY',
'shiren': 'SRL',
'shirencheng': 'SRB',
'shishan': 'SAD',
'shishanbei': 'NSQ',
'shiti': 'STE',
'shitou': 'OTB',
'shixian': 'SXL',
'shixiazi': 'SXJ',
'shixing': 'IPQ',
'shiyan': 'SNN',
'shizhuang': 'SNM',
'shizhuxian': 'OSW',
'shizong': 'SEM',
'shizuishan': 'QQJ',
'shoushan': 'SAT',
'shouyang': 'SYV',
'shuangchengbei': 'SBB',
'shuangchengpu': 'SCB',
'shuangfeng': 'OFB',
'shuangfengbei': 'NFQ',
'shuanghezhen': 'SEL',
'shuangji': 'SML',
'shuangliao': 'ZJD',
'shuangliujichang': 'IPW',
'shuangliuxi': 'IQW',
'shuangpai': 'SBZ',
'shuangyashan': 'SSB',
'shucheng': 'OCH',
'shuidong': 'SIL',
'shuifu': 'OTW',
'shuijiahu': 'SQH',
'shuiquan': 'SID',
'shuiyang': 'OYP',
'shuiyuan': 'OYJ',
'shulan': 'SLL',
'shule': 'SUR',
'shulehe': 'SHJ',
'shunchang': 'SCS',
'shunde': 'ORQ',
'shundexueyuan': 'OJQ',
'shunyi': 'SOP',
'shuozhou': 'SUV',
'shuyang': 'FMH',
'sidaowan': 'OUD',
'sifangtai': 'STB',
'siheyong': 'OHD',
'sihong': 'GQH',
'sihui': 'AHQ',
'sijialing': 'OLK',
'siping': 'SPT',
'sipingdong': 'PPT',
'sishui': 'OSK',
'sixian': 'GPH',
'siyang': 'MPH',
'song': 'SOB',
'songchenglu': 'SFF',
'songhe': 'SBM',
'songjiang': 'SAH',
'songjianghe': 'SJL',
'songjiangnan': 'IMH',
'songjiangzhen': 'OZL',
'songshu': 'SFT',
'songshuzhen': 'SSL',
'songtao': 'MZQ',
'songyuan': 'VYT',
'songyuanbei': 'OCT',
'songzi': 'SIN',
'suide': 'ODY',
'suifenhe': 'SFB',
'suihua': 'SHB',
'suiling': 'SIB',
'suining': 'NIW',
'suiping': 'SON',
'suixi': 'SXZ',
'suiyang': 'SYB',
'suizhong': 'SZD',
'suizhongbei': 'SND',
'suizhou': 'SZN',
'sujiatun': 'SXT',
'suning': 'SYP',
'sunjia': 'SUB',
'sunwu': 'SKB',
'sunzhen': 'OZY',
'suolun': 'SNT',
'suotuhan': 'SHX',
'susong': 'OAH',
'suzhou': 'OXH',
'suzhoubei': 'OHH',
'suzhoudong': 'SRH',
'suzhouxinqu': 'ITH',
'suzhouyuanqu': 'KAH',
'taerqi': 'TVX',
'taha': 'THX',
'tahe': 'TXX',
'taian': 'TID',
'taigu': 'TGV',
'taiguxi': 'TIV',
'taihe': 'THG',
'taihu': 'TKH',
'taikang': 'TKX',
'tailai': 'TLX',
'taimushan': 'TLS',
'taining': 'TNS',
'taipingchuan': 'TIT',
'taipingzhen': 'TEB',
'taiqian': 'TTK',
'taishan': 'TAK',
'taiyangshan': 'TYJ',
'taiyangsheng': 'TQT',
'taiyuan': 'TYV',
'taiyuanbei': 'TBV',
'taiyuandong': 'TDV',
'taiyuannan': 'TNV',
'taizhou': 'UTH',
'tancheng': 'TZK',
'tangbao': 'TBQ',
'tangchi': 'TCX',
'tanggu': 'TGP',
'tanghai': 'THM',
'tanghe': 'THF',
'tangjiawan': 'PDQ',
'tangshan': 'TSP',
'tangshanbei': 'FUP',
'tangshancheng': 'TCT',
'tangwanghe': 'THB',
'tangxunhu': 'THN',
'tangyin': 'TYF',
'tangyuan': 'TYB',
'tanjiajing': 'TNJ',
'taocun': 'TCK',
'taocunbei': 'TOK',
'taojiatun': 'TOT',
'taolaizhao': 'TPT',
'taonan': 'TVT',
'taoshan': 'TAB',
'tashizui': 'TIM',
'tayayi': 'TYP',
'tengxian': 'TAZ',
'tengzhou': 'TXK',
'tengzhoudong': 'TEK',
'tiandong': 'TDZ',
'tiandongbei': 'TBZ',
'tiangang': 'TGL',
'tianjin': 'TJP',
'tianjinbei': 'TBP',
'tianjinnan': 'TIP',
'tianjinxi': 'TXP',
'tianlin': 'TFZ',
'tianmen': 'TMN',
'tianmennan': 'TNN',
'tianqiaoling': 'TQL',
'tianshifu': 'TFT',
'tianshui': 'TSJ',
'tianyang': 'TRZ',
'tianyi': 'TND',
'tianzhen': 'TZV',
'tianzhu': 'TZJ',
'tianzhushan': 'QWH',
'tiechang': 'TCL',
'tieli': 'TLB',
'tieling': 'TLT',
'tielingxi': 'PXT',
'tingliang': 'TIZ',
'tonganyi': 'TAJ',
'tongbai': 'TBF',
'tongbei': 'TBB',
'tongcheng': 'TTH',
'tongdao': 'TRQ',
'tonggou': 'TOL',
'tongguan': 'TGY',
'tonghai': 'TAM',
'tonghua': 'THL',
'tonghuaxian': 'TXL',
'tongjiang': 'TJB',
'tongjunzhuang': 'TZP',
'tongliao': 'TLD',
'tongling': 'TJH',
'tonglingbei': 'KXH',
'tongnan': 'TVW',
'tongren': 'RDQ',
'tongrennan': 'TNW',
'tongtu': 'TUT',
'tongxiang': 'TCH',
'tongxin': 'TXJ',
'tongyuanpu': 'TYT',
'tongyuanpuxi': 'TST',
'tongzhouxi': 'TAP',
'tongzi': 'TZW',
'tongzilin': 'TEW',
'tuanjie': 'TIX',
'tuditangdong': 'TTN',
'tuguiwula': 'TGC',
'tuha': 'THR',
'tuliemaodu': 'TMD',
'tulihe': 'TEX',
'tulufan': 'TFR',
'tulufanbei': 'TAR',
'tumen': 'TML',
'tumenbei': 'QSL',
'tumenzi': 'TCJ',
'tumuertai': 'TRC',
'tuoyaoling': 'TIL',
'tuqiang': 'TQX',
'tuqiaozi': 'TQJ',
'tuxi': 'TSW',
'wafangdian': 'WDT',
'wafangdianxi': 'WXT',
'waitoushan': 'WIT',
'walagan': 'WVX',
'wanfatun': 'WFB',
'wanganzhen': 'WVP',
'wangcang': 'WEW',
'wangdu': 'WDP',
'wangfu': 'WUT',
'wanggang': 'WGB',
'wangjiawan': 'WJJ',
'wangjiayingxi': 'KNM',
'wangou': 'WGL',
'wangqing': 'WQL',
'wangtong': 'WTP',
'wangtuanzhuang': 'WZJ',
'wangyang': 'WYB',
'wangzhaotun': 'WZB',
'wanle': 'WEB',
'wannian': 'WWG',
'wanning': 'WNQ',
'wanyuan': 'WYY',
'wanzhou': 'WYW',
'wanzhoubei': 'WZE',
'wawushan': 'WAH',
'wayaotian': 'WIM',
'weidong': 'WVT',
'weifang': 'WFK',
'weihai': 'WKK',
'weihaibei': 'WHK',
'weihe': 'WHB',
'weihui': 'WHF',
'weihulingbei': 'WBL',
'weijin': 'WJL',
'weinan': 'WNY',
'weinanbei': 'WBY',
'weinannan': 'WVY',
'weinanzhen': 'WNJ',
'weiqing': 'WAM',
'weishanzhuang': 'WSP',
'weishe': 'WSM',
'weixing': 'WVB',
'weizhangzi': 'WKD',
'weizhuang': 'WZY',
'weizigou': 'WZL',
'weizizhen': 'WQP',
'wenan': 'WBP',
'wenchang': 'WEQ',
'wenchun': 'WDB',
'wendeng': 'WBK',
'wendengdong': 'WGK',
'wendi': 'WNZ',
'wenling': 'VHH',
'wenshui': 'WEV',
'wenxi': 'WXV',
'wenxixi': 'WOV',
'wenzhou': 'RZH',
'wenzhounan': 'VRH',
'woken': 'WQB',
'wolitun': 'WLX',
'wopi': 'WPT',
'wuan': 'WAP',
'wuchagou': 'WCT',
'wuchang': 'WCB',
'wudalianchi': 'WRB',
'wudangshan': 'WRN',
'wudaogou': 'WDL',
'wudaohe': 'WHP',
'wuerqihan': 'WHX',
'wufushan': 'WFG',
'wugong': 'WGY',
'wuguantian': 'WGM',
'wuhai': 'WVC',
'wuhaixi': 'WXC',
'wuhan': 'WHN',
'wuhu': 'WHH',
'wuji': 'WJP',
'wujia': 'WUB',
'wujiachuan': 'WCJ',
'wujiatun': 'WJT',
'wukeshu': 'WKT',
'wulanhada': 'WLC',
'wulanhaote': 'WWT',
'wulashan': 'WSC',
'wulateqianqi': 'WQC',
'wulian': 'WLK',
'wulong': 'WLW',
'wulongbei': 'WBT',
'wulongbeidong': 'WMT',
'wulongquannan': 'WFN',
'wulumuqi': 'WAR',
'wulumuqinan': 'WMR',
'wunuer': 'WRX',
'wunvshan': 'WET',
'wupu': 'WUY',
'wuqiao': 'WUP',
'wuqing': 'WWP',
'wushan': 'WSJ',
'wusheng': 'WSE',
'wutaishan': 'WSV',
'wuwei': 'WUJ',
'wuweinan': 'WWJ',
'wuwu': 'WVR',
'wuxi': 'WXR',
'wuxiang': 'WVV',
'wuxidong': 'WGH',
'wuxixinqu': 'IFH',
'wuxu': 'WYZ',
'wuxue': 'WXN',
'wuyi': 'RYH',
'wuyibei': 'WDH',
'wuyiling': 'WPB',
'wuying': 'WWB',
'wuyishan': 'WAS',
'wuyishanbei': 'WBS',
'wuyishandong': 'WCS',
'wuyuan': 'WYC',
'wuzhai': 'WZV',
'wuzhi': 'WIF',
'wuzhou': 'WZZ',
'wuzhounan': 'WBZ',
'xiabancheng': 'EBP',
'xiachengzi': 'XCB',
'xiaguanying': 'XGJ',
'xiahuayuan': 'XYP',
'xiajiang': 'EJG',
'xiamatang': 'XAT',
'xiamen': 'XMS',
'xiamenbei': 'XKS',
'xiamengaoqi': 'XBS',
'xian': 'XAY',
'xianbei': 'EAY',
'xiangcheng': 'ERN',
'xiangfang': 'XFB',
'xiangfen': 'XFV',
'xiangfenxi': 'XTV',
'xianghe': 'XXB',
'xianglan': 'XNB',
'xiangtan': 'XTQ',
'xiangtanbei': 'EDQ',
'xiangtang': 'XTG',
'xiangxiang': 'XXQ',
'xiangyang': 'XFN',
'xiangyangdong': 'XWN',
'xiangyuan': 'EIF',
'xiangyun': 'EXM',
'xianlin': 'XPH',
'xiannan': 'CAY',
'xianning': 'XNN',
'xianningbei': 'XRN',
'xianningdong': 'XKN',
'xianningnan': 'UNN',
'xianrenqiao': 'XRL',
'xiantaoxi': 'XAN',
'xianyang': 'XYY',
'xianyangqindu': 'XOY',
'xianyou': 'XWS',
'xiaocun': 'XEM',
'xiaodejiang': 'EJM',
'xiaodong': 'XOD',
'xiaogan': 'XGN',
'xiaoganbei': 'XJN',
'xiaoheyan': 'XYD',
'xiaohezhen': 'EKY',
'xiaojinkou': 'NKQ',
'xiaolan': 'EAQ',
'xiaoling': 'XLB',
'xiaonan': 'XNV',
'xiaoshao': 'XAM',
'xiaoshi': 'XST',
'xiaosigou': 'ESP',
'xiaoxi': 'XOV',
'xiaoxianbei': 'QSH',
'xiaoxinjie': 'XXM',
'xiaoxizhuang': 'XXP',
'xiaoyangqi': 'XYX',
'xiaoyuejiu': 'XFM',
'xiaoyugu': 'XHM',
'xiapu': 'XOS',
'xiashe': 'XSV',
'xiashi': 'XIZ',
'xiataizi': 'EIP',
'xiayixian': 'EJH',
'xibali': 'XLP',
'xichang': 'ECW',
'xichangnan': 'ENW',
'xidamiao': 'XMP',
'xide': 'EDW',
'xiehejian': 'EEP',
'xiejiazhen': 'XMT',
'xifeng': 'XFW',
'xigangzi': 'NBB',
'xigu': 'XIJ',
'xigucheng': 'XUJ',
'xihudong': 'WDQ',
'xijiekou': 'EKM',
'xilin': 'XYB',
'xilinhaote': 'XTC',
'xiliu': 'GCT',
'ximashan': 'XMB',
'xinan': 'EAM',
'xinanxian': 'XAF',
'xinbaoan': 'XAP',
'xinchengzi': 'XCT',
'xinchuoyuan': 'XRX',
'xindudong': 'EWW',
'xinfeng': 'EFG',
'xingan': 'XAZ',
'xinganbei': 'XDZ',
'xingcheng': 'XCD',
'xingguo': 'EUG',
'xinghexi': 'XEC',
'xingkai': 'EKB',
'xinglongdian': 'XDD',
'xinglongxian': 'EXP',
'xinglongzhen': 'XZB',
'xingning': 'ENQ',
'xingping': 'XPY',
'xingquanbu': 'XQJ',
'xingshu': 'XSB',
'xingshutun': 'XDT',
'xingtai': 'XTP',
'xingtaidong': 'EDP',
'xingye': 'SNZ',
'xingyi': 'XRZ',
'xinhe': 'XIR',
'xinhua': 'EHQ',
'xinhuanan': 'EJQ',
'xinhuang': 'XLQ',
'xinhuangxi': 'EWQ',
'xinhuatun': 'XAX',
'xinhui': 'EFQ',
'xining': 'XNO',
'xinji': 'ENP',
'xinjiang': 'XJV',
'xinjin': 'IRW',
'xinjinnan': 'ITW',
'xinle': 'ELP',
'xinli': 'XLJ',
'xinlin': 'XPX',
'xinlitun': 'XLD',
'xinlizhen': 'XGT',
'xinmin': 'XMD',
'xinpingtian': 'XPM',
'xinqing': 'XQB',
'xinqiu': 'XQD',
'xinsongpu': 'XOB',
'xinwopu': 'EPD',
'xinxian': 'XSN',
'xinxiang': 'XXF',
'xinxiangdong': 'EGF',
'xinxingxian': 'XGQ',
'xinyang': 'XUN',
'xinyangdong': 'OYN',
'xinyangzhen': 'XZJ',
'xinyi': 'EEQ',
'xinyouyi': 'EYB',
'xinyu': 'XUG',
'xinyubei': 'XBG',
'xinzhangfang': 'XZX',
'xinzhangzi': 'ERP',
'xinzhao': 'XZT',
'xinzhengjichang': 'EZF',
'xinzhou': 'XXV',
'xiongyuecheng': 'XYT',
'xiping': 'XPN',
'xipu': 'XIW',
'xipudong': 'XAW',
'xishui': 'XZN',
'xiushan': 'ETW',
'xiuwu': 'XWF',
'xiuwuxi': 'EXF',
'xiwuqi': 'XWC',
'xixia': 'XIF',
'xixian': 'ENN',
'xixiang': 'XQY',
'xixiaozhao': 'XZC',
'xiyangcun': 'XQF',
'xizhelimu': 'XRD',
'xizi': 'XZD',
'xuancheng': 'ECH',
'xuangang': 'XGV',
'xuanhan': 'XHY',
'xuanhe': 'XWJ',
'xuanhua': 'XHP',
'xuanwei': 'XWM',
'xuanzhong': 'XRP',
'xuchang': 'XCF',
'xuchangdong': 'XVF',
'xujia': 'XJB',
'xujiatai': 'XTJ',
'xujiatun': 'XJT',
'xunyang': 'XUY',
'xunyangbei': 'XBY',
'xupu': 'EPQ',
'xupunan': 'EMQ',
'xusanwan': 'XSJ',
'xushui': 'XSP',
'xuwen': 'XJQ',
'xuzhou': 'XCH',
'xuzhoudong': 'UUH',
'yabuli': 'YBB',
'yabulinan': 'YWB',
'yakeshi': 'YKX',
'yalongwan': 'TWQ',
'yanan': 'YWY',
'yancheng': 'YEK',
'yanchi': 'YAP',
'yanchuan': 'YYY',
'yandangshan': 'YGH',
'yangang': 'YGW',
'yangcao': 'YAB',
'yangcaodi': 'YKM',
'yangcha': 'YAL',
'yangchang': 'YED',
'yangcheng': 'YNF',
'yangchenghu': 'AIH',
'yangchun': 'YQQ',
'yangcun': 'YBP',
'yanggang': 'YRB',
'yanggao': 'YOV',
'yanggu': 'YIK',
'yanghe': 'GTH',
'yangjiuhe': 'YHM',
'yanglin': 'YLM',
'yangling': 'YSY',
'yanglingnan': 'YEY',
'yangliuqing': 'YQP',
'yangmingbu': 'YVV',
'yangpingguan': 'YAY',
'yangpu': 'ABM',
'yangqu': 'YQV',
'yangquan': 'AQP',
'yangquanbei': 'YPP',
'yangquanqu': 'YYV',
'yangshuling': 'YAD',
'yangshuo': 'YCZ',
'yangweishao': 'YWM',
'yangxin': 'YVK',
'yangyi': 'ARP',
'yangzhangzi': 'YZD',
'yangzhewo': 'AEM',
'yangzhou': 'YLH',
'yanhecheng': 'YHP',
'yanhui': 'AEP',
'yanji': 'YJL',
'yanjiao': 'AJP',
'yanjiazhuang': 'AZK',
'yanjin': 'AEW',
'yanjixi': 'YXL',
'yanliang': 'YNY',
'yanling': 'YAG',
'yanqi': 'YSR',
'yanqing': 'YNP',
'yanshan': 'AOP',
'yanshi': 'YSF',
'yantai': 'YAK',
'yantainan': 'YLK',
'yantongshan': 'YSL',
'yantongtun': 'YUX',
'yanzhou': 'YZK',
'yanzibian': 'YZY',
'yaoan': 'YAC',
'yaojia': 'YAT',
'yaoqianhutun': 'YQT',
'yaoshang': 'ASP',
'yatunpu': 'YTZ',
'yayuan': 'YYL',
'yazhou': 'YUQ',
'yebaishou': 'YBD',
'yecheng': 'YER',
'yesanpo': 'AIP',
'yian': 'YAX',
'yibin': 'YBW',
'yichang': 'YCN',
'yichangdong': 'HAN',
'yicheng': 'YIN',
'yichun': 'YEG',
'yichunxi': 'YCG',
'yiershi': 'YET',
'yijiang': 'RVH',
'yijianpu': 'YJT',
'yilaha': 'YLX',
'yiliang': 'ALW',
'yiliangbei': 'YSM',
'yilin': 'YLB',
'yima': 'YMF',
'yimianpo': 'YPB',
'yimianshan': 'YST',
'yimin': 'YMX',
'yinai': 'YVM',
'yinan': 'YNK',
'yinchuan': 'YIJ',
'yindi': 'YDM',
'yingbinlu': 'YFW',
'yingcheng': 'YHN',
'yingchengzi': 'YCT',
'yingchun': 'YYB',
'yingde': 'YDQ',
'yingdexi': 'IIQ',
'yingjie': 'YAM',
'yingjisha': 'YIR',
'yingkou': 'YKT',
'yingkoudong': 'YGT',
'yingpanshui': 'YZJ',
'yingshan': 'NUW',
'yingshouyingzi': 'YIP',
'yingtan': 'YTG',
'yingtanbei': 'YKG',
'yingxian': 'YZV',
'yining': 'YMR',
'yiningdong': 'YNR',
'yinlang': 'YJX',
'yinping': 'KPQ',
'yintan': 'CTQ',
'yishui': 'YUK',
'yitulihe': 'YEX',
'yiwu': 'YWH',
'yixian': 'YXD',
'yixing': 'YUH',
'yiyang': 'AEQ',
'yizheng': 'UZH',
'yizhou': 'YSZ',
'yizi': 'YQM',
'yongan': 'YAS',
'yonganxiang': 'YNB',
'yongchengbei': 'RGH',
'yongchuan': 'YCW',
'yongchuandong': 'WMW',
'yongdeng': 'YDJ',
'yongding': 'YGS',
'yongfengying': 'YYM',
'yongfunan': 'YBZ',
'yongji': 'YIV',
'yongjia': 'URH',
'yongjibei': 'AJV',
'yongkang': 'RFH',
'yongkangnan': 'QUH',
'yonglang': 'YLW',
'yongledian': 'YDY',
'yongshou': 'ASY',
'yongtai': 'YTS',
'yongxiu': 'ACG',
'yongzhou': 'AOQ',
'youhao': 'YOB',
'youxi': 'YXS',
'youxian': 'YOG',
'youxiannan': 'YXG',
'youyang': 'AFW',
'yuanbaoshan': 'YUD',
'yuandun': 'YAJ',
'yuanmou': 'YMM',
'yuanping': 'YPV',
'yuanqian': 'AQK',
'yuanshi': 'YSP',
'yuantan': 'YTQ',
'yuanyangzhen': 'YYJ',
'yucheng': 'YCK',
'yuchengxian': 'IXH',
'yuci': 'YCV',
'yudu': 'YDG',
'yuechi': 'AWW',
'yuejiajing': 'YGJ',
'yueliangtian': 'YUM',
'yueqing': 'UPH',
'yueshan': 'YBF',
'yuexi': 'YHW',
'yueyang': 'YYQ',
'yueyangdong': 'YIQ',
'yuge': 'VTM',
'yuhang': 'EVH',
'yujiang': 'YHG',
'yujiapu': 'YKP',
'yuliangpu': 'YLD',
'yulin': 'YLZ',
'yumen': 'YXJ',
'yunan': 'YKQ',
'yuncailing': 'ACP',
'yuncheng': 'YNV',
'yunchengbei': 'ABV',
'yundonghai': 'NAQ',
'yunfudong': 'IXQ',
'yunjusi': 'AFP',
'yunlianghe': 'YEF',
'yunmeng': 'YMN',
'yunshan': 'KZQ',
'yunxiao': 'YBS',
'yuping': 'YZW',
'yuquan': 'YQB',
'yushan': 'YNG',
'yushannan': 'YGG',
'yushe': 'YSV',
'yushi': 'YSJ',
'yushu': 'YRT',
'yushugou': 'YGP',
'yushutai': 'YUT',
'yushutun': 'YSX',
'yutianxian': 'ATP',
'yuxi': 'YXM',
'yuyao': 'YYH',
'yuyaobei': 'CTH',
'zaolin': 'ZIV',
'zaoqiang': 'ZVP',
'zaoyang': 'ZYN',
'zaozhuang': 'ZEK',
'zaozhuangdong': 'ZNK',
'zaozhuangxi': 'ZFK',
'zengjiapingzi': 'ZBW',
'zengkou': 'ZKE',
'zepu': 'ZPR',
'zerunli': 'ZLM',
'zhalainuoerxi': 'ZXX',
'zhalantun': 'ZTX',
'zhalute': 'ZLD',
'zhangbaiwan': 'ZUP',
'zhangdang': 'ZHT',
'zhanggutai': 'ZGD',
'zhangjiajie': 'DIQ',
'zhangjiakou': 'ZKP',
'zhangjiakounan': 'ZMP',
'zhanglan': 'ZLV',
'zhangmutou': 'ZOQ',
'zhangmutoudong': 'ZRQ',
'zhangping': 'ZPS',
'zhangpu': 'ZCS',
'zhangqiao': 'ZQY',
'zhangqiu': 'ZTK',
'zhangshu': 'ZSG',
'zhangshudong': 'ZOG',
'zhangweitun': 'ZWB',
'zhangwu': 'ZWD',
'zhangxin': 'ZIP',
'zhangye': 'ZYJ',
'zhangyexi': 'ZEJ',
'zhangzhou': 'ZUS',
'zhangzhoudong': 'GOS',
'zhanjiang': 'ZJZ',
'zhanjiangxi': 'ZWQ',
'zhaoan': 'ZDS',
'zhaobai': 'ZBP',
'zhaocheng': 'ZCV',
'zhaodong': 'ZDB',
'zhaofupu': 'ZFM',
'zhaoguang': 'ZGB',
'zhaohua': 'ZHW',
'zhaoqing': 'ZVQ',
'zhaoqingdong': 'FCQ',
'zhaotong': 'ZDW',
'zhashui': 'ZSY',
'zhazi': 'ZAL',
'zhelimu': 'ZLC',
'zhenan': 'ZEY',
'zhenchengdi': 'ZDV',
'zhengding': 'ZDP',
'zhengdingjichang': 'ZHP',
'zhengxiangbaiqi': 'ZXC',
'zhengzhou': 'ZZF',
'zhengzhoudong': 'ZAF',
'zhengzhouxi': 'XPF',
'zhenjiang': 'ZJH',
'zhenjiangnan': 'ZEH',
'zhenlai': 'ZLT',
'zhenping': 'ZPF',
'zhenxi': 'ZVT',
'zhenyuan': 'ZUW',
'zhian': 'ZAD',
'zhicheng': 'ZCN',
'zhifangdong': 'ZMN',
'zhijiang': 'ZPQ',
'zhijiangbei': 'ZIN',
'zhijin': 'IZW',
'zhijinbei': 'ZJE',
'zhongchuanjichang': 'ZJJ',
'zhonghe': 'ZHX',
'zhonghuamen': 'VNH',
'zhongjiacun': 'ZJY',
'zhongkai': 'KKQ',
'zhongmu': 'ZGF',
'zhongning': 'VNJ',
'zhongningdong': 'ZDJ',
'zhongningnan': 'ZNJ',
'zhongshan': 'ZSZ',
'zhongshanbei': 'ZGQ',
'zhongshanxi': 'ZAZ',
'zhongwei': 'ZWJ',
'zhongxiang': 'ZTN',
'zhongzhai': 'ZZM',
'zhoujia': 'ZOB',
'zhoujiatun': 'ZOD',
'zhoukou': 'ZKN',
'zhoushuizi': 'ZIT',
'zhuanghebei': 'ZUT',
'zhuangqiao': 'ZQH',
'zhuangzhi': 'ZUX',
'zhucheng': 'ZQK',
'zhuhai': 'ZHQ',
'zhuhaibei': 'ZIQ',
'zhuji': 'ZDH',
'zhujiagou': 'ZUB',
'zhujiawan': 'CWJ',
'zhujiayao': 'ZUJ',
'zhumadian': 'ZDN',
'zhumadianxi': 'ZLN',
'zhuozhou': 'ZXP',
'zhuozhoudong': 'ZAP',
'zhuozidong': 'ZDC',
'zhuozishan': 'ZZC',
'zhurihe': 'ZRC',
'zhuwo': 'ZOP',
'zhuyangxi': 'ZXW',
'zhuyuanba': 'ZAW',
'zhuzhou': 'ZZQ',
'zhuzhouxi': 'ZAQ',
'zibo': 'ZBK',
'zichang': 'ZHY',
'zigong': 'ZGW',
'zijingguan': 'ZYP',
'zixi': 'ZXS',
'ziyang': 'ZVY',
'ziyangbei': 'FYW',
'zizhong': 'ZZW',
'zizhongbei': 'WZW',
'zizhou': 'ZZY',
'zongxi': 'ZOY',
'zoucheng': 'ZIK',
'zunyi': 'ZIW',
'zuoling': 'ZSN'}
| gglinux/python_demo | ticket/stations.py | Python | mit | 58,283 | [
"ADF",
"ASE",
"VTK"
] | 38b4e29d27b0da59366e48dda05496652ecbcda834f70e43beceec4ceb2ec6bf |
r"""
This module is a VTK Web server application.
The following command line illustrate how to use it::
$ vtkpython .../vtk_web_graph.py --vertices 1000 --edges 400
Any VTK Web executable script come with a set of standard arguments that
can be overriden if need be::
--host localhost
Interface on which the HTTP server will listen on.
--port 8080
Port number on which the HTTP server will listen to.
--content /path-to-web-content/
Directory that you want to server as static web content.
By default, this variable is empty which mean that we rely on another server
to deliver the static content and the current process only focus on the
WebSocket connectivity of clients.
--authKey vtk-secret
Secret key that should be provided by the client to allow it to make any
WebSocket communication. The client will assume if none is given that the
server expect "vtk-secret" as secret key.
"""
# import to process args
import sys
import os
# import vtk modules.
from vtk import *
import json
import math
# import vtk web modules
from vtk.web import server, wamp, protocols
# import annotations
from autobahn.wamp import exportRpc
try:
import argparse
except ImportError:
# since Python 2.6 and earlier don't have argparse, we simply provide
# the source for the same as _argparse and we use it instead.
import _argparse as argparse
# =============================================================================
# Create custom File Opener class to handle clients requests
# =============================================================================
class _WebGraph(wamp.ServerProtocol):
# Application configuration
vertices = 1000
edges = 400
view = None
authKey = "vtkweb-secret"
def initialize(self):
global renderer, renderWindow, renderWindowInteractor, cone, mapper, actor
# Bring used components
self.registerVtkWebProtocol(protocols.vtkWebMouseHandler())
self.registerVtkWebProtocol(protocols.vtkWebViewPort())
self.registerVtkWebProtocol(protocols.vtkWebViewPortImageDelivery())
# Update authentication key to use
self.updateSecret(_WebGraph.authKey)
# Create default pipeline (Only once for all the sessions)
if not _WebGraph.view:
# Generate Random graph
random = vtkRandomGraphSource()
random.SetNumberOfVertices(_WebGraph.vertices)
random.SetNumberOfEdges(_WebGraph.edges)
random.SetStartWithTree(True)
random.Update()
graphData = random.GetOutput()
# Create view
view = vtkGraphLayoutView()
view.AddRepresentationFromInput(graphData)
# Customize Rendering
view.SetVertexLabelArrayName("vertex id")
view.SetVertexLabelVisibility(True)
view.SetVertexColorArrayName("vertex id")
view.SetColorVertices(True)
view.SetScalingArrayName("vertex id")
view.ScaledGlyphsOn()
view.HideVertexLabelsOnInteractionOn()
view.SetEdgeColorArrayName("edge id")
view.SetColorEdges(True)
view.SetLayoutStrategyToSpanTree()
# Set trackball interaction style
style = vtkInteractorStyleTrackballCamera()
view.GetRenderWindow().GetInteractor().SetInteractorStyle(style)
# VTK Web application specific
_WebGraph.view = view
view.ResetCamera()
view.Render()
self.Application.GetObjectIdMap().SetActiveObject("VIEW", view.GetRenderWindow())
@exportRpc("changeLayout")
def changeLayout(self, layoutName):
if layoutName == 'ForceDirected' :
print 'Layout Strategy = Force Directed'
_WebGraph.view.SetLayoutStrategyToForceDirected()
_WebGraph.view.GetLayoutStrategy().ThreeDimensionalLayoutOn()
if layoutName == 'SpanTree' :
print 'Layout Strategy = Span Tree (Depth First Off)'
_WebGraph.view.SetLayoutStrategyToSpanTree()
_WebGraph.view.GetLayoutStrategy().DepthFirstSpanningTreeOff()
elif layoutName == 'SpanTreeDepthFirst' :
print 'Layout Strategy = Span Tree (Depth First On)'
_WebGraph.view.SetLayoutStrategyToSpanTree()
_WebGraph.view.GetLayoutStrategy().DepthFirstSpanningTreeOn()
elif layoutName == 'Circular' :
print 'Layout Strategy = Circular'
_WebGraph.view.SetLayoutStrategyToCircular()
elif layoutName == 'Random' :
print 'Layout Strategy = Random'
_WebGraph.view.SetLayoutStrategyToRandom()
elif layoutName == 'Fast2D' :
print 'Layout Strategy = Fast 2D'
_WebGraph.view.SetLayoutStrategyToFast2D()
elif layoutName == 'Clustering2D' :
print 'Layout Strategy = Clustering 2D'
_WebGraph.view.SetLayoutStrategyToClustering2D()
elif layoutName == 'Community2D' :
print 'Layout Strategy = Community 2D'
_WebGraph.view.SetLayoutStrategyToCommunity2D()
_WebGraph.view.ResetCamera()
_WebGraph.view.Render()
# =============================================================================
# Main: Parse args and start server
# =============================================================================
if __name__ == "__main__":
# Create argument parser
parser = argparse.ArgumentParser(description="VTK/Web Graph web-application")
# Add default arguments
server.add_arguments(parser)
# Add local arguments
parser.add_argument("--vertices", help="Number of vertices used to generate graph", dest="vertices", type=int, default=1000)
parser.add_argument("--edges", help="Number of edges used to generate graph", dest="edges", type=int, default=400)
# Exctract arguments
args = parser.parse_args()
# Configure our current application
_WebGraph.authKey = args.authKey
_WebGraph.vertices = args.vertices
_WebGraph.edges = args.edges
# Start server
server.start_webserver(options=args, protocol=_WebGraph)
| biddisco/VTK | Web/Applications/GraphLayout/server/vtk_web_graph.py | Python | bsd-3-clause | 6,341 | [
"VTK"
] | b4f856184e84dce39d28c35d1ab4392fbdbdf224b456beb1a46ce85a7a2290e7 |
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# This example trains a super-resolution network on the BSD300 dataset,
# using crops from the 200 training images, and evaluating on crops of the 100 test images,
# and is adapted from
# https://github.com/pytorch/examples/tree/master/super_resolution
#
from __future__ import print_function
import argparse
from math import log10
from PIL import Image
import urllib
import tarfile
import os
from os import makedirs, remove, listdir
from os.path import exists, join, basename
import torch
import torch.utils.data as data
import torch.nn as nn
import torch.optim as optim
import torch.nn.init as init
from torch.utils.data import DataLoader
from torchvision.transforms import Compose, CenterCrop, ToTensor, Resize
from bigdl.orca import init_orca_context, stop_orca_context
from bigdl.orca.learn.pytorch import Estimator
from bigdl.orca.learn.metrics import MSE
from bigdl.orca.learn.trigger import EveryEpoch
parser = argparse.ArgumentParser(description='PyTorch Super Res Example')
parser.add_argument('--upscale_factor', type=int,
default=3, help="super resolution upscale factor")
parser.add_argument('--batch_size', type=int, default=64, help='training batch size')
parser.add_argument('--test_batch_size', type=int, default=10, help='testing batch size')
parser.add_argument('--epochs', type=int, default=2, help='number of epochs to train for')
parser.add_argument('--lr', type=float, default=0.01, help='Learning Rate. Default=0.01')
parser.add_argument('--threads', type=int, default=4,
help='number of threads for data loader to use')
parser.add_argument('--seed', type=int, default=123, help='random seed to use. Default=123')
parser.add_argument('--cluster_mode', type=str,
default='local', help='The mode of spark cluster.')
parser.add_argument('--backend', type=str, default="bigdl",
help='The backend of PyTorch Estimator; '
'bigdl, torch_distributed and spark are supported.')
parser.add_argument('--data_dir', type=str, default="./dataset", help='The path of datesets.')
opt = parser.parse_args()
print(opt)
if opt.cluster_mode == "local":
init_orca_context()
elif opt.cluster_mode.startswith("yarn"):
hadoop_conf = os.environ.get("HADOOP_CONF_DIR")
assert hadoop_conf, "Directory path to hadoop conf not found for yarn-client mode. Please " \
"set the environment variable HADOOP_CONF_DIR"
additional = None if not exists("dataset/BSDS300.zip") else "dataset/BSDS300.zip#dataset"
init_orca_context(cluster_mode=opt.cluster_mode, cores=4, num_nodes=2, hadoop_conf=hadoop_conf,
additional_archive=additional)
elif opt.cluster_mode == "spark-submit":
init_orca_context(cluster_mode="spark-submit")
else:
print("init_orca_context failed. cluster_mode should be one of 'local', 'yarn' and 'spark-submit' but got "
+ opt.cluster_mode)
def download_report(count, block_size, total_size):
    """Progress hook for ``urllib.request.urlretrieve``.

    :param count: number of blocks transferred so far.
    :param block_size: size of one block in bytes.
    :param total_size: total file size in bytes; servers that send no
        Content-Length header report -1 here.
    """
    downloaded = count * block_size
    if total_size > 0:
        percent = min(100., 100. * downloaded / total_size)
    else:
        # Unknown total size: avoid a bogus negative percentage
        # from dividing by -1.
        percent = 0.
    print('downloaded %d, %.2f%% completed' % (downloaded, percent))
def download_bsd300(dest=opt.data_dir):
    """Download and extract the BSDS300 dataset if it is not already present.

    :param dest: directory in which to place the dataset (default taken from
        the ``--data_dir`` command-line option, evaluated at import time).
    :return: path of the extracted ``BSDS300/images`` directory.
    """
    output_image_dir = join(dest, "BSDS300/images")
    if not exists(output_image_dir):
        # exist_ok: dest may already exist from a previous partial run;
        # plain makedirs would raise FileExistsError in that case.
        makedirs(dest, exist_ok=True)
        url = "http://www2.eecs.berkeley.edu/Research/Projects/CS/vision/bsds/BSDS300-images.tgz"
        print("downloading url ", url)
        file_path = join(dest, basename(url))
        urllib.request.urlretrieve(url, file_path, download_report)
        print("Extracting data")
        with tarfile.open(file_path) as tar:
            for item in tar:
                # Guard against path traversal from a malicious archive
                # (members with absolute paths or ".." components).
                name = item.name
                if name.startswith("/") or ".." in name.split("/"):
                    raise ValueError("unsafe path in archive: %s" % name)
                tar.extract(item, dest)
        remove(file_path)
    return output_image_dir
def is_image_file(filename):
    """Return True if *filename* ends with a recognised image extension."""
    # str.endswith accepts a tuple of suffixes; matching is case-sensitive,
    # exactly like the original any()-based check.
    return filename.endswith((".png", ".jpg", ".jpeg"))
class DatasetFromFolder(data.Dataset):
    """Dataset of (input, target) luminance-image pairs from one directory.

    Every image file found directly inside *image_dir* becomes one sample.
    The target starts as a copy of the loaded image; each side is then run
    through its optional transform.
    """
    def __init__(self, image_dir, input_transform=None, target_transform=None):
        super(DatasetFromFolder, self).__init__()
        entries = listdir(image_dir)
        self.image_filenames = [join(image_dir, name)
                                for name in entries if is_image_file(name)]
        self.input_transform = input_transform
        self.target_transform = target_transform
    def __getitem__(self, index):
        sample = load_img(self.image_filenames[index])
        label = sample.copy()
        if self.input_transform:
            sample = self.input_transform(sample)
        if self.target_transform:
            label = self.target_transform(label)
        return sample, label
    def __len__(self):
        return len(self.image_filenames)
def load_img(filepath):
    """Open *filepath*, convert it to YCbCr and return the Y (luma) channel."""
    ycbcr = Image.open(filepath).convert('YCbCr')
    # split() yields (Y, Cb, Cr); only the luminance plane is used.
    return ycbcr.split()[0]
def calculate_valid_crop_size(crop_size, upscale_factor):
    """Largest size <= *crop_size* that is an exact multiple of *upscale_factor*."""
    return (crop_size // upscale_factor) * upscale_factor
def input_transform(crop_size, upscale_factor):
    """Transform producing the downscaled (low-resolution) network input."""
    steps = [CenterCrop(crop_size)]
    steps.append(Resize(crop_size // upscale_factor))
    steps.append(ToTensor())
    return Compose(steps)
def target_transform(crop_size):
    """Transform producing the full-resolution target tensor."""
    return Compose([CenterCrop(crop_size), ToTensor()])
def train_data_creator(config, batch_size):
    """Build the training DataLoader over the BSDS300 ``train`` split.

    :param config: dict; reads ``upscale_factor`` (default 3).
    :param batch_size: batch size for the loader.
    """
    upscale = config.get("upscale_factor", 3)
    root_dir = download_bsd300()
    crop = calculate_valid_crop_size(256, upscale)
    train_set = DatasetFromFolder(join(root_dir, "train"),
                                  input_transform=input_transform(crop, upscale),
                                  target_transform=target_transform(crop))
    return DataLoader(dataset=train_set,
                      batch_size=batch_size,
                      num_workers=0,
                      shuffle=True)
def validation_data_creator(config, batch_size):
    """Build the validation DataLoader over the BSDS300 ``test`` split.

    :param config: dict; reads ``upscale_factor`` (default 3).
    :param batch_size: batch size for the loader.
    """
    upscale = config.get("upscale_factor", 3)
    root_dir = download_bsd300()
    crop = calculate_valid_crop_size(256, upscale)
    test_set = DatasetFromFolder(join(root_dir, "test"),
                                 input_transform=input_transform(crop, upscale),
                                 target_transform=target_transform(crop))
    return DataLoader(dataset=test_set,
                      batch_size=batch_size,
                      num_workers=0,
                      shuffle=False)
class Net(nn.Module):
    """Sub-pixel CNN for single-image super resolution.

    Maps a 1-channel (luminance) image to a 1-channel image whose spatial
    dimensions are enlarged by *upscale_factor* via ``nn.PixelShuffle``.
    """
    def __init__(self, upscale_factor):
        super(Net, self).__init__()
        self.relu = nn.ReLU()
        self.conv1 = nn.Conv2d(1, 64, (5, 5), (1, 1), (2, 2))
        self.conv2 = nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1))
        self.conv3 = nn.Conv2d(64, 32, (3, 3), (1, 1), (1, 1))
        self.conv4 = nn.Conv2d(32, upscale_factor ** 2, (3, 3), (1, 1), (1, 1))
        self.pixel_shuffle = nn.PixelShuffle(upscale_factor)
        self._initialize_weights()
    def forward(self, x):
        out = self.relu(self.conv1(x))
        out = self.relu(self.conv2(out))
        out = self.relu(self.conv3(out))
        return self.pixel_shuffle(self.conv4(out))
    def _initialize_weights(self):
        relu_gain = init.calculate_gain('relu')
        # conv4 feeds PixelShuffle directly (no ReLU), so it keeps gain 1,
        # matching orthogonal_'s default for the last layer.
        for layer, gain in ((self.conv1, relu_gain),
                            (self.conv2, relu_gain),
                            (self.conv3, relu_gain),
                            (self.conv4, 1)):
            init.orthogonal_(layer.weight, gain)
def model_creator(config):
    """Create a freshly seeded Net.

    Reads ``seed`` (default 123) and ``upscale_factor`` (default 3)
    from *config*.
    """
    seed = config.get("seed", 123)
    torch.manual_seed(seed)
    return Net(upscale_factor=config.get("upscale_factor", 3))
def optim_creator(model, config):
    """Adam optimiser over *model*'s parameters; ``lr`` from config (default 0.01)."""
    learning_rate = config.get("lr", 0.01)
    return optim.Adam(model.parameters(), lr=learning_rate)
criterion = nn.MSELoss()
model_dir = opt.data_dir+"/models"
if opt.backend == "bigdl":
model = model_creator(
config={
"upscale_factor": opt.upscale_factor,
"seed": opt.seed
}
)
optimizer = optim_creator(model, config={"lr": opt.lr})
estimator = Estimator.from_torch(
model=model,
optimizer=optimizer,
loss=criterion,
metrics=[MSE()],
model_dir=model_dir,
backend="bigdl"
)
train_loader = train_data_creator(
config={
"upscale_factor": opt.upscale_factor,
"threads": opt.threads
},
batch_size=opt.batch_size
)
test_loader = validation_data_creator(
config={
"upscale_factor": opt.upscale_factor,
"threads": opt.threads
},
batch_size=opt.batch_size
)
estimator.fit(data=train_loader, epochs=opt.epochs, validation_data=test_loader,
checkpoint_trigger=EveryEpoch())
val_stats = estimator.evaluate(data=test_loader)
print("===> Validation Complete: Avg. PSNR: {:.4f} dB, Avg. Loss: {:.4f}"
.format(10 * log10(1. / val_stats["MSE"]), val_stats["MSE"]))
elif opt.backend in ["torch_distributed", "spark"]:
estimator = Estimator.from_torch(
model=model_creator,
optimizer=optim_creator,
loss=criterion,
model_dir=os.getcwd(),
use_tqdm=True,
backend=opt.backend,
config={
"lr": opt.lr,
"upscale_factor": opt.upscale_factor,
"threads": opt.threads,
"seed": opt.seed
}
)
if not exists(model_dir):
makedirs(model_dir)
for epoch in range(1, opt.epochs + 1):
stats = estimator.fit(data=train_data_creator, epochs=1, batch_size=opt.batch_size)
for epochinfo in stats:
print("===> Epoch {} Complete: Avg. Loss: {:.4f}"
.format(epoch, epochinfo["train_loss"]))
val_stats = estimator.evaluate(data=validation_data_creator,
batch_size=opt.test_batch_size)
print("===> Validation Complete: Avg. PSNR: {:.4f} dB, Avg. Loss: {:.4f}"
.format(10 * log10(1. / val_stats["val_loss"]), val_stats["val_loss"]))
model_out_path = model_dir + "/" + "model_epoch_{}.pth".format(epoch)
model = estimator.get_model()
torch.save(model, model_out_path)
print("Checkpoint saved to {}".format(model_out_path))
else:
raise NotImplementedError("Only bigdl, torch_distributed, and spark are supported as the backend, "
"but got {}".format(opt.backend))
stop_orca_context()
| intel-analytics/BigDL | python/orca/example/learn/pytorch/super_resolution/super_resolution.py | Python | apache-2.0 | 11,377 | [
"ORCA"
] | 919db560dbcb868300557467f133d25992fe5c819fc6e817e3827a67d4aec0fd |
from __future__ import unicode_literals
from primesense import openni2
from primesense import _openni2 as c_api
import cv2
import time
import sys
import os
import random
import numpy as np
import matplotlib.pyplot as plt
from scipy import ndimage
# -------------------------------------------------------------
# input/output parameters
# input tensor
input_tensor_fn = 'tensors/black.pkl'
# output window size
FINAL_OUTPUT_WIDTH = 1440
FINAL_OUTPUT_HEIGHT = 900
# -------------------------------------------------------------
# -------------------------------------------------------------
# min/max depth parameters
# debug depth
show_depth = False
MIN_DEPTH = 500.0
MAX_DEPTH = 2200.0
# keys "a" and "z" control the minimun depth cliping
# keys "s" and "x" control the maximum depth cliping
# this is how much the min/max values change at every click registered
KEY_DEPTH_OFFSET = 100
# -------------------------------------------------------------
global input_tensor
global INPUT_WIDTH
global INPUT_HEIGHT
global depth_levels
depth_levels = 256
def load_tensor(input_tensor_fn):
    '''
    Load a precomputed colour tensor (created by either the video2input or the
    deepdream notebook) and publish it via the module globals ``input_tensor``,
    ``INPUT_WIDTH`` and ``INPUT_HEIGHT``.
    '''
    global input_tensor
    global INPUT_WIDTH
    global INPUT_HEIGHT
    global depth_levels
    # input_tensor shape: (depth_leves, OUTPUT_HEIGHT, OUTPUT_WIDTH, 3)
    # allow_pickle is required to read pickled arrays on NumPy >= 1.16.3.
    input_tensor = np.load(input_tensor_fn, allow_pickle=True)
    # Resample the first axis down to depth_levels evenly spaced frames,
    # reversed so depth indexes map to the opposite end of the clip.
    input_tensor = input_tensor[ np.linspace(0, input_tensor.shape[0]-1, num=depth_levels, endpoint=True, dtype=np.int32)[::-1] ,:,:,:]
    # Swap channel order (presumably RGB -> BGR for OpenCV display --
    # confirm with the tensor producer).
    input_tensor = input_tensor[:,:,:, [2,1,0]]
    INPUT_WIDTH, INPUT_HEIGHT = input_tensor.shape[2], input_tensor.shape[1]
    print(' --------------------------------------')
    # print() function instead of the original Python 2 print statement,
    # which is a SyntaxError on Python 3.
    print('Input shape:', input_tensor.shape, input_tensor.dtype)
    print(' --------------------------------------')
def print_depth_frame_video(depth_data, input_tensor, thisType, show_depth):
    '''
    main function that does the merging of depth and input tensor

    Converts the raw Kinect depth buffer into per-pixel indices in
    [0, depth_levels) and uses them to look up a colour from input_tensor,
    so each pixel's colour comes from the tensor slice matching its depth.

    depth_data -- raw buffer from the depth stream (424x512 values)
    input_tensor -- (depth_levels, INPUT_HEIGHT, INPUT_WIDTH, 3) colour tensor
    thisType -- numpy dtype of the raw buffer (e.g. np.uint16)
    show_depth -- if True, return the quantised depth map for debugging
    '''
    # get depth in numpy and reshape
    img = np.frombuffer(depth_data, dtype=thisType).astype(np.float32).reshape( (1, 424, 512) )
    # center and clip to MIN_DEPTH - MAX_DEPTH
    # Values below MIN_DEPTH (sensor noise / too close) are pushed beyond
    # MAX_DEPTH so they clip to the far end instead of the near end.
    img[ np.where(img < MIN_DEPTH) ] = MAX_DEPTH+1000
    img -= MIN_DEPTH
    # Quantise depth into [0, depth_levels-1] integer bins.
    img = (np.clip(img, 0, MAX_DEPTH) / MAX_DEPTH) * (depth_levels-1)
    img = np.squeeze(img).astype(np.uint8)
    # optional: to remove artifacts, we use a dilation and gaussian filtering
    # Feel free to comment the following two lines or play with the params
    # NOTE(review): scipy.ndimage.filters is deprecated in modern SciPy;
    # scipy.ndimage.gaussian_filter is the current spelling.
    img = ndimage.grey_dilation(img, footprint=np.ones((2,2)))
    img = ndimage.filters.gaussian_filter(img, sigma=0.3)
    # resize to input_tensor/input size
    img = cv2.resize(img, (INPUT_WIDTH, INPUT_HEIGHT))#, interpolation=cv2.INTER_LINEAR)
    # for debugging
    if show_depth:
        return img
    # python magic
    # Fancy indexing: for each pixel (i, j), pick input_tensor[img[i, j], i, j, :],
    # i.e. the colour of the tensor slice selected by that pixel's depth bin.
    i, j = img.shape
    i, j = np.ogrid[:i, :j]
    image = input_tensor[img, i, j, :]
    if FINAL_OUTPUT_WIDTH > INPUT_WIDTH:
        image = cv2.resize(image, (FINAL_OUTPUT_WIDTH, FINAL_OUTPUT_HEIGHT), interpolation=cv2.INTER_LINEAR)
    return image
# load the input
load_tensor(input_tensor_fn)
openni2.initialize()
dev = openni2.Device.open_any()
depth_stream = dev.create_depth_stream()
depth_stream.set_video_mode(c_api.OniVideoMode(
pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_100_UM, resolutionX = 512, resolutionY = 424, fps = 60))
cv2.namedWindow("Slihouettes")
# vars to count fps
start = time.time()
count_frames = 0
depth_stream.start()
while 1:
frame = depth_stream.read_frame()
depth_data = frame.get_buffer_as_uint16()
video_image = print_depth_frame_video(depth_data, input_tensor, np.uint16, show_depth)
cv2.imshow("Slihouettes", video_image)
# silly fps counting
count_frames += 1
fps = count_frames / (time.time() - start)
if count_frames % 10 == 0:
print( 'fps:', fps)
# key bindings
# keys "a" and "z" control the minimun depth cliping
# keys "s" and "x" control the maximum depth cliping
k = cv2.waitKey(5) & 0xFF
if k == 27: # ESC TO EXIT
break
k = cv2.waitKey(33) & 0xFF
if k == ord('a'):
MIN_DEPTH += KEY_DEPTH_OFFSET
print(' ------------------- MIN_DEPTH:', MIN_DEPTH)
if k == ord('z'):
MIN_DEPTH -= KEY_DEPTH_OFFSET
print(' ------------------- MIN_DEPTH:', MIN_DEPTH)
if k == ord('s'):
MAX_DEPTH -= KEY_DEPTH_OFFSET
print(' ------------------- MAX_DEPTH:', MAX_DEPTH)
if k == ord('x'):
MAX_DEPTH += KEY_DEPTH_OFFSET
print(' ------------------- MAX_DEPTH:', MAX_DEPTH)
cv2.destroyAllWindows()
depth_stream.stop() | skamalas/depth2time | silhouettes.py | Python | mit | 4,734 | [
"Gaussian"
] | f9eca1045a4aaed913fb0f8d61fb99739ed1b41ddc81f38e3d70be104e5ecb69 |
# Copyright (c) 2015-2017 Lester Hedges <lester.hedges+slsm@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
""" minimise_perimeter.py
An example code showing a hole shrinking during unconstrained
perimeter minimisation.
This demo provides a simple test to confirm that a single hole will
shrink with velocity proportional to its curvature.
At each iteration we optimise for the velocity vector that maximises
the reduction in the boundary perimeter, subject to the CFL displacement
limit. While this trivial optimisation could be performed by hand, it
provides a useful test of our numerical implementation, especially the
scaling that is required for stability inside the Optimise class.
For a perfect continuous circle, all boundary points should move at a
velocity proportional to the curvature, 1 / R. The time step will be
rescaled if the curvature is too large and boundary point velocities
cause violation of the CFL condition.
We compute the velocity by measuring the distance that the hole boundary
displaces as a function of time. This can be done by measuring the change
in the boundary length over time and comparing the radius at each time
interval (radius = perimeter / (2 x pi)).
The output file, "minimise_perimeter.txt", contains the measured distance vs
time data for the optmisation run. Additional columns provide data for the
velocity and mean curvature at each time step (computed by the code) as well
as the analytical curvature estimate of 1 / R. Level set information for each
sample interval is written to ParaView readable VTK files, "level-set_*.vtk".
Boundary segment data is written to "boundary-segments_*.txt".
"""
import math
import pyslsm
# Maximum displacement per iteration, in units of the mesh spacing.
# This is the CFL limit.
moveLimit = 0.05
# Set maximum running time.
maxTime = 3000
# Set sampling interval.
sampleInterval = 30
# Set time of the next sample.
nextSample = 30
# Create a hole at position (100, 100) with a radius of 80 grid units.
holes = pyslsm.VectorHole()
holes.append(pyslsm.Hole(100, 100, 80))
# Initialise a 200x200 level set domain.
levelSet = pyslsm.LevelSet(200, 200, holes, moveLimit, 6, True)
# Initialise io object.
io = pyslsm.InputOutput()
# Reinitialise the level set to a signed distance function.
levelSet.reinitialise()
# Initialise the boundary object.
boundary = pyslsm.Boundary()
# Perform initial boundary discretisation.
boundary.discretise(levelSet)
# Compute the initial boundary point normal vectors.
boundary.computeNormalVectors(levelSet)
# Number of cycles since signed distance reinitialisation.
nReinit = 0
# Running time.
runningTime = 0
# Time measurements.
times = pyslsm.VectorDouble()
# Boundary length measurements.
lengths = pyslsm.VectorDouble()
# Boundary curvature measurements.
curvatures = pyslsm.VectorDouble()
# Lambda values for the optimiser.
lambdas = pyslsm.VectorDouble([0])
print("\nStarting unconstrained perimeter minimisation demo...\n")
# Print output header.
print("--------------------------")
print("%6s %8s %10s" % ("Time", "Length", "Curvature"))
print("--------------------------")
# Integrate until we exceed the maximum time.
while runningTime < maxTime:
# Zero the curvature.
curvature = 0
# Initialise the sensitivity object.
sensitivity = pyslsm.Sensitivity()
# Initialise the sensitivity callback function.
cb = pyslsm.Callback()
cb.callback = boundary.computePerimeter
# Assign boundary point sensitivities.
for i in range(0, len(boundary.points)):
boundary.points[i].sensitivities[0] \
= sensitivity.computeSensitivity(boundary.points[i], cb.callback)
curvature += boundary.points[i].sensitivities[0]
# Compute mean curvature.
curvature /= len(boundary.points)
# Time step associated with the iteration.
timeStep = pyslsm.MutableFloat()
# Initialise the optimisation object.
# Since there are no constraints we pass an empty vector for the
# constraint distances argument.
optimise = pyslsm.Optimise(boundary.points, pyslsm.VectorDouble(), \
lambdas, timeStep, moveLimit)
# Perform the optimisation.
optimise.solve()
# Extend boundary point velocities to all narrow band nodes.
levelSet.computeVelocities(boundary.points)
# Compute gradient of the signed distance function within the narrow band.
levelSet.computeGradients()
# Update the level set function.
isReinitialised = levelSet.update(timeStep.value)
# Reinitialise the signed distance function, if necessary.
if (not isReinitialised):
# Reinitialise at least every 20 iterations.
if (nReinit == 20):
levelSet.reinitialise()
nReinit = 0
else:
nReinit = 0
# Increment the number of steps since reinitialisation.
nReinit += 1
# Compute the new discretised boundary.
boundary.discretise(levelSet)
# Compute the boundary point normal vectors.
boundary.computeNormalVectors(levelSet)
# Increment the time.
runningTime += timeStep.value
# Check if the next sample time has been reached.
while runningTime >= nextSample:
# Record the time and boundary length.
times.append(runningTime)
lengths.append(boundary.length)
curvatures.append(curvature)
# Update the time of the next sample.
nextSample += sampleInterval
# Print statistics.
print("%6.1f %8.1f %10.4f" % (runningTime, boundary.length, curvature))
# Write level set and boundary segments to file.
io.saveLevelSetVTK(len(times), levelSet)
io.saveBoundarySegmentsTXT(len(times), boundary)
# Distance measurements.
distances = pyslsm.VectorDouble()
# Compute the distance moved at each time interval.
for i in range(0, len(times)):
distances.append((lengths[0] - lengths[i]) / (2 * math.pi))
# Print results to file (distance vs time).
# A context manager guarantees the handle is closed even on error, and we
# avoid shadowing the builtin name "file".
with open("minimise_perimeter.txt", "w") as out_file:
    for i in range(1, len(times)):
        # Distance and time increments.
        deltaDist = distances[i] - distances[i-1]
        deltaTime = times[i] - times[i-1]
        # Columns: elapsed time, distance moved, measured velocity,
        # computed mean curvature, analytical curvature 1/R = 2*pi/length.
        out_file.write("%lf %lf %lf %lf %lf\n" % (times[i] - times[0],
                                                  distances[i],
                                                  deltaDist / deltaTime,
                                                  curvatures[i],
                                                  ((2 * math.pi) / lengths[i])))
print("\nDone!")
| lohedges/slsm | python/minimise_perimeter.py | Python | gpl-3.0 | 7,029 | [
"ParaView",
"VTK"
] | 254145d135da0206a60db20a78d3ce45902317f0a5ee290b321cbf9f73371b32 |
""" Test class for JobWrapper
"""
import os
import shutil
import pytest
from mock import MagicMock
from DIRAC import gLogger
from DIRAC.DataManagementSystem.Client.test.mock_DM import dm_mock
from DIRAC.Resources.Catalog.test.mock_FC import fc_mock
from DIRAC.WorkloadManagementSystem.JobWrapper.JobWrapper import JobWrapper
from DIRAC.WorkloadManagementSystem.JobWrapper.Watchdog import Watchdog
from DIRAC.WorkloadManagementSystem.Client import JobStatus, JobMinorStatus
getSystemSectionMock = MagicMock()
getSystemSectionMock.return_value = "aValue"
gLogger.setLevel("DEBUG")
def test_InputData(mocker):
    """resolveInputData(): fails on empty InputData, succeeds with mocked
    DataManager/FileCatalog, with or without LocalSE/InputDataModule set."""
    mocker.patch(
        "DIRAC.WorkloadManagementSystem.JobWrapper.JobWrapper.getSystemSection", side_effect=getSystemSectionMock
    )
    mocker.patch("DIRAC.WorkloadManagementSystem.JobWrapper.JobWrapper.ModuleFactory", side_effect=MagicMock())
    # Empty InputData must be rejected.
    jw = JobWrapper()
    jw.jobArgs["InputData"] = ""
    res = jw.resolveInputData()
    assert res["OK"] is False
    # Non-empty InputData with mocked data manager and catalog succeeds.
    jw = JobWrapper()
    jw.jobArgs["InputData"] = "pippo"
    jw.dm = dm_mock
    jw.fc = fc_mock
    res = jw.resolveInputData()
    assert res["OK"]
    # Same, with an explicit LocalSE and a custom input-data module.
    jw = JobWrapper()
    jw.jobArgs["InputData"] = "pippo"
    jw.jobArgs["LocalSE"] = "mySE"
    jw.jobArgs["InputDataModule"] = "aa.bb"
    jw.dm = dm_mock
    jw.fc = fc_mock
    res = jw.resolveInputData()
    assert res["OK"]
def test_performChecks():
    """A Watchdog built on the current process with generous CPU/memory
    limits must pass a single _performChecks() cycle."""
    wd = Watchdog(
        pid=os.getpid(),
        exeThread=MagicMock(),
        spObject=MagicMock(),
        jobCPUTime=1000,
        memoryLimit=1024 * 1024,
        jobArgs={"StopSigNumber": 10},
    )
    res = wd._performChecks()
    assert res["OK"]
@pytest.mark.slow
def test_execute(mocker):
    """End-to-end JobWrapper.execute() runs: a plain binary, a succeeding
    script, a failing script (wrapper still OK), and a rescheduling script
    (wrapper reports an error)."""
    mocker.patch(
        "DIRAC.WorkloadManagementSystem.JobWrapper.JobWrapper.getSystemSection", side_effect=getSystemSectionMock
    )
    mocker.patch(
        "DIRAC.WorkloadManagementSystem.JobWrapper.Watchdog.getSystemInstance", side_effect=getSystemSectionMock
    )
    # Plain system binary succeeds.
    jw = JobWrapper()
    jw.jobArgs = {"Executable": "/bin/ls"}
    res = jw.execute()
    print("jw.execute() returns", str(res))
    assert res["OK"]
    # Script that exits 0.
    shutil.copy("src/DIRAC/WorkloadManagementSystem/JobWrapper/test/script-OK.sh", "script-OK.sh")
    jw = JobWrapper()
    jw.jobArgs = {"Executable": "script-OK.sh"}
    res = jw.execute()
    assert res["OK"]
    os.remove("script-OK.sh")
    # Script that exits non-zero: the application fails, the wrapper does not.
    shutil.copy("src/DIRAC/WorkloadManagementSystem/JobWrapper/test/script.sh", "script.sh")
    jw = JobWrapper()
    jw.jobArgs = {"Executable": "script.sh", "Arguments": "111"}
    res = jw.execute()
    assert res["OK"]  # In this case the application finished with errors,
    # but the JobWrapper executed successfully
    os.remove("script.sh")
    # this will reschedule
    shutil.copy("src/DIRAC/WorkloadManagementSystem/JobWrapper/test/script-RESC.sh", "script-RESC.sh")
    jw = JobWrapper()
    jw.jobArgs = {"Executable": "script-RESC.sh"}
    res = jw.execute()
    if res["OK"]:  # FIXME: This may happen depending on the shell - not the best test admittedly!
        print("We should not be here, unless the 'Execution thread status' is equal to 1")
        assert res["OK"]
    else:
        assert res["OK"] is False  # In this case the application finished with an error code
        # that the JobWrapper interpreted as "to reschedule"
        # so in this case the "execute" is considered an error
    os.remove("script-RESC.sh")
    os.remove("std.out")
@pytest.mark.parametrize(
    "failedFlag, expectedRes, finalStates",
    [
        (True, 1, [JobStatus.FAILED, ""]),
        (False, 0, [JobStatus.DONE, JobMinorStatus.EXEC_COMPLETE]),
    ],
)
def test_finalize(mocker, failedFlag, expectedRes, finalStates):
    """finalize() must map failedFlag to the return code and to the
    (status, minor status) pair recorded in the job report."""
    mocker.patch(
        "DIRAC.WorkloadManagementSystem.JobWrapper.JobWrapper.getSystemSection", side_effect=getSystemSectionMock
    )
    mocker.patch("DIRAC.WorkloadManagementSystem.JobWrapper.JobWrapper.ModuleFactory", side_effect=MagicMock())
    jw = JobWrapper()
    jw.jobArgs = {"Executable": "/bin/ls"}
    jw.failedFlag = failedFlag
    res = jw.finalize()
    assert res == expectedRes
    # First recorded status entry carries the final (status, minor status).
    assert jw.jobReport.jobStatusInfo[0][0] == finalStates[0]
    assert jw.jobReport.jobStatusInfo[0][1] == finalStates[1]
| DIRACGrid/DIRAC | src/DIRAC/WorkloadManagementSystem/JobWrapper/test/Test_JobWrapper.py | Python | gpl-3.0 | 4,245 | [
"DIRAC"
] | d003f4a5213a25958be1d0fe94582a566393cd07b8c54ae61647b924dde8159c |
"""
.. todo::
WRITEME
"""
import numpy as N
import copy
from theano import config
import theano.tensor as T
from pylearn2.utils.rng import make_np_rng
class CosDataset(object):
    """
    Makes a dataset that streams randomly generated 2D examples.
    The first coordinate is sampled from a uniform distribution.
    The second coordinate is the cosine of the first coordinate,
    plus some gaussian noise.
    """
    def __init__(self, min_x=-6.28, max_x=6.28, std=.05, rng=None):
        """
        min_x : lower bound of the uniform distribution for the x coordinate.
        max_x : upper bound of the uniform distribution for the x coordinate.
        std : standard deviation of the gaussian noise added to cos(x).
        rng : RNG or seed; a fixed default seed is used when omitted.
        """
        self.min_x, self.max_x, self.std = min_x, max_x, std
        rng = make_np_rng(rng, [17, 2, 946], which_method=['uniform', 'randn'])
        self.default_rng = copy.copy(rng)
        self.rng = rng
    def energy(self, mat):
        """
        Numeric energy (y - cos(x))^2 / (2 * std^2) for each row [x, y]
        of *mat* (a 2D numpy array); returns a 1D array.
        """
        x = mat[:, 0]
        y = mat[:, 1]
        rval = (y - N.cos(x)) ** 2. / (2. * (self.std ** 2.))
        return rval
    def pdf_func(self, mat):
        """
        Numeric probability density of each row [x, y] of *mat*:
        a gaussian in y around cos(x) times a uniform density in x over
        [min_x, max_x] (zero outside that interval).
        """
        x = mat[:, 0]
        y = mat[:, 1]
        rval = N.exp(-(y - N.cos(x)) ** 2. / (2. * (self.std ** 2.)))
        rval /= N.sqrt(2.0 * N.pi * (self.std ** 2.))
        rval /= (self.max_x - self.min_x)
        rval *= x < self.max_x
        rval *= x > self.min_x
        return rval
    def free_energy(self, X):
        """
        Symbolic (theano) energy of each row of *X*; rows with x outside
        (min_x, max_x) are penalised with a large constant (1e30).
        """
        x = X[:, 0]
        y = X[:, 1]
        rval = T.sqr(y - T.cos(x)) / (2. * (self.std ** 2.))
        mask = x < self.max_x
        mask = mask * (x > self.min_x)
        rval = mask * rval + (1 - mask) * 1e30
        return rval
    def pdf(self, X):
        """
        Symbolic (theano) version of :meth:`pdf_func`: the probability
        density from which the streamed examples are drawn.
        """
        x = X[:, 0]
        y = X[:, 1]
        rval = T.exp(-T.sqr(y - T.cos(x)) / (2. * (self.std ** 2.)))
        rval /= N.sqrt(2.0 * N.pi * (self.std ** 2.))
        rval /= (self.max_x - self.min_x)
        rval *= x < self.max_x
        rval *= x > self.min_x
        return rval
    def get_stream_position(self):
        """
        Return a copy of the current RNG, usable as a resumable checkpoint
        of the example stream.
        """
        return copy.copy(self.rng)
    def set_stream_position(self, s):
        """
        Restore the stream to a checkpoint previously returned by
        :meth:`get_stream_position`.
        """
        self.rng = copy.copy(s)
    def restart_stream(self):
        """
        Rewind the stream to its initial state (alias for :meth:`reset_RNG`).
        """
        self.reset_RNG()
    def reset_RNG(self):
        """
        Reset the RNG to the default seed so the same example sequence is
        generated again.
        """
        if 'default_rng' not in dir(self):
            self.default_rng = N.random.RandomState([17, 2, 946])
        self.rng = copy.copy(self.default_rng)
    def apply_preprocessor(self, preprocessor, can_fit=False):
        """
        Not supported for streamed datasets; always raises NotImplementedError.
        """
        raise NotImplementedError()
    def get_batch_design(self, batch_size):
        """
        Draw *batch_size* fresh examples: x ~ Uniform(min_x, max_x) and
        y = cos(x) + std * gaussian noise; returns a (batch_size, 2) array.
        """
        x = N.cast[config.floatX](self.rng.uniform(self.min_x, self.max_x,
                                                   (batch_size, 1)))
        y = N.cos(x) + (N.cast[config.floatX](self.rng.randn(*x.shape)) *
                        self.std)
        rval = N.hstack((x, y))
        return rval
| skearnes/pylearn2 | pylearn2/datasets/cos_dataset.py | Python | bsd-3-clause | 3,767 | [
"Gaussian"
] | e3a22844a3b057b84f34989744a5dfea3dcd574d1658207efc2c086f63d9d28d |
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
from __future__ import absolute_import
from numpy.testing import (
assert_,
)
import MDAnalysis as mda
from MDAnalysisTests.topology.base import ParserBase
from MDAnalysisTests.datafiles import (
PQR,
)
class TestPQRParser(ParserBase):
    """Checks the PQR topology parser against the bundled PQR fixture."""
    parser = mda.topology.PQRParser.PQRParser
    filename = PQR
    expected_attrs = ['ids', 'names', 'charges', 'radii',
                      'resids', 'resnames',
                      'segids']
    guessed_attrs = ['masses', 'types']
    expected_n_atoms = 3341
    expected_n_residues = 214
    expected_n_segments = 1
    def test_attr_size(self):
        # Each attribute array must have one entry per object of its level.
        for attr in ('ids', 'names', 'charges', 'radii'):
            assert_(len(getattr(self.top, attr)) == self.top.n_atoms)
        for attr in ('resids', 'resnames'):
            assert_(len(getattr(self.top, attr)) == self.top.n_residues)
        assert_(len(self.top.segids) == self.top.n_segments)
| kain88-de/mdanalysis | testsuite/MDAnalysisTests/topology/test_pqr.py | Python | gpl-2.0 | 2,054 | [
"MDAnalysis"
] | 237b1cce6bd161b27ba96d6b7589f398ef8c16774c61c13e6f070910098950a8 |
# The MIT License (MIT)
# Copyright (c) 2016, 2017 by the ESA CCI Toolbox development team and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Description
===========
This module provides Cate's data access API.
Technical Requirements
======================
**Query data store**
:Description: Allow querying registered ECV data stores using a simple function that takes a set of query parameters
and returns data source identifiers that can be used to open respective ECV dataset in the Cate.
:URD-Source:
* CCIT-UR-DM0006: Data access to ESA CCI
* CCIT-UR-DM0010: The data module shall have the means to attain meta-level status information per ECV type
* CCIT-UR-DM0013: The CCI Toolbox shall allow filtering
----
**Add data store**
:Description: Allow adding of user defined data stores specifying the access protocol and the layout of the data.
These data stores can be used to access datasets.
:URD-Source:
* CCIT-UR-DM0011: Data access to non-CCI data
----
**Open dataset**
:Description: Allow opening an ECV dataset given an identifier returned by the *data store query*.
The dataset returned complies to the Cate common data model.
The dataset to be returned can optionally be constrained in time and space.
:URD-Source:
* CCIT-UR-DM0001: Data access and input
* CCIT-UR-DM0004: Open multiple inputs
* CCIT-UR-DM0005: Data access using different protocols>
* CCIT-UR-DM0007: Open single ECV
* CCIT-UR-DM0008: Open multiple ECV
* CCIT-UR-DM0009: Open any ECV
* CCIT-UR-DM0012: Open different formats
Verification
============
The module's unit-tests are located in
`test/test_ds.py <https://github.com/CCI-Tools/cate/blob/master/test/test_ds.py>`_
and may be executed using ``$ py.test test/test_ds.py --cov=cate/core/ds.py`` for extra code coverage information.
Components
==========
"""
import datetime
import glob
import itertools
import logging
import re
from abc import ABCMeta, abstractmethod
from enum import Enum
from typing import Sequence, Optional, Union, Any, Dict, Set, List
import xarray as xr
from .cdm import Schema, get_lon_dim_name, get_lat_dim_name
from .opimpl import normalize_missing_time, normalize_coord_vars, normalize_impl, subset_spatial_impl
from .types import PolygonLike, TimeRange, TimeRangeLike, VarNamesLike, ValidationError
from ..util.monitor import Monitor
__author__ = "Norman Fomferra (Brockmann Consult GmbH), " \
"Marco Zühlke (Brockmann Consult GmbH), " \
"Chris Bernat (Telespazio VEGA UK Ltd)"
URL_REGEX = re.compile(
r'^(?:http|ftp)s?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # domain
r'localhost|' # localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
_LOG = logging.getLogger('cate')
class DataAccessWarning(UserWarning):
    """
    Warning category used by Cate's data stores and data sources to report
    non-fatal problems encountered while handling data.
    """
    pass
class DataAccessError(Exception):
    """
    Exception raised by Cate's data stores and data sources to report
    problems handling data.
    """
class NetworkError(ConnectionError):
    """
    Exception raised by Cate's data stores and data sources when a network
    problem occurs or an endpoint cannot be found or reached.

    NOTE(review): this subclasses ``ConnectionError``, not
    :class:`DataAccessError`, so handlers catching ``DataAccessError`` will
    NOT catch it — confirm that is intentional.
    """
    pass
class DataSource(metaclass=ABCMeta):
    """
    An abstract data source from which datasets can be retrieved.
    """

    @property
    @abstractmethod
    def id(self) -> str:
        """Data source identifier."""

    # TODO (forman): issue #399 - remove it, no use
    @property
    def schema(self) -> Optional[Schema]:
        """
        The data :py:class:`Schema` for any dataset provided by this data source or ``None`` if unknown.
        Currently unused in cate.
        """
        return None

    # TODO (forman): issue #399 - make this a property or call it "get_temporal_coverage(...)"
    def temporal_coverage(self, monitor: Monitor = Monitor.NONE) -> Optional[TimeRange]:
        """
        The temporal coverage as tuple (*start*, *end*) where *start* and *end* are UTC ``datetime`` instances.

        :param monitor: a progress monitor.
        :return: A tuple of (*start*, *end*) UTC ``datetime`` instances or ``None`` if the temporal coverage is unknown.
        """
        return None

    @property
    @abstractmethod
    def data_store(self) -> 'DataStore':
        """The data store to which this data source belongs."""

    @property
    def status(self) -> 'DataSourceStatus':
        """
        Return information about data source accessibility.
        The default implementation always reports ``READY``.
        """
        return DataSourceStatus.READY

    # TODO (forman): issue #399 - remove "ds_id", see TODO on "DataStore.query()"
    def matches(self, ds_id: str = None, query_expr: str = None) -> bool:
        """
        Test if this data source matches the given *ds_id* or *query_expr*.
        If neither *ds_id* nor *query_expr* are given, the method returns False.

        :param ds_id: A data source identifier.
        :param query_expr: A query expression. Currently, only simple search strings are supported.
        :return: True, if this data source matches the given *ds_id* or *query_expr*.
        """
        # An exact, case-insensitive identifier match wins immediately.
        if ds_id and ds_id.lower() == self.id.lower():
            return True
        # Otherwise fall back to a case-insensitive substring search over
        # the identifier and the (optional) human-readable title.
        if query_expr:
            if query_expr.lower() in self.id.lower():
                return True
            if self.title and query_expr.lower() in self.title.lower():
                return True
        return False

    @abstractmethod
    def open_dataset(self,
                     time_range: TimeRangeLike.TYPE = None,
                     region: PolygonLike.TYPE = None,
                     var_names: VarNamesLike.TYPE = None,
                     protocol: str = None,
                     monitor: Monitor = Monitor.NONE) -> Any:
        """
        Open a dataset from this data source.

        :param time_range: An optional time constraint comprising start and end date.
               If given, it must be a :py:class:`TimeRangeLike`.
        :param region: An optional region constraint.
               If given, it must be a :py:class:`PolygonLike`.
        :param var_names: Optional names of variables to be included.
               If given, it must be a :py:class:`VarNamesLike`.
        :param protocol: **Deprecated.** Protocol name, if None selected default protocol
               will be used to access data.
        :param monitor: A progress monitor.
        :return: A dataset instance or ``None`` if no data is available for the given constraints.
        """

    @abstractmethod
    def make_local(self,
                   local_name: str,
                   local_id: str = None,
                   time_range: TimeRangeLike.TYPE = None,
                   region: PolygonLike.TYPE = None,
                   var_names: VarNamesLike.TYPE = None,
                   monitor: Monitor = Monitor.NONE) -> Optional['DataSource']:
        """
        Turns this (likely remote) data source into a local data source given a name and a number of
        optional constraints.

        If this is a remote data source, data will be downloaded and turned into a local data source which will
        be added to the data store named "local".

        If this is already a local data source, a new local data source will be created by copying
        required data or data subsets.

        The method returns the newly created local data source.

        :param local_name: A human readable name for the new local data source.
        :param local_id: A unique ID to be used for the new local data source.
               If not given, a new ID will be generated.
        :param time_range: An optional time constraint comprising start and end date.
               If given, it must be a :py:class:`TimeRangeLike`.
        :param region: An optional region constraint.
               If given, it must be a :py:class:`PolygonLike`.
        :param var_names: Optional names of variables to be included.
               If given, it must be a :py:class:`VarNamesLike`.
        :param monitor: A progress monitor.
        :return: the new local data source
        """
        pass

    @property
    def title(self) -> Optional[str]:
        """
        Human-readable data source title.
        The default implementation tries to retrieve the title from ``meta_info['title']``.
        """
        meta_info = self.meta_info
        if meta_info is None:
            return None
        return meta_info.get('title')

    # TODO (forman): issue #399 - explain expected metadata entries and their formats, e.g."variables"
    @property
    def meta_info(self) -> Optional[dict]:
        """
        Return meta-information about this data source.
        The returned dict, if any, is JSON-serializable.
        """
        return None

    @property
    def cache_info(self) -> Optional[dict]:
        """
        Return information about cached, locally available data sets.
        The returned dict, if any, is JSON-serializable.
        """
        return None

    @property
    def variables_info(self) -> Optional[dict]:
        """
        Return meta-information about the variables contained in this data source.
        The returned dict, if any, is JSON-serializable.
        """
        return None

    @property
    def info_string(self) -> str:
        """
        Return a textual representation of the meta-information about this data source.
        Useful for CLI / REPL applications.
        """
        meta_info = self.meta_info
        if not meta_info:
            return 'No data source meta-information available.'
        # Width of the longest key, used to align the values into a column.
        max_len = 0
        for name in meta_info.keys():
            max_len = max(max_len, len(name))
        info_lines = []
        for name, value in meta_info.items():
            # The (potentially huge) 'variables' entry is deliberately skipped.
            if name != 'variables':
                info_lines.append('%s:%s %s' % (name, (1 + max_len - len(name)) * ' ', value))
        return '\n'.join(info_lines)

    def __str__(self):
        return self.info_string

    # TODO (forman): issue #399 - remove @abstractmethod, provide reasonable default impl. to make it a convenient ABC
    @abstractmethod
    def _repr_html_(self):
        """Provide an HTML representation of this object for IPython."""

    def _cannot_access_error(self, time_range=None, region=None, var_names=None,
                             verb="open", cause: BaseException = None, error_cls=DataAccessError):
        """
        Build and log an *error_cls* instance describing a failed attempt to
        *verb* this data source, listing whichever constraints were supplied.
        The error object is returned (not raised) so callers decide what to do.
        """
        error_message = f'Failed to {verb} data source "{self.id}"'
        constraints = []
        # Only mention constraints that were actually supplied (non-None,
        # non-empty-string).
        if time_range is not None and time_range != "":
            constraints.append("time range")
        if region is not None and region != "":
            constraints.append("region")
        if var_names is not None and var_names != "":
            constraints.append("variable names")
        if constraints:
            error_message += " for given " + ", ".join(constraints)
        if cause is not None:
            error_message += f": {cause}"
        _LOG.info(error_message)
        return error_cls(error_message)

    def _empty_error(self, time_range=None):
        """
        Build and log a :py:class:`DataAccessError` reporting that this data
        source yielded no datasets (optionally within *time_range*).
        """
        error_message = f'Data source "{self.id}" does not seem to have any datasets'
        if time_range is not None:
            error_message += f' in given time range {TimeRangeLike.format(time_range)}'
        _LOG.info(error_message)
        return DataAccessError(error_message)
class DataSourceStatus(Enum):
    """
    Enum stating the current state of data source accessibility.

    * READY - data is complete and ready to use
    * ERROR - data initialization process has been interrupted, causing that data source is incomplete or/and corrupted
    * PROCESSING - data source initialization process is in progress.
    * CANCELLED - data initialization process has been intentionally interrupted by user
    """
    # BUG FIX: the original member definitions carried trailing commas on the
    # first three members, which made their ``.value`` a 1-tuple (e.g.
    # ``('READY',)``) while ``CANCELLED.value`` was the plain string
    # ``'CANCELLED'``. All members now consistently use plain string values,
    # so ``DataSourceStatus('READY')`` and JSON serialization work uniformly.
    READY = "READY"
    ERROR = "ERROR"
    PROCESSING = "PROCESSING"
    CANCELLED = "CANCELLED"
class DataStoreNotice:
    """
    A short notice that can be exposed to users by data stores.
    """

    def __init__(self, id: str, title: str, content: str, intent: str = None, icon: str = None):
        """
        A short notice that can be exposed to users by data stores.

        :param id: Notice ID.
        :param title: A human-readable, plain text title.
        :param content: A human-readable text that may be formatted using Markdown.
        :param intent: Notice intent, may be one of "default", "primary", "success", "warning", "danger".
        :param icon: An optional icon name. See https://blueprintjs.com/docs/versions/1/#core/icons
        :raise ValueError: if a required field is missing or *intent* is not recognized.
        """
        # Validate the three mandatory text fields in one pass.
        for value, field in ((id, "id"), (title, "title"), (content, "content")):
            if value is None or value == "":
                raise ValueError("invalid " + field)
        if intent not in {None, "default", "primary", "success", "warning", "danger"}:
            raise ValueError("invalid intent")
        # All fields live in one private mapping; to_dict() hands out copies.
        self._dict = dict(id=id, title=title, content=content, icon=icon, intent=intent)

    @property
    def id(self):
        """Notice ID."""
        return self._dict["id"]

    @property
    def title(self):
        """Plain-text title."""
        return self._dict["title"]

    @property
    def content(self):
        """Markdown-capable content text."""
        return self._dict["content"]

    @property
    def intent(self):
        """Intent string or ``None``."""
        return self._dict["intent"]

    @property
    def icon(self):
        """Icon name or ``None``."""
        return self._dict["icon"]

    def to_dict(self):
        """Return a shallow copy of this notice as a JSON-serializable dict."""
        return dict(self._dict)
class DataStore(metaclass=ABCMeta):
    """
    Represents a data store of data sources.

    :param ds_id: Unique data store identifier.
    :param title: A human-readable title.
    :param is_local: Whether the store's data is locally available.
    """

    def __init__(self, ds_id: str, title: str = None, is_local: bool = False):
        self._id = ds_id
        # Fall back to the identifier when no explicit title is given.
        self._title = title or ds_id
        self._is_local = is_local

    @property
    def id(self) -> str:
        """
        Return the unique identifier for this data store.
        """
        return self._id

    @property
    def title(self) -> str:
        """
        Return a human-readable title for this data store.
        """
        return self._title

    @property
    def description(self) -> Optional[str]:
        """
        Return an optional, human-readable description for this data store as plain text.
        The text may use Markdown formatting.
        """
        return None

    @property
    def notices(self) -> List[DataStoreNotice]:
        """
        Return an optional list of notices for this data store that can be used to inform users about the
        conventions, standards, and data extent used in this data store or upcoming service outages.
        """
        return []

    @property
    def is_local(self) -> bool:
        """
        Whether this is a local data store, i.e. one that does not require any internet connection
        when its ``query()`` method is called or the ``open_dataset()`` and ``make_local()``
        methods on one of its data sources.
        """
        return self._is_local

    def invalidate(self):
        """
        A data store might use a cached list of available datasets which can change over time.
        Resources managed by a data store are external, so we have to consider that they can
        be updated by another process.
        This method asks the store to invalidate its internal structure and synchronize it with
        the current status.

        :return:
        """
        pass

    def get_updates(self, reset=False) -> Dict:
        """
        Ask the data store to retrieve the differences found between a previous
        data store status and the current one.
        The implementation returns a dictionary with the new ['new'] and removed ['del'] datasets.
        It also returns the reference time of the data store status taken as previous.
        The reset flag is used to clean up the support files, freeze and diff.

        :param reset: Set this flag to True to clean up all the support files, forcing a
               synchronization with the remote catalog.
        :return: A dictionary with keys { 'generated', 'source_ref_time', 'new', 'del' }.
                 generated: generation time, when the check has been executed
                 source_ref_time: when the local copy of the remote dataset has been made.
                                  It is also used by the system to refresh the current images
                                  when it is older than 1 day.
                 new: a list of new dataset entries
                 del: a list of removed datasets
        """
        # Default implementation: report no changes at all, with both
        # timestamps set to "now".
        generated = datetime.datetime.now()
        report = {"generated": str(generated),
                  "source_ref_time": str(generated),
                  "new": list(),
                  "del": list()}
        return report

    # TODO (forman): issue #399 - introduce get_data_source(ds_id), we have many usages in code, ALT+F7 on "query"
    # @abstractmethod
    # def get_data_source(self, ds_id: str, monitor: Monitor = Monitor.NONE) -> Optional[DataSource]:
    #     """
    #     Get data sources by identifier *ds_id*.
    #
    #     :param ds_id: Data source identifier.
    #     :param monitor: A progress monitor.
    #     :return: The data sources, or ``None`` if it doesn't exists.
    #     """

    # TODO (forman): issue #399 - remove "ds_id" keyword, use "get_data_source(ds_id)" instead
    # TODO (forman): issue #399 - code duplication: almost all implementations are same or very similar
    @abstractmethod
    def query(self, ds_id: str = None, query_expr: str = None, monitor: Monitor = Monitor.NONE) -> Sequence[DataSource]:
        """
        Retrieve data sources in this data store using the given constraints.

        :param ds_id: Data source identifier.
        :param query_expr: Query expression which may be used if *ds_id* is unknown.
        :param monitor: A progress monitor.
        :return: Sequence of data sources.
        """

    # TODO (forman): issue #399 - remove @abstractmethod, provide reasonable default impl. to make it a convenient ABC
    @abstractmethod
    def _repr_html_(self):
        """Provide an HTML representation of this object for IPython."""
class DataStoreRegistry:
    """
    Registry of :py:class:`DataStore` objects, keyed by data store identifier.
    """

    def __init__(self):
        self._data_stores = {}

    def get_data_store(self, ds_id: str) -> Optional[DataStore]:
        """Return the registered data store with identifier *ds_id*, or ``None``."""
        return self._data_stores.get(ds_id)

    def get_data_stores(self) -> Sequence[DataStore]:
        """Return all registered data stores as a list."""
        return list(self._data_stores.values())

    def add_data_store(self, data_store: DataStore):
        """Register *data_store* under its own identifier."""
        self._data_stores[data_store.id] = data_store

    def remove_data_store(self, ds_id: str):
        """Remove the data store registered as *ds_id* (``KeyError`` if absent)."""
        del self._data_stores[ds_id]

    def __len__(self):
        return len(self._data_stores)

    def __str__(self):
        return repr(self)

    def __repr__(self):
        import pprint
        return pprint.pformat(self._data_stores)

    def _repr_html_(self):
        # One table row per registered store: identifier and its repr.
        rows = ['<tr><td>%s</td><td>%s</td></tr>' % (ds_id, repr(data_store))
                for ds_id, data_store in self._data_stores.items()]
        return '<table>%s</table>' % '\n'.join(rows)
#: The data store registry of type :py:class:`DataStoreRegistry`.
#: Use it to add new data stores to Cate.
DATA_STORE_REGISTRY = DataStoreRegistry()
def find_data_sources_update(data_stores: Union[DataStore, Sequence[DataStore]] = None) -> Dict:
    """
    Find differences in the lists of data sources of the given data store(s) (all when None).

    The updates are returned as a dictionary whose keys are data store IDs.
    Each value is in turn a dictionary containing the lists of 'new' and 'del'
    (removed) datasets, in addition to 'source_ref_time', the time of the
    snapshot used to compare the data source lists.

    :param data_stores: Data store(s) to be checked. If None, all registered
           data stores will be checked.
    :return: Dictionary indexed by data store ID; values are the update reports
             returned by :py:meth:`DataStore.get_updates` for stores that changed.
    """
    # BUG FIX: previously an explicitly passed *data_stores* argument was
    # silently ignored — only the ``None`` branch ever populated the list.
    # Handle a single store and a sequence of stores like find_data_sources().
    if data_stores is None:
        data_store_list = DATA_STORE_REGISTRY.get_data_stores()
    elif isinstance(data_stores, DataStore):
        data_store_list = [data_stores]
    else:
        data_store_list = list(data_stores)
    response = dict()
    for ds in data_store_list:
        r = ds.get_updates()
        # Only report stores that actually gained or lost datasets.
        if r['new'] or r['del']:
            response[ds.id] = r
    return response
def find_data_sources(data_stores: Union[DataStore, Sequence[DataStore]] = None,
                      ds_id: str = None,
                      query_expr: str = None) -> Sequence[DataSource]:
    """
    Find data sources in the given data store(s) matching the given *ds_id* or *query_expr*.

    See also :py:func:`open_dataset`.

    :param data_stores: If given these data stores will be queried. Otherwise all registered data stores will be used.
    :param ds_id: A data source identifier.
    :param query_expr: A query expression.
    :return: All data sources matching the given constraints.
    """
    results = []
    primary_data_store = None
    data_store_list = []
    if data_stores is None:
        data_store_list = DATA_STORE_REGISTRY.get_data_stores()
    elif isinstance(data_stores, DataStore):
        # A single store was passed: query it first (and exclusively, unless
        # it yields no results).
        primary_data_store = data_stores
    else:
        data_store_list = data_stores
    for data_store in data_store_list:
        # The data store cache might be out of sync with the external resource.
        data_store.invalidate()
    if not primary_data_store and ds_id and ds_id.count('.') > 0:
        # Identifiers of the form "<store_id>.<source_name>" select the store
        # with the matching id as the primary store to query.
        primary_data_store_index = -1
        primary_data_store_id, data_source_name = ds_id.split('.', 1)
        for idx, data_store in enumerate(data_store_list):
            if data_store.id == primary_data_store_id:
                # NOTE(review): no break — if several stores share an id the
                # last match wins; presumably store ids are unique.
                primary_data_store_index = idx
        if primary_data_store_index >= 0:
            # NOTE(review): pop() assumes data_store_list is a mutable list;
            # an immutable sequence (e.g. tuple) passed as *data_stores*
            # would raise here — TODO confirm callers always pass lists.
            primary_data_store = data_store_list.pop(primary_data_store_index)
    if primary_data_store:
        results.extend(primary_data_store.query(ds_id=ds_id, query_expr=query_expr))
    if not results:
        # Fall back to querying all remaining stores.
        # noinspection PyTypeChecker
        for data_store in data_store_list:
            results.extend(data_store.query(ds_id=ds_id, query_expr=query_expr))
    return results
def open_dataset(data_source: Union[DataSource, str],
                 time_range: TimeRangeLike.TYPE = None,
                 region: PolygonLike.TYPE = None,
                 var_names: VarNamesLike.TYPE = None,
                 force_local: bool = False,
                 local_ds_id: str = None,
                 monitor: Monitor = Monitor.NONE) -> Any:
    """
    Open a dataset from a data source.

    :param data_source: A ``DataSource`` object or a string.
           Strings are interpreted as the identifier of an ECV dataset and must not be empty.
    :param time_range: An optional time constraint comprising start and end date.
           If given, it must be a :py:class:`TimeRangeLike`.
    :param region: An optional region constraint.
           If given, it must be a :py:class:`PolygonLike`.
    :param var_names: Optional names of variables to be included.
           If given, it must be a :py:class:`VarNamesLike`.
    :param force_local: Optional flag for remote data sources only.
           Whether to make a local copy of the data source if it's not present.
    :param local_ds_id: Optional, for remote data sources only.
           Local data source ID for the newly created copy of the remote data source.
    :param monitor: A progress monitor.
    :return: A new dataset instance.
    :raise ValidationError: If *data_source* is empty, or a string identifier that
           matches zero or more than one registered data source.
    """
    if not data_source:
        raise ValidationError('No data source given')
    if isinstance(data_source, str):
        # Resolve the identifier against all registered data stores; it must
        # match exactly one data source.
        data_store_list = list(DATA_STORE_REGISTRY.get_data_stores())
        data_sources = find_data_sources(data_store_list, ds_id=data_source)
        if len(data_sources) == 0:
            raise ValidationError(f'No data sources found for the given ID {data_source!r}')
        elif len(data_sources) > 1:
            raise ValidationError(f'{len(data_sources)} data sources found for the given ID {data_source!r}')
        data_source = data_sources[0]
    if force_local:
        with monitor.starting('Opening dataset', 100):
            # First materialize a local copy (80% of the reported progress),
            # then open that local copy (remaining 20%).
            data_source = data_source.make_local(local_name=local_ds_id if local_ds_id else "",
                                                 time_range=time_range, region=region, var_names=var_names,
                                                 monitor=monitor.child(80))
            return data_source.open_dataset(time_range, region, var_names, monitor=monitor.child(20))
    else:
        return data_source.open_dataset(time_range, region, var_names, monitor=monitor)
# noinspection PyUnresolvedReferences,PyProtectedMember
def open_xarray_dataset(paths,
                        region: PolygonLike.TYPE = None,
                        var_names: VarNamesLike.TYPE = None,
                        monitor: Monitor = Monitor.NONE,
                        **kwargs) -> xr.Dataset:
    r"""
    Open multiple files as a single dataset. This uses dask. If each individual file
    of the dataset is small, one Dask chunk will coincide with one temporal slice,
    e.g. the whole array in the file. Otherwise smaller dask chunks will be used
    to split the dataset.

    :param paths: Either a string glob in the form "path/to/my/files/\*.nc" or an explicit
           list of files to open.
    :param region: Optional region constraint.
    :param var_names: Optional variable names constraint.
    :param monitor: Optional progress monitor.
    :param kwargs: Keyword arguments directly passed to ``xarray.open_mfdataset()``
    :raise IOError: If no file matches the given paths/globs.
    """
    # paths could be a string or a list
    files = []
    if isinstance(paths, str):
        files.append(paths)
    else:
        files.extend(paths)
    # Each entry should be a file, a glob, or a URL; URLs are kept verbatim
    # (wrapped in a 1-tuple), everything else is glob-expanded.
    files = [(i,) if re.match(URL_REGEX, i) else glob.glob(i) for i in files]
    # Flatten the per-entry expansion results into a single list.
    files = list(itertools.chain.from_iterable(files))
    if not files:
        raise IOError('File {} not found'.format(paths))
    if 'concat_dim' in kwargs:
        concat_dim = kwargs.pop('concat_dim')
    else:
        concat_dim = 'time'
    if 'chunks' in kwargs:
        chunks = kwargs.pop('chunks')
    elif len(files) > 1:
        # By default the dask chunk size of xr.open_mfdataset is (1, lat, lon). E.g.,
        # the whole array is one dask slice irrespective of chunking on disk.
        #
        # netCDF files can also feature a significant level of compression rendering
        # the known file size on disk useless to determine if the default dask chunk
        # will be small enough that a few of them could comfortably fit in memory for
        # parallel processing.
        #
        # Hence we open the first file of the dataset and detect the maximum chunk sizes
        # used in the spatial dimensions.
        #
        # If no such sizes could be found, we use xarray's default chunking.
        chunks = get_spatial_ext_chunk_sizes(files[0])
    else:
        chunks = None

    def preprocess(raw_ds: xr.Dataset):
        # Add a time dimension if attributes "time_coverage_start" and "time_coverage_end" are found.
        norm_ds = normalize_missing_time(normalize_coord_vars(raw_ds))
        # Report one unit of progress per preprocessed file.
        monitor.progress(work=1)
        return norm_ds

    with monitor.starting('Opening dataset', len(files)):
        # autoclose ensures that we can open datasets consisting of a number of
        # files that exceeds OS open file limit.
        ds = xr.open_mfdataset(files,
                               concat_dim=concat_dim,
                               coords='minimal',
                               chunks=chunks,
                               preprocess=preprocess,
                               **kwargs)
    if var_names:
        # Drop every data variable that was not explicitly requested.
        ds = ds.drop([var_name for var_name in ds.data_vars.keys() if var_name not in var_names])
    ds = normalize_impl(ds)
    if region:
        ds = subset_spatial_impl(ds, region)
    return ds
def get_spatial_ext_chunk_sizes(ds_or_path: Union[xr.Dataset, str]) -> Dict[str, int]:
    """
    Get the spatial, external chunk sizes for the latitude and longitude dimensions
    of a dataset as provided in a variable's encoding object.

    :param ds_or_path: An xarray dataset or a path to a file that can be opened by xarray.
    :return: A mapping from dimension name to external chunk sizes, or ``None``
             if no lat/lon dimensions could be identified.
    """
    # Only close the dataset if we opened it here from a path.
    must_close = isinstance(ds_or_path, str)
    ds = xr.open_dataset(ds_or_path, decode_times=False) if must_close else ds_or_path
    try:
        lon_name = get_lon_dim_name(ds)
        lat_name = get_lat_dim_name(ds)
        if lon_name and lat_name:
            chunk_sizes = get_ext_chunk_sizes(ds, {lat_name, lon_name})
        else:
            chunk_sizes = None
    finally:
        # BUG FIX: previously the file handle leaked if dimension-name
        # detection or chunk-size collection raised; release it always.
        if must_close:
            ds.close()
    return chunk_sizes
def get_ext_chunk_sizes(ds: xr.Dataset, dim_names: Set[str] = None,
                        init_value=0, map_fn=max, reduce_fn=None) -> Dict[str, int]:
    """
    Get the external chunk sizes for each dimension of a dataset as provided in a variable's encoding object.

    :param ds: The dataset.
    :param dim_names: The names of dimensions of data variables whose external chunking should be collected.
    :param init_value: The initial value (not necessarily a chunk size) for mapping multiple different chunk sizes.
    :param map_fn: The mapper function that maps a chunk size from a previous (initial) value.
    :param reduce_fn: The reducer function that reduces multiple mapped chunk sizes to a single one.
    :return: A mapping from dimension name to external chunk sizes, or ``None`` if none were found.
    """
    agg_chunk_sizes = None
    for var_name in ds.variables:
        var = ds[var_name]
        if not var.encoding:
            continue
        chunk_sizes = var.encoding.get('chunksizes')
        # Use this variable only if it declares one chunk size per dimension
        # and (when a dimension filter is given) covers all requested dims.
        usable = (chunk_sizes
                  and len(chunk_sizes) == len(var.dims)
                  and (not dim_names or dim_names.issubset(set(var.dims))))
        if not usable:
            continue
        for dim_name, size in zip(var.dims, chunk_sizes):
            if dim_names and dim_name not in dim_names:
                continue
            if agg_chunk_sizes is None:
                agg_chunk_sizes = dict()
            old_value = agg_chunk_sizes.get(dim_name)
            # Fold this chunk size into the aggregate for its dimension.
            agg_chunk_sizes[dim_name] = map_fn(size, init_value if old_value is None else old_value)
    if agg_chunk_sizes and reduce_fn:
        agg_chunk_sizes = {k: reduce_fn(v) for k, v in agg_chunk_sizes.items()}
    return agg_chunk_sizes
def format_variables_info_string(variables: dict):
    """
    Return some textual information about the variables contained in this data source.
    Useful for CLI / REPL applications.

    :param variables: A sequence of variable metadata dictionaries.
    :return: A human-readable, multi-line description string.
    """
    if not variables:
        return 'No variables information available.'
    lines = []
    for variable in variables:
        # One header line per variable, followed by indented detail lines
        # and a blank separator line.
        lines.append(f"{variable.get('name', '?')} ({variable.get('units', '-')}):")
        lines.append(f" Long name: {variable.get('long_name', '?')}")
        lines.append(f" CF standard name: {variable.get('standard_name', '?')}")
        lines.append('')
    return '\n'.join(lines)
def format_cached_datasets_coverage_string(cache_coverage: dict) -> str:
    """
    Return a textual representation of information about cached, locally available data sets.
    Useful for CLI / REPL applications.

    :param cache_coverage: Mapping from period start date to period end date.
    :return: One "<start> to <end>" line per cached period, sorted by start date.
    """
    if not cache_coverage:
        return 'No information about cached datasets available.'
    # f-string date formatting delegates to strftime via __format__.
    lines = [f"{date_from:%Y-%m-%d} to {date_to:%Y-%m-%d}"
             for date_from, date_to in sorted(cache_coverage.items())]
    return '\n'.join(lines)
| CCI-Tools/ect-core | cate/core/ds.py | Python | mit | 33,154 | [
"NetCDF"
] | 12573c1bc8b69255d381c77f0b4f8ad29d110b2218ffe6338c6a81fbdc9605cc |
# Copyright 2011 by Eric Talevich. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Phylogenetics command line tool wrappers."""
from ._Phyml import PhymlCommandline
from ._Raxml import RaxmlCommandline
from ._Fasttree import FastTreeCommandline
# Make the public API explicit, so these names show up in the API docs.
__all__ = ["PhymlCommandline",
           "RaxmlCommandline",
           "FastTreeCommandline",
           ]
| zjuchenyuan/BioWeb | Lib/Bio/Phylo/Applications/__init__.py | Python | mit | 557 | [
"Biopython"
] | 35598990928fd0fe3f8197e15a957ad0ce9603eedb6758ea28815a8bc8b5aae9 |
"""
Tests traceorbit module. This module makes great use of Galpy so for
convenience a brief summary of Galpy coordinates is provided:
Galpy coordinates are [R, vR, vT, z, vz, phi]
By default, positions are scaled by LSR distance from galactic centre,
ro=8kpc, and velocities scaled by the LSR circular velocity,
vo = 220km/s. Time is scaled such that after 1 time unit has passed,
the LSR has travelled 1 radian about the galactic centre. The values are
returned in a [ntimes, 6]
array:
R : galactic radial distance /ro
vR : galactic radial velocity /vo
vT : circular velocity /vo
z : vertical distance from plane / ro
vz : vertical velocity / vo
phi : angle about the galaxy (anticlockwise from LSR's location at
t=0)
For example, the LSR at t=0.0 and t=1.0 as values:
[1., 0., 1., 0., 0., 0.]
[1., 0., 1., 0., 0., 1.]
"""
import logging
import numpy as np
import sys
from galpy.orbit import Orbit
from galpy.potential import MWPotential2014
from galpy.potential import MiyamotoNagaiPotential
sys.path.insert(0, '..')
import chronostar.traceorbit as torb
# Logging verbosity intended for this test module.
LOGGINGLEVEL = logging.DEBUG
def test_traceforwardThenBack():
    """Check that tracing a point forward then back for the same time step
    returns initial position
    """
    # NOTE(review): this early return disables the entire test body below —
    # presumably the test was deliberately switched off; confirm before
    # removing the return.
    return
    ABS_TOLERANCE = 1e-3
    xyzuvws = np.array([
        [0.,0.,25.,0.,0.,0.],
        # [10.,0.,-50.,0.,0.,0.],
        # [0.,0.,0.,10.,25.,30.,],
    ])
    age = 100.
    times = np.linspace(0,100,1001)  # NOTE(review): unused below
    for xyzuvw_start in xyzuvws:
        galpy_start = None  # NOTE(review): unused below
        # Trace forward by `age`, then back by the same amount; the round
        # trip should recover the starting phase-space point.
        xyzuvw_end = torb.trace_cartesian_orbit(xyzuvw_start,
                                                times=age,
                                                single_age=True,
                                                )
        xyzuvw_start_again = torb.trace_cartesian_orbit(xyzuvw_end,
                                                        times=-age,
                                                        single_age=True,
                                                        )
        assert np.allclose(xyzuvw_start, xyzuvw_start_again,
                           atol=ABS_TOLERANCE)
def test_galpy_stationary_conversions():
    """Check if galpy conversions behave as expected where everything
    is at time 0"""
    # Test LSR (Local Standard of Rest): the chronostar origin maps to
    # galpy [R/ro, vR/vo, vT/vo, z/ro, vz/vo, phi] = [1, 0, 1, 0, 0, 0].
    lsr_chron = np.zeros(6)
    lsr_galpy = np.array([1.,0,1,0,0,0])
    assert np.allclose(lsr_chron,
                       torb.convert_galpycoords2cart(lsr_galpy, ts=0.))
    assert np.allclose(lsr_galpy,
                       torb.convert_cart2galpycoords(lsr_chron, ts=0.))
    # Test galactic centre (tiny but non-zero galpy values used, not 0)
    gc_chron = np.array([8000.,0,0,0,-220.,0,])
    gc_galpy = np.ones(6) * 1e-15
    assert np.allclose(gc_chron,
                       torb.convert_galpycoords2cart(gc_galpy, ts=0.))
    assert np.allclose(gc_galpy,
                       torb.convert_cart2galpycoords(gc_chron, ts=0.))
    # Test simple, off-origin point: 60 degrees (pi/3) anticlockwise of the
    # LSR on the circular orbit.
    off_chron = np.array([4000, 8000.*np.sqrt(3)/2, 0,
                          np.sin(np.pi/3)*220.,
                          -np.cos(np.pi/3)*220.,
                          0])
    off_galpy = np.array([1.,0,1,0,0,np.pi/3.])
    assert np.allclose(off_galpy,
                       torb.convert_cart2galpycoords(off_chron, ts=0.))
    assert np.allclose(off_chron,
                       torb.convert_galpycoords2cart(off_galpy, ts=0.))
    # Test random positions
    # NOTE(review): no RNG seed here, so this section is not reproducible
    # across runs — consider np.random.seed as in the other tests.
    SPREAD = 100000
    NSAMPLES = int(1e6)
    many_pos_chron = (np.random.rand(NSAMPLES,6) - 0.5) * SPREAD  # uniform in [-SPREAD/2, SPREAD/2)
    many_pos_galpy = torb.convert_cart2galpycoords(many_pos_chron, ts=0.)
    assert np.allclose(many_pos_chron,
                       torb.convert_galpycoords2cart(many_pos_galpy, ts=0.),
                       atol=1e-2)
def test_galpy_moving_conversions():
    """Check if galpy conversions behave as expected where time
    is allowed to vary."""
    lsr_chron = np.zeros(6)
    lsr_galpy = np.array([1.,0,1,0,0,0])
    # Incorporate positive time into lsr position checks:
    # build (NSTEPS, 6) arrays of the LSR repeated at each time step, with
    # the galpy phi column set to the elapsed (galpy) time.
    NSTEPS = 10
    galpy_times = np.linspace(0., 2*np.pi, NSTEPS)
    lsrs_chron = np.repeat(lsr_chron, NSTEPS).reshape(6,-1).T
    lsrs_galpy = np.repeat(lsr_galpy, NSTEPS).reshape(6,-1).T
    lsrs_galpy[:,-1] = galpy_times
    chron_times = torb.convert_bovytime2myr(galpy_times)
    assert np.allclose(
        lsrs_chron,
        torb.convert_galpycoords2cart(lsrs_galpy, ts=galpy_times))
    assert np.allclose(
        lsrs_galpy,
        torb.convert_cart2galpycoords(lsrs_chron, ts=chron_times)
    )
    # Incorporate negative time into lsr position checks (same structure,
    # time running backwards).
    galpy_times = np.linspace(0., -2*np.pi, NSTEPS)
    lsrs_chron = np.repeat(lsr_chron, NSTEPS).reshape(6,-1).T
    lsrs_galpy = np.repeat(lsr_galpy, NSTEPS).reshape(6,-1).T
    lsrs_galpy[:,-1] = galpy_times
    chron_times = torb.convert_bovytime2myr(galpy_times)
    assert np.allclose(
        lsrs_chron,
        torb.convert_galpycoords2cart(lsrs_galpy, ts=galpy_times))
    assert np.allclose(
        lsrs_galpy,
        torb.convert_cart2galpycoords(lsrs_chron, ts=chron_times)
    )
    # Test random positions with random times
    # NOTE(review): unseeded RNG — this section is not reproducible across runs.
    SPREAD = int(1e4)  # pc
    NSAMPLES = 100
    many_pos_chron = (np.random.rand(NSAMPLES,6) - 0.5) * SPREAD  # uniform in [-SPREAD/2, SPREAD/2)
    many_chron_times = np.random.rand(NSAMPLES) * 100  # Myr
    many_pos_galpy = torb.convert_cart2galpycoords(
        many_pos_chron, ts=many_chron_times
    )
    many_galpy_times = torb.convert_myr2bovytime(many_chron_times)
    for i in range(NSAMPLES):
        assert np.allclose(many_pos_chron[i],
                           torb.convert_galpycoords2cart(
                               many_pos_galpy[i], ts=many_galpy_times[i]
                           ),
                           atol=1e-2)
def test_careful_traceback_and_forward():
    """Step by step, project orbit forward, then backward"""
    bovy_times = np.array([0., np.pi/3.])
    chron_times = torb.convert_bovytime2myr(bovy_times)
    # Point 60 degrees (pi/3) anticlockwise of the LSR, on the circular orbit.
    init_pos_chron = np.array([
        4000, 8000.*np.sqrt(3)/2, 0,
        np.sin(np.pi/3)*220.,
        -np.cos(np.pi/3)*220.,
        0
    ])
    init_pos_galpy = torb.convert_cart2galpycoords(init_pos_chron, ts=0.)
    assert np.allclose(np.array([1.,0,1,0,0,np.pi/3.]),
                       init_pos_galpy)
    # Integrate forward with galpy: the orbit should stay on the circular
    # LSR orbit, advancing only in phi by the elapsed galpy time.
    o = Orbit(vxvv=init_pos_galpy, ro=8., vo=220.)
    o.integrate(bovy_times, MWPotential2014, method='odeint')
    orbit_galpy = o.getOrbit()
    assert np.allclose(init_pos_galpy, orbit_galpy[0])
    assert np.allclose(init_pos_galpy
                       + np.array([0.,0.,0.,0.,0.,bovy_times[-1]]),
                       orbit_galpy[-1])
    orbit_chron = torb.convert_galpycoords2cart(orbit_galpy,
                                                ts=bovy_times)
    assert np.allclose(init_pos_chron, orbit_chron[0])
    # In the co-rotating chronostar frame the point should not move at all.
    assert np.allclose(init_pos_chron,
                       orbit_chron[-1])
    # Setup for backwards time integration
    # Currently at time of PI/3
    back_init_pos_chron = orbit_chron[-1]
    # NOTE(review): keyword `bovy_times=` used here vs `ts=` elsewhere —
    # verify convert_cart2galpycoords accepts both spellings.
    back_init_pos_galpy = torb.convert_cart2galpycoords(
        back_init_pos_chron,
        bovy_times=bovy_times[-1],
    )
    assert np.allclose(back_init_pos_galpy,
                       torb.convert_cart2galpycoords(
                           back_init_pos_chron,
                           bovy_times=bovy_times[-1]
                       ))
    # Integrate backwards over the same interval; phi should retreat by the
    # same amount and we should land back on the initial galpy position.
    back_o = Orbit(vxvv=back_init_pos_galpy, ro=8., vo=220.)
    back_o.integrate(-1*bovy_times, MWPotential2014, method='odeint')
    back_orbit_galpy = back_o.getOrbit()
    assert np.allclose(back_init_pos_galpy, back_orbit_galpy[0])
    assert np.allclose(back_init_pos_galpy
                       - np.array([0.,0.,0.,0.,0.,bovy_times[-1]]),
                       back_orbit_galpy[-1])
    assert np.allclose(init_pos_galpy, back_orbit_galpy[-1])
    back_orbit_chron = torb.convert_galpycoords2cart(
        back_orbit_galpy,
        ts=bovy_times[::-1],
    )
    assert np.allclose(init_pos_chron, back_orbit_chron[-1])
def test_traceback_and_forward():
    """The test that shows things are broken"""
    time = 10.  # Myr
    times = np.array([0., 10.])
    # NOTE(review): the next assignment is dead — init_pos_chron is
    # re-assigned immediately below; presumably a debugging leftover.
    init_pos_chron = np.array([10.,0.,30.,0.,0.,0.])
    # init_pos_chron = np.zeros(6)
    init_pos_chron = np.array([4000, 8000.*np.sqrt(3)/2, 0,
                               np.sin(np.pi/3)*220.,
                               -np.cos(np.pi/3)*220.,
                               0])
    # NOTE(review): this first result is overwritten below; the call still
    # exercises trace_cartesian_orbit with single_age=False — confirm whether
    # that smoke-test is intentional before removing.
    final_pos_chron = torb.trace_cartesian_orbit(
        init_pos_chron, times=times, single_age=False)[-1]
    final_pos_chron = torb.traceforward_from_now(
        init_pos_chron, time=time,
    )
    # Tracing forward then back should recover the initial position.
    assert np.allclose(
        init_pos_chron,
        torb.traceback_to_now(
            final_pos_chron, time,
        ),
        atol=1e-5
    )
def test_multi_traceback_and_forward():
    """Round-trip several random phase-space points: forward then back (and
    back then forward) should recover each original point."""
    np.random.seed(0)
    NPOSITIONS = 10
    # Uniform random positions in [-10, 10) and time spans in [0, 30) Myr.
    init_positions = np.random.rand(NPOSITIONS, 6) * 20 - 10
    time_spans = np.random.rand(NPOSITIONS) * 30
    for pos, time in zip(init_positions, time_spans):
        final_pos = torb.traceforward_from_now(pos, time)
        init_pos = torb.traceback_to_now(final_pos, time)
        assert np.allclose(pos, init_pos, atol=1e-3)
    # Same round trip in the opposite order: back first, then forward.
    for pos, time in zip(init_positions, time_spans):
        print('time: {}'.format(time))
        final_pos = torb.traceback_from_now(pos, time)
        init_pos = torb.traceforward_to_now(final_pos, time)
        assert np.allclose(pos, init_pos, atol=1e-3)
def test_interval_tracing():
    """Trace an orbit through a sequence of time steps that sums to zero and
    check that we end up back at the starting point."""
    np.random.seed(0)
    start = np.random.rand(6) * 20 - 10
    pos = start
    # Net displacement in time is 3 - 10 - 3 + 10 = 0 Myr.
    for dt in [3., -10., -3., 10]:
        pos = torb.base_trace_cartesian_orbit(
            pos,
            end_time=dt,
        )
    assert np.allclose(start, pos, atol=1e-3)
def test_interval_tracing_orig():
    """Same zero-sum round-trip check as `test_interval_tracing`, but via the
    public `trace_cartesian_orbit` wrapper with `single_age=True`."""
    np.random.seed(0)
    start = np.random.rand(6) * 20 - 10
    time_steps = [3., -10., -3., 10]
    current_pos = start
    for time_step in time_steps:
        current_pos = torb.trace_cartesian_orbit(
            current_pos,
            times=time_step,
            single_age=True,
        )
    # BUG FIX: `1e-3` was previously passed positionally, where the third
    # positional parameter of np.allclose is `rtol`, not `atol`. The sibling
    # test `test_interval_tracing` uses atol=1e-3; match that intent here.
    assert np.allclose(start, current_pos, atol=1e-3)
"Galaxy"
] | 96f4f5be7d55ed96bf396af7a1c3cfa9f05cf466822913e73824149ead034e08 |
from copy import deepcopy
import os
ROOT = os.path.dirname(os.path.abspath(__file__)) + '/'
from astropy import log as logger
from astropy.io import fits
import numpy as np
from numpy.random import normal
from .psf import GaussianPSF, FilePSF, FunctionPSF
from .filter import Filter
from .utils.plot import MakePlots
from .utils.resolution import ConservingZoom, central
from .utils.tools import properties, grid_units, get_slices, average_collapse, central_wav
from .utils.units import ConvertUnits
# submitting PhD thesis today :)
class SyntheticCube(object):
    '''
    SyntheticCube is part of the FluxCompensator. It converts
    input_arrays (e. g. HYPERION ModelOutput) to "realistic"
    synthetic observations (e.g. accounts for PSF, filters & noise).
    It contains attributes like ModelOutput (see Notes).
    If input_array is already a SyntheticCube object, the attributes are
    passed. If input_array is not a SyntheticCube object, SyntheticCube
    specific attributes are defined and then passed.
    Parameters
    ----------
    input_array : SyntheticCube, ModelOutput, optional
        input_array also reads arrays with ModelOutput like properties.
    unit_out : str, optional
        The output units for SyntheticCube val. Valid options are:
        * ``'ergs/cm^2/s'``
        * ``'ergs/cm^2/s/Hz'``
        * ``'Jy'``
        * ``'mJy'``
        * ``'MJy/sr'``
        The default is ``'ergs/cm^2/s'``.
    name : str
        The name of the FluxCompensator object until another
        input_array is called. The default is ``None``.
    Attributes
    ----------
    wav : numpy.ndarray
        The wavelengths of val cube slices in microns.
    val : numpy.ndarray
        The 3D cube with shape (x, y, wav).
    units : str
        Current units of the val cube.
    distance : str
        Distance to the observed object in cm.
    x_min : float
        Physical offset from axis origin in FOV in cm.
    x_max : float
        Physical offset from axis origin in FOV in cm.
    y_min : float
        Physical offset from axis origin in FOV in cm.
    y_max : float
        Physical offset from axis origin in FOV in cm.
    lon_min : float
        Minimal longitudinal angle.
    lon_max : float
        Maximal longitudinal angle.
    lat_min : float
        Minimal latitudinal angle.
    lat_max : float
        Maximal latitudinal angle.
    pix_area_sr : float
        Pixel area per sr.
    Notes
    -----
    unit_in : str
        Unit of val in input_array. Valid options are:
        * ``'ergs/cm^2/s'``
        * ``'ergs/cm^2/s/Hz'``
        * ``'Jy'``
        * ``'mJy'``
        * ``'MJy/sr'``
    grid_unit : float
        Physical unit of FOV axis in cm. Valid options are:
        * ``au`` in cm
        * ``pc`` in cm
        * ``kpc`` in cm
    grid_unit_name
        Astronomical unit of FOV axis. Valid options are:
        * ``'au'``
        * ``'pc'``
        * ``'kpc'``
    FOV : tuple
        Tuple ``FOV(x,y)`` of Field of View pixel entries.
        * pixel in x direction: ``FOV[0]``
        * pixel in y direction: ``FOV[1]``
    name : str
        The name of the FluxCompensator object until another
        input_array is called. The default is ``None``.
    stage : str
        Gives current operation stage of SyntheticCube.
        E. g. ``'SyntheticCube: convolve_filter'``
    log : list
        List of strings of the previous and current stages.
    filter : dict
        Dictionary ``filter = {name, waf_0, waf_min, waf_max}``
        of the applied filter.
        * name of filter: ``filter['name']``
        * central wavelength: ``filter['waf_0']``
        * minimal wavelength: ``filter['waf_min']``
        * maximal wavelength: ``filter['waf_max']``
    Returns
    -------
    cube : SyntheticCube
        3D val array with SyntheticCube properties.
    image : SyntheticImage
        2D val array with SyntheticImage properties.
    sed : SyntheticSED
        1D val array (collapsed rough SED) with SyntheticSED properties.
    flux : SyntheticFlux
        0D val array (scalar) with SyntheticFlux properties.
    '''
    def __init__(self, input_array, unit_out='ergs/cm^2/s', name=None):
        # Hyperion ModelOutput attributes
        #if input_array.val.ndim == 3:
        self.val = np.array(deepcopy(input_array.val))
        #else:
        # raise Exception('input_array does not have the right dimensions. numpy array of (x, y, wav) is required.')
        self.wav = np.array(deepcopy(input_array.wav))
        self.units = input_array.units
        self.distance = input_array.distance
        self.x_max = input_array.x_max
        self.x_min = input_array.x_min
        self.y_max = input_array.y_max
        self.y_min = input_array.y_min
        self.lon_min = input_array.lon_min
        self.lon_max = input_array.lon_max
        self.lat_min = input_array.lat_min
        self.lat_max = input_array.lat_max
        self.pix_area_sr = input_array.pix_area_sr
        ##################
        # new attributes #
        ##################
        if isinstance(input_array, SyntheticCube):
            # attributes are passed, since input_array is SyntheticCube
            # physical values
            self.unit_in = input_array.unit_in
            self.unit_out = input_array.unit_out
            self.grid_unit = input_array.grid_unit
            self.grid_unit_name = input_array.grid_unit_name
            # properties of cube
            self.FOV = deepcopy(input_array.FOV)
            # name
            self.name = input_array.name
            self.stage = input_array.stage
            self.log = deepcopy(input_array.log)
            # filter
            self.filter = deepcopy(input_array.filter)
        else: # attributes are defined, since input_array is NOT SyntheticCube
            # physical values
            self.unit_in = input_array.units
            self.unit_out = unit_out
            self.grid_unit = grid_units(self.x_max - self.x_min)['grid_unit']
            self.grid_unit_name = grid_units(self.x_max - self.x_min)['grid_unit_name']
            self.FOV = (self.x_max - self.x_min, self.y_max - self.y_min)
            # name
            self.name = name
            self.stage = 'SyntheticCube: initial'
            self.log = [self.stage]
            # filter
            self.filter = {'name': None, 'waf_0': None, 'waf_min': None, 'waf_max': None}
        # convert into val units into unit_out
        # (self.resolution needs FOV, distance and val to be set above)
        s = ConvertUnits(wav=self.wav, val=self.val)
        self.val = s.get_unit(in_units=self.unit_in, out_units=self.unit_out, input_resolution=self.resolution['arcsec'])
        self.units = self.unit_out
    def extinction(self, A_v, input_opacities=None):
        '''
        Accounts for reddening.
        Parameters
        ----------
        A_v : Value of the visible extinction.
        input_opacities : ``None``, str
            If ``None`` standard extinction law is used.
            Otherwise a e. g. input_opacities.txt file can be passed
            as a str to read an opacity file with column #1 wav in microns
            and column #2 in cm^2/g.
            Default is ``None``.
        Returns
        -------
        cube : SyntheticCube
        '''
        stage = 'SyntheticCube: extinction'
        # read own extinction law
        if input_opacities is None:
            t = np.loadtxt(ROOT + 'database/extinction/extinction_law.txt')
        else:
            t = np.loadtxt(input_opacities)
        wav_ext = t[:, 0]
        k_lam = t[:, 1]
        # wav_ext monotonically increasing
        if wav_ext[0] > wav_ext[1]:
            wav_ext = wav_ext[::-1]
            k_lam = k_lam[::-1]
        # opacity at the V band (0.55 micron) normalises the law
        k_v = np.interp(0.550, wav_ext, k_lam)
        # interpolate to get A_int for a certain wavelength
        k = np.interp(self.wav, wav_ext, k_lam)
        A_int_lam = A_v * (k / k_v)
        # apply extinction law (the :len(self.wav) slices cover the whole
        # wavelength axis, so every slice is attenuated)
        val_ext = np.zeros(shape=np.shape(self.val))
        val_ext[:,:,:len(self.wav)] = self.val[:,:,:len(self.wav)] * 10**(-0.4 * A_int_lam[:len(self.wav)])
        # return SimulateCube
        c = SyntheticCube(self)
        c.val = val_ext
        c.stage = stage
        c.log.append(c.stage)
        return c
    def change_resolution(self, new_resolution, grid_plot=None):
        '''
        Changes the resolution of every slice of the val cube.
        Parameters
        ----------
        new_resolution : Resolution which the val array should get in ``arcsec/pixel.``
        grid_plot : ``None``, ``True``
            If ``True`` old and new resolution is visualized in a plot.
            Default is ``None``.
        Returns
        -------
        cube : SyntheticCube
        '''
        stage = 'SyntheticCube: change_resolution'
        # debugging comment
        logger.debug('-' * 70)
        logger.debug(stage)
        logger.debug('-' * 70)
        logger.debug('total value before zoom : ' + str('%1.4e' % np.sum(self.val)) + ' ' + str(self.units))
        # match resolution of psf and val slice
        f = ConservingZoom(array=self.val, initial_resolution=self.resolution['arcsec'], new_resolution=new_resolution)
        zoomed_val = f.zoom()
        # average after changing resolution for MJy/sr
        # (surface-brightness units must be averaged, not summed, when
        # pixels change size)
        if self.units == 'MJy/sr' or self.units == 'Jy/arcsec^2':
            # size of new pixel in units of old pixel
            size = new_resolution ** 2 / self.resolution['arcsec'] ** 2
            zoomed_val = zoomed_val / size
        if grid_plot is not None:
            f.zoom_grid(self.name)
        # debugging comment
        logger.debug('total value after zoom : ' + str('%1.4e' % np.sum(zoomed_val)) + ' ' + str(self.units))
        # return SimulateCube
        c = SyntheticCube(self)
        c.val = zoomed_val
        c.stage = stage
        c.log.append(c.stage)
        c.FOV = (f.len_nx / f.len_nrx * self.FOV[0], f.len_ny / f.len_nry * self.FOV[1])
        return c
    def central_pixel(self, dx, dy):
        '''
        Move array right and up to create a central pixel.

        Parameters
        ----------
        dx, dy : int
            Pixel shifts forwarded to ``utils.resolution.central`` -- TODO
            confirm sign convention against that helper.

        Returns
        -------
        cube : SyntheticCube
        '''
        stage = 'SyntheticCube: central_pixel'
        ce = central(array=self.val, dx=dx, dy=dy)
        # the shift may crop the array; rescale the FOV accordingly
        len_x_old = float(self.pixel[0])
        len_x_new = float(len(ce[:,0]))
        len_y_old = float(self.pixel[1])
        len_y_new = float(len(ce[0,:]))
        old_FOV = self.FOV
        new_FOV = (len_x_new / len_x_old * old_FOV[0], len_y_new / len_y_old * old_FOV[1])
        # return SimulateCube
        c = SyntheticCube(self)
        c.val = ce
        c.stage = stage
        c.log.append(c.stage)
        c.FOV = new_FOV
        return c
    def convolve_psf(self, psf):
        '''
        Convolves every slice of the val cube with a PSF of choice.
        Parameters
        ----------
        psf : GaussianPSF, FilePSF, database, FunctionPSF
            * GaussianPSF(self, diameter): Convolves val with Gaussian PSF.
            * FilePSF(self, psf_file, condensed): Reads PSF from input file.
            * database: PSF object defined in FluxCompensator database.
            * FunctionPSF(self, psf_function, width): Convolves val with calculated PSF.
        Returns
        -------
        cube : SyntheticCube
        '''
        stage = 'SyntheticCube: convolve_PSF'
        # debugging comments
        if isinstance(psf, GaussianPSF):
            logger.debug('-' * 70)
            logger.debug(stage + 'with GaussianPSF')
            logger.debug('-' * 70)
        # convolve value with classes GaussianPSF, FilePSF and FunctionPSF
        # (slice by slice, since the PSF may be wavelength dependent)
        val = self.val.copy()
        for i in range(len(self.wav)):
            val[:, :, i] = psf.convolve(wav = self.wav[i], array = self.val[:,:, i], resolution = self.resolution)
        # return SimulateCube
        c = SyntheticCube(self)
        c.val = val
        c.stage = stage
        c.log.append(c.stage)
        return c
    def convolve_filter(self, filter_input, plot_rebin=None, plot_rebin_dpi=None):
        '''
        Convolves slice within filter limits into a 2D image.
        Parameters
        ----------
        filter_input : object
            * database : if filter ``name`` from FluxCompensator database is used.
            * Filter : if own filter is used.
        plot_rebin : ``True``, ``None``
            Switch to plot the rebined filter and the original filter in one plot.
        plot_rebin_dpi : ``None``, scalar > 0
            The resolution in dots per inch.
            ``None`` is default and will use the value savefig.dpi
            in the matplotlibrc file.
        Returns
        -------
        image : SyntheticImage
        '''
        stage = 'SyntheticCube: convolve_filter'
        # debugging comment
        logger.debug('-' * 70)
        logger.debug(stage)
        logger.debug('-' * 70)
        weight = filter_input.rebin(self.wav, self.val)
        # returns weight{'wav_short' 'val_short' 'Response_new' 'filter_index' 'wavf_0' 'waf_min' 'waf_max' 'filter_name'}
        wav_short = weight['wav_short']
        val_short = weight['val_short']
        filter_index = weight['filter_index']
        Response_new = weight['Response_new']
        waf_0 = weight['waf_0']
        waf_min = weight['waf_min']
        waf_max = weight['waf_max']
        filter_name = weight['filter_name']
        if plot_rebin is not None:
            plot = filter_input.plot(val_name=self.name, dpi=plot_rebin_dpi)
        # weight val_short with rebined response
        val = val_short.copy()
        val[:, :, :len(wav_short)] = val_short[:,:, :len(wav_short)] * Response_new[:len(wav_short)]
        # collapse remaining cube into 2D
        val_2D = np.sum(val, axis=2)
        # return SyntheticImage
        # (local import avoids a circular import between cube and image)
        from .image import SyntheticImage
        i = SyntheticImage(self)
        i.log.append(stage)
        i.stage = 'SyntheticImage: initial'
        i.log.append(i.stage)
        i.filter = {'name': filter_name, 'waf_0': waf_0, 'waf_min': waf_min, 'waf_max': waf_max}
        i.val = val_2D
        i.wav = np.array(waf_0)
        return i
    def add_noise(self, mu_noise, sigma_noise, seed=None, diagnostics=None):
        '''
        Adds normal distributed noise to every slice in the val cube
        of SyntheticCube.
        Parameters
        ----------
        mu_noise : float
            Mean of the normal distribution.
            Good choice: mu_noise = 0.
        sigma_noise : float
            Standard deviation of the normal distribution. Good choice around:
            * ``'ergs/cm^2/s'`` : sigma_noise = 10.**(-13)
            * ``'ergs/cm^2/s/Hz'`` : sigma_noise = 10.**(-26)
            * ``'Jy'`` : sigma_noise = 10.**(-3)
            * ``'mJy'`` : sigma_noise = 10.**(-1)
            * ``'MJy/sr'`` : sigma_noise = 10.**(-10)
        seed : float, ``None``
            When float seed fixes the random numbers to a certain
            sequence in order to create reproducible results.
            Default is ``None``.
        diagnostics : truetype
            When ``True`` noise array is stored in a fits file.
        Returns
        -------
        cube : SyntheticCube
        '''
        stage = 'SyntheticCube: add_noise'
        # add different noise with same mu and sigma to 3D cube
        # NOTE(review): when seed is given, the RNG is re-seeded on every
        # slice, so all wavelength slices receive identical noise.
        val = self.val.copy()
        for i in range(len(self.wav)):
            if sigma_noise != 0. and sigma_noise != 0:
                if seed is not None:
                    np.random.seed(seed=seed)
                noise = normal(mu_noise, sigma_noise, self.pixel)
            if sigma_noise == 0. or sigma_noise == 0:
                noise = np.zeros(self.pixel)
            val[:, :, i] = self.val[:,:, i] + noise
        if diagnostics is True:
            # only the noise of the last slice is written out
            fits.writeto(self.name + '_process-output_SC-noise.fits', noise, clobber=True)
        # return SyntheticCube
        c = SyntheticCube(self)
        c.val = val
        c.stage = stage
        c.log.append(c.stage)
        return c
    def get_rough_sed(self):
        '''
        Collapses the current val cube into 1D array (SED).
        Returns
        -------
        sed : SyntheticSED
        '''
        stage = 'SyntheticCube: get_rough_sed'
        # for MJy/sr convert first, add and then convert back
        # NOTE(review): this converts self.val to 'Jy' IN PLACE and never
        # converts it back, while self.units still reports unit_out --
        # confirm the cube is not reused after calling this method.
        if self.unit_out == 'MJy/sr' or self.unit_out == 'Jy/arcsec^2':
            s = ConvertUnits(wav=self.wav, val=self.val)
            self.val = s.get_unit(in_units=self.units, out_units='Jy', input_resolution=self.resolution['arcsec'])
        # collapse every slice to one scalar value
        rough_sed = np.sum(np.sum(self.val.copy(), axis=1), axis=0)
        if self.unit_out == 'MJy/sr' or self.unit_out == 'Jy/arcsec^2':
            s = ConvertUnits(wav=self.wav, val=rough_sed)
            rough_sed = s.get_unit(in_units='Jy', out_units=self.unit_out, input_resolution=self.resolution['arcsec'] * self.pixel[0])
        # return SyntheticSED
        from .sed import SyntheticSED
        s = SyntheticSED(self)
        s.log.append(stage)
        s.stage = 'SyntheticSED: initial'
        s.log.append(s.stage)
        s.val = rough_sed
        return s
    def get_total_val(self, wav_1, wav_2):
        '''
        Collapses the val of SyntheticCube within the boundaries wav_1
        and wav_2 into a 0D value val.
        WARNING: This tool cannot replace convolve_filter!
                 But it can be used to produce rough estimates
                 in-between the processes.
        Parameters
        ----------
        wav_1, wav_2 : float
            Boundaries in microns.
        Returns
        -------
        val : SyntheticFlux
        '''
        stage = 'SyntheticCube: get_total_val'
        # slices within boundaries are extracted, averaged collapsed to an 2D image and finally collapsed to a single scalar value
        # for MJy/sr convert first, add and then convert back
        if self.unit_out == 'MJy/sr' or self.unit_out == 'Jy/arcsec^2':
            s = ConvertUnits(wav=self.wav, val=self.val)
            val = s.get_unit(in_units=self.units, out_units='Jy', input_resolution=self.resolution['arcsec'])
        else: val = self.val
        c = get_slices(wav=self.wav, val=val, wav_1=wav_1, wav_2=wav_2)
        i = average_collapse(val=c['val_short'])
        f_total = np.sum(i)
        # real limits within collapse (half a logarithmic wavelength bin
        # beyond the outermost selected slices)
        wav_max = 10 ** (np.log10(self.wav[c['filter_index'][0]]) + self.spacing_wav / 2.)
        wav_min = 10 ** (np.log10(self.wav[c['filter_index'][-1]]) - self.spacing_wav / 2.)
        wav_total = central_wav(wav=[wav_min, wav_max])
        if self.unit_out == 'MJy/sr' or self.unit_out == 'Jy/arcsec^2':
            s = ConvertUnits(wav=wav_total, val=f_total)
            f_total = s.get_unit(in_units='Jy', out_units=self.unit_out, input_resolution=self.resolution['arcsec'] * self.pixel[0])
        # return SyntheticFlux
        from .flux import SyntheticFlux
        f = SyntheticFlux(self)
        f.log.append(stage)
        f.stage = 'SyntheticFlux: initial'
        f.log.append(f.stage)
        f.wav = np.array(wav_total)
        f.val = np.array(f_total)
        f.filter = {'name': 'val_tot', 'waf_0': wav_total, 'waf_min': wav_min, 'waf_max': wav_max}
        return f
    def plot_image(self, wav_interest, prefix=None, name=None, multi_cut=None, single_cut=None, set_cut=None, dpi=None):
        '''
        Plots a certain slice close the wav_interest.
        The wavelength interval of the chosen slice labels the plot.
        Parameters
        ----------
        wav_interest : float, ``None``
            * float : wavelength close to slice in microns.
            * ``None`` : Only if input_array is SyntheticImage like
        prefix : str
            Name of the image. Default naming chain is switched off.
        name : str
            Name of image within the default naming chain to distinguish the
            plot files. E. g. 'PSF_gaussian'
        multi_cut : ``True``, ``None``
            * ``True`` : plots chosen image slice at cuts of [100, 99, 95, 90]%.
            * ``None`` : no multi-plot is returned.
            Default is ``None``.
        single_cut : float, ``None``
            * float : cut level for single plot of image slice between 0 and 100.
            * ``None`` : no single plot is returned.
        set_cut : tuple, ``None``
            * tuple : set_cut(v_min, v_max)
              Minimal and maximal physical value of val in the colorbars.
            * ``None`` : no plot with minimal and maximal cut is returned.
            Default is ``None``.
        dpi : ``None``, scalar > 0
            The resolution in dots per inch.
            ``None`` is default and will use the value savefig.dpi
            in the matplotlibrc file.
        Returns
        -------
        cube : SyntheticCube
        '''
        stage = 'SyntheticCube: plot_image'
        if prefix is None and name is None:
            raise Exception('If prefix name is not given, you need to give the a name to enable the default naming chain.')
        if prefix is not None:
            # with prefix naming exactly one plotting mode may be active
            if multi_cut is True and (single_cut is not None or set_cut is not None):
                raise Exception('If prefix naming is enabled only one plotting option can be chosen.')
            elif multi_cut is None and (single_cut is not None and set_cut is not None):
                raise Exception('If prefix naming is enabled only one plotting option can be chosen.')
        plot = MakePlots(prefix=prefix, name=name, input_array=SyntheticCube(self), wav_interest=wav_interest, multi_cut=multi_cut, single_cut=single_cut, set_cut=set_cut, dpi=dpi)
        # return SyntheticCube
        c = SyntheticCube(self)
        c.stage = stage
        c.log.append(c.stage)
        return c
    @property
    def spacing_wav(self):
        '''
        The property spacing_wav estimates the width of the logarithmic
        spaced wav entries.

        NOTE(review): the sign of the result depends on whether wav is
        increasing or decreasing, since it is log10(wav[0]/wav[-1]).
        '''
        if self.wav.ndim != 0:
            spacing_wav = np.log10(self.wav[0] / self.wav[-1]) / (len(self.wav) - 1)
        else:
            spacing_wav = None
        return spacing_wav
    @property
    def pixel(self):
        '''
        The property pixel is a tuple which resembles the current pixel in a
        value val. ``pixel(x,y)`` are calls as follows:
        ``x = pixel[0]``
        ``y = pixel[1]``
        '''
        if self.val.ndim in (0, 1):
            pixel = (None, None)
        if self.val.ndim in (2, 3):
            pixel = (self.val.shape[0], self.val.shape[1])
        return pixel
    @property
    def shape(self):
        '''
        The property shape is a string, which resembles the current shape of
        the value val.
        scalar: ``'()'``
        1D : ``'(wav)'``
        2D : ``'(x, y)'``
        3D : ``'(x, y , wav)'``
        '''
        if self.val.ndim == 0:
            shape = '()'
        if self.val.ndim == 1:
            shape = '(wav)'
        if self.val.ndim == 2:
            shape = '(x, y)'
        if self.val.ndim == 3:
            shape = '(x, y, wav)'
        return shape
    @property
    def resolution(self):
        '''
        The property resolution tells you the current resolution. If we are already
        in the SED or flux everything is considered as one large pixel.
        resolution in arcsec per pixel : ``resolution['arcsec']``
        resolution in rad per pixel : ``resolution['rad']``
        '''
        resolution = {}
        if self.pixel[0] is None:
            # collapsed (SED/flux) case: the whole FOV is one pixel
            resolution['rad'] = self.FOV[0] / 1. / self.distance
        else:
            resolution['rad'] = self.FOV[0] / self.pixel[0] / self.distance
        resolution['arcsec'] = np.degrees(resolution['rad']) * 3600
        return resolution
| koepferl/FluxCompensator | fluxcompensator/cube.py | Python | bsd-2-clause | 25,142 | [
"Gaussian"
] | b7d1f37be8e536c4d571145b3b2e57935c54eb3579c294952eb4d452a2143350 |
# $Id$
#
# Copyright (C) 2008 Greg Landrum
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
"""piddleQt
This module implements the PIDDLE/Sping API for a Qt4 canvas
Bits have been shamelessly cobbled from piddleSVG.py
Greg Landrum (glandrum@users.sourceforge.net)
"""
"""
Functionality implemented:
x drawLine
x drawPolygon
x drawString
x drawImage
Known problems:
"""
from rdkit.sping import pid
import types
from PyQt4 import QtCore, QtGui, QtSvg
from math import *
import copy
def _ColorToQt(color):
  """ Map a sping.pid color onto a QtGui.QColor.

  pid.transparent has no direct Qt equivalent here and is translated to
  None; callers treat a None pen/brush color as "do not draw".
  """
  if color == pid.transparent:
    return None
  channels = [int(c * 255) for c in (color.red, color.green, color.blue)]
  return QtGui.QColor(*channels)
#class QCanvasRotText(QCanvasText):
# NOTE(review): legacy class from the Qt3 canvas API. `QCanvasText` is not
# imported anywhere in this module's visible imports, so instantiating this
# class raises NameError -- it appears to be kept only for reference.
class QCanvasRotText:
  """ used to draw (UGLY) rotated text
  """
  def __init__(self,txt,canvas,angle=0):
    QCanvasText.__init__(self,txt,canvas)
    self._angle = angle
  def draw(self,qP):
    # rotate the painter, then translate so the text lands back at the
    # position it would have had unrotated
    qP.save()
    x = self.x()
    y = self.y()
    theta = -self._angle
    qP.rotate(theta)
    qP.translate(-x,-y)
    thetaR = theta*pi/180.
    # rotate (x, y) by -theta to find the new anchor point
    newX = cos(-thetaR)*x - sin(-thetaR)*y
    newY = sin(-thetaR)*x + cos(-thetaR)*y
    qP.translate(newX,newY)
    QCanvasText.draw(self,qP)
    qP.restore()
class QtCanvas( pid.Canvas ):
  """ A PIDDLE/Sping canvas that renders onto a PyQt4 QGraphicsScene.

  Every draw* call adds QGraphicsItem objects to the scene; the items are
  tracked in self.objs so clear() can remove them again.
  """
  def __init__(self,scene,size=None,name='QtCanvas'):
    if size is None:
      size = scene.width(),scene.height()
    self.size=size
    pid.Canvas.__init__(self, size, name)
    self._scene = scene
    self._brush = QtGui.QBrush()
    self._pen = QtGui.QPen()
    self._font = QtGui.QApplication.font()
    self.objs = []
    self._initOutput()
    self.nObjs = 0
  def _initOutput(self):
    """ remove every item we previously added to the scene and reset the
    bookkeeping """
    for obj in self.objs:
      if isinstance(obj, tuple):
        # tuple entries carry the graphics item in their first slot
        self._scene.removeItem(obj[0])
      else:
        self._scene.removeItem(obj)
    self.objs = []
    self.nObjs = 0
  def _adjustFont(self,font):
    """ copy face/weight/slant/size/underline from a sping font onto the
    canvas' QFont """
    if font.face:
      self._font.setFamily(font.face)
    self._font.setBold(font.bold)
    self._font.setItalic(font.italic)
    self._font.setPointSize(font.size)
    self._font.setUnderline(font.underline)
  # public functions
  def clear(self):
    """ wipe everything this canvas has drawn so far """
    self._initOutput()
  def flush(self):
    """ request a repaint of the underlying scene """
    self._scene.update()
  def save(self, file=None, format=None):
    """ only triggers a repaint; file/format are accepted for
    compatibility with the pid.Canvas interface but are ignored """
    self._scene.update()
  #------------- drawing methods --------------
  def drawLine(self, x1,y1, x2,y2, color=None, width=None, dash=None,
               **kwargs ):
    "Draw a straight line between x1,y1 and x2,y2."
    # set color...
    if color:
      if color == pid.transparent: return
    elif self.defaultLineColor == pid.transparent:
      return
    else:
      color = self.defaultLineColor
    qColor = _ColorToQt(color)
    if width:
      w = width
    else:
      w = self.defaultLineWidth
    self._pen.setColor(qColor)
    self._pen.setWidth(w)
    if dash is not None:
      self._pen.setStyle(QtCore.Qt.DashLine)
      self._pen.setDashPattern(list(dash))
    else:
      self._pen.setStyle(QtCore.Qt.SolidLine)
    l = self._scene.addLine(x1,y1,x2,y2,self._pen)
    if dash is not None:
      # restore the shared pen so later calls default to solid lines
      self._pen.setStyle(QtCore.Qt.SolidLine)
    self.nObjs+=1
    self.objs.append(l)
  def drawPolygon(self, pointlist,
                  edgeColor=None, edgeWidth=None, fillColor=pid.transparent,
                  closed=0,dash=None,**kwargs):
    """drawPolygon(pointlist) -- draws a polygon

    pointlist: a list of (x,y) tuples defining vertices
    """
    pts = [QtCore.QPointF(x[0],x[1]) for x in pointlist]
    poly = QtGui.QPolygonF(pts)
    # set color for fill...
    filling = 0
    if fillColor:
      if fillColor != pid.transparent:
        filling = 1
        qColor = _ColorToQt(fillColor)
        self._brush.setColor(qColor)
    if filling:
      self._brush.setStyle(QtCore.Qt.SolidPattern)
    else:
      self._brush.setStyle(QtCore.Qt.NoBrush)
    # set color for edge...
    if not edgeColor:
      edgeColor = self.defaultLineColor
    qColor = _ColorToQt(edgeColor)
    if qColor:
      self._pen.setColor(qColor)
    # set edge width...
    if edgeWidth is None: edgeWidth = self.defaultLineWidth
    self._pen.setWidth(edgeWidth)
    self._pen.setJoinStyle(QtCore.Qt.RoundJoin)
    if dash is not None:
      self._pen.setStyle(QtCore.Qt.DashLine)
    else:
      self._pen.setStyle(QtCore.Qt.SolidLine)
    if not qColor:
      # transparent edge: draw no outline at all
      self._pen.setStyle(QtCore.Qt.NoPen)
    poly = self._scene.addPolygon(poly,self._pen,self._brush)
    self.nObjs += 1
    poly.setZValue(self.nObjs)
    self.objs.append(poly)
    if dash is not None:
      self._pen.setStyle(QtCore.Qt.SolidLine)
    self.nObjs+=1
  def drawString(self, s, x,y, font=None, color=None, angle=0, **kwargs):
    """ draw text with its lower-left corner anchored near (x, y),
    optionally rotated by `angle` degrees """
    # set color...
    if color:
      if color == pid.transparent: return
    elif self.defaultLineColor == pid.transparent:
      return
    else:
      color = self.defaultLineColor
    if font is None:
      font = self.defaultFont
    qColor = _ColorToQt(color)
    if font is not None:
      self._adjustFont(font)
    txt=self._scene.addText(s,self._font)
    txt.setDefaultTextColor(qColor)
    # shift up by the text height so (x, y) ends up below the text block
    txtH = txt.boundingRect().height()
    txt.setPos(QtCore.QPointF(x,y-txtH))
    if angle:
      txt.rotate(angle)
    txt.setZValue(self.nObjs)
    self.nObjs += 1
    self.objs.append(txt)
  def drawImage(self, image, x,y, **kwargs):
    """ draw a PIL image at (x, y) by serializing it to PNG and loading
    the data into a QPixmap """
    # BUG FIX: PNG data is binary, so a BytesIO buffer is required --
    # io.StringIO rejects bytes.
    from io import BytesIO
    sio = BytesIO()
    image.save(sio,format='png')
    base = QtGui.QPixmap()
    base.loadFromData(sio.getvalue())
    pix = self._scene.addPixmap(base)
    pix.setPos(QtCore.QPointF(x,y))
    pix.setZValue(self.nObjs)
    self.nObjs += 1
    self.objs.append(pix)
  def stringBox(self, s, font=None):
    """ Return the logical width and height of the string if it were
    drawn in the current font (defaults to self.font). """
    if not font:
      font = self.defaultFont
    if font:
      self._adjustFont(font)
    t = QtGui.QGraphicsTextItem(s)
    t.setFont(self._font)
    rect = t.boundingRect()
    return rect.width(),rect.height()
  def stringWidth(self, s, font=None):
    """ Return the logical width of the string if it were drawn in the
    current font (defaults to self.font). """
    if not font:
      font = self.defaultFont
    if font:
      self._adjustFont(font)
    t = QtGui.QGraphicsTextItem(s)
    t.setFont(self._font)
    rect = t.boundingRect()
    return rect.width()
  def fontAscent(self, font=None):
    """ approximate the ascent of `font` (defaults to self.font) """
    if not font:
      font = self.defaultFont
    if font:
      self._adjustFont(font)
    t = QtGui.QGraphicsTextItem('B')
    t.setFont(self._font)
    rect = t.boundingRect()
    # FIX: this is a hack, but I can't immediately figure out how to solve the
    # problem that the bounding rectangle includes the descent:
    return 1.0*rect.height()
  def fontDescent(self, font=None):
    """ approximate the descent of `font` as the height difference between
    a descender-free glyph ('B') and one with a descender ('y') """
    if not font:
      font = self.defaultFont
    if font:
      self._adjustFont(font)
    t = QtGui.QGraphicsTextItem('B')
    t.setFont(self._font)
    rect1 = t.boundingRect()
    t = QtGui.QGraphicsTextItem('y')
    t.setFont(self._font)
    rect2 = t.boundingRect()
    return 1.*(rect2.height()-rect1.height())
def test(canvas):
  """ exercise the canvas API by drawing an assortment of primitives

  NOTE(review): relies on names (Color, black, red, ...) pulled in by the
  ``from rdkit.sping.pid import *`` in the __main__ block below; calling
  it without that import raises NameError.
  """
  #... for testing...
  canvas.defaultLineColor = Color(0.7,0.7,1.0) # light blue
  canvas.drawLines( map(lambda i:(i*10,0,i*10,300), range(30)) )
  canvas.drawLines( map(lambda i:(0,i*10,300,i*10), range(30)) )
  canvas.defaultLineColor = black
  canvas.drawLine(10,200, 20,190, color=red)
  canvas.drawEllipse( 130,30, 200,100, fillColor=yellow, edgeWidth=4 )
  canvas.drawArc( 130,30, 200,100, 45,50, fillColor=blue, edgeColor=navy, edgeWidth=4 )
  canvas.defaultLineWidth = 4
  canvas.drawRoundRect( 30,30, 100,100, fillColor=blue, edgeColor=maroon )
  canvas.drawCurve( 20,20, 100,50, 50,100, 160,160 )
  #canvas.drawString("This is a test!", 30,130, Font(face="times",size=16,bold=1),
  # color=green, angle=-45)
  #canvas.drawString("This is a test!", 30,130, color=red, angle=-45)
  polypoints = [ (160,120), (130,190), (210,145), (110,145), (190,190) ]
  canvas.drawPolygon(polypoints, fillColor=lime, edgeColor=red, edgeWidth=3, closed=1)
  canvas.drawRect( 200,200,260,260, edgeColor=yellow, edgeWidth=5 )
  canvas.drawLine( 200,260,260,260, color=green, width=5 )
  canvas.drawLine( 260,200,260,260, color=red, width=5 )
  canvas.flush()
def dashtest(canvas):
  """ same exercise as test(), but drawing every primitive with a
  dash=(3,3) pattern to check dashed-line support

  NOTE(review): like test(), this depends on the star import of
  rdkit.sping.pid performed in the __main__ block.
  """
  #... for testing...
  canvas.defaultLineColor = Color(0.7,0.7,1.0) # light blue
  canvas.drawLines( map(lambda i:(i*10,0,i*10,300), range(30)),dash=(3,3) )
  canvas.drawLines( map(lambda i:(0,i*10,300,i*10), range(30)),dash=(3,3) )
  canvas.defaultLineColor = black
  canvas.drawLine(10,200, 20,190, color=red,dash=(3,3))
  canvas.drawEllipse( 130,30, 200,100, fillColor=yellow, edgeWidth=4,dash=(3,3) )
  canvas.drawArc( 130,30, 200,100, 45,50, fillColor=blue, edgeColor=navy, edgeWidth=4,dash=(3,3) )
  canvas.defaultLineWidth = 4
  canvas.drawRoundRect( 30,30, 100,100, fillColor=blue, edgeColor=maroon,dash=(3,3) )
  canvas.drawCurve( 20,20, 100,50, 50,100, 160,160,dash=(3,3) )
  canvas.drawString("This is a test!", 30,130, Font(face="times",size=16,bold=1),
                    color=green, angle=-45)
  canvas.drawString("This is a test!", 30,130, color=red, angle=-45)
  polypoints = [ (160,120), (130,190), (210,145), (110,145), (190,190) ]
  canvas.drawPolygon(polypoints, fillColor=lime, edgeColor=red, edgeWidth=3, closed=1,dash=(3,3))
  canvas.drawRect( 200,200,260,260, edgeColor=yellow, edgeWidth=5,dash=(3,3) )
  canvas.drawLine( 200,260,260,260, color=green, width=5,dash=(3,3) )
  canvas.drawLine( 260,200,260,260, color=red, width=5,dash=(3,3) )
  canvas.flush()
if __name__=='__main__':
  # Demo entry point: open a QGraphicsView, wrap its scene in a QtCanvas
  # and run the drawing exercise from test() above.
  import sys
  from rdkit.sping.pid import *
  from PIL import Image
  app = QtGui.QApplication(sys.argv)
  w = QtGui.QGraphicsView()
  scene= QtGui.QGraphicsScene(0,0,300,300)
  canv = QtCanvas(scene)
  test(canv)
  w.setScene(scene)
  w.show()
  # hand control to the Qt event loop until the window is closed
  sys.exit(app.exec_())
| soerendip42/rdkit | rdkit/sping/Qt/pidQt4.py | Python | bsd-3-clause | 10,601 | [
"RDKit"
] | a0b1fc680132aab8b117ae6f374c759ca50e16aa506053be51d68d5af777a140 |
#!/usr/bin/env python
#
# Authors: Gregory S Mendez and Bastian Bentlage
#
# This script fetches sequences listed in blast results files and writes out a plain text
# file to be used by another script to write new fasta files with the full length sequences
# using the def-lines listed in the blast results and a large fasta file used to generate
# the blastdb the blast results came from.
#
# This script takes 2 arguments:
# 1) --blast - The directory containing the blast output files (in XML format)
# 2) --outdir - The directory to write the text files to
#
# Usage: PepFromBlast.py --blast ~/GreenAlgae/big_blastp/ --outdir ~/GreenAlgae/pepfromblast_txt/
#
# The script get_seq.sh should be used after this script to write the fasta files using the text
# files generated by this script.
from Bio.Blast import NCBIXML
from glob import glob
from Bio import SeqIO, Seq
import sys
import argparse
#from pyfaidx import Fasta
def ExtractPeps(BLAST_Results, OutDirectory):
    """Collect BLAST hit IDs per species/gene and write them to text files.

    Scans every ``*out`` BLAST XML file under *BLAST_Results*, extracts the
    subject sequence IDs of all alignments, groups them by species and gene
    (both parsed from the file name), and appends one
    ``<gene>_<species>.txt`` file per group into *OutDirectory* (one
    sequence ID per line).  The companion script ``get_seq.sh`` later
    fetches the actual sequences for those IDs.

    Parameters
    ----------
    BLAST_Results : str
        Directory (including trailing slash) containing the BLAST XML
        ``*out`` files.
    OutDirectory : str
        Directory the per-gene/species ID lists are written to.
    """
    SPECIES_GENE = {}
    # Loop over the BLAST XML and retrieve the subject sequence IDs.
    for BLASTout in glob('%s*out' % BLAST_Results):
        # Species and gene ID are encoded in the result file name --
        # presumably ".../<x>.query_<GENE>_vs_<Genus>_<species>.out";
        # TODO confirm against the pipeline that produces these files.
        SPECIES = '_'.join(BLASTout.split('/')[-1].split('.')[1].split('_')[2:])
        GENE = BLASTout.split('query_')[-1].split('.')[0]
        for blast_record in NCBIXML.parse(open(BLASTout)):
            for Alignment in blast_record.alignments:
                ORF_ID = str(Alignment).split()[1]
                # setdefault replaces the original nested try/except
                # KeyError ladder: create the per-species dict and the
                # per-gene set on first sight, then record the hit ID.
                SPECIES_GENE.setdefault(SPECIES, {}).setdefault(GENE, set()).add(ORF_ID)
    # Write one text file per gene/species listing the sequences to be
    # fetched by the separate (parallelised) script get_seq.sh.
    for SPECIES, GENE_ORFS in SPECIES_GENE.iteritems():
        for KOG, GENE_ORF in GENE_ORFS.iteritems():
            OutFasta = '%s/%s_%s.txt' % (OutDirectory, KOG, SPECIES)
            with open(OutFasta, 'a') as Out:
                for VALUE in list(GENE_ORF):
                    Out.write('%s\n' % VALUE)
    # NOTE: earlier revisions wrote fasta files directly here (via pyfaidx
    # or Bio.SeqIO); that proved much slower than emitting ID lists and
    # letting get_seq.sh fetch the sequences in parallel.
# Argument Parser
# NOTE(review): there is no `if __name__ == '__main__'` guard, so merely
# importing this module parses sys.argv and runs the extraction.
parser = argparse.ArgumentParser(description = 'This script fetches sequences listed in blast results files and writes out a plain text file to be used by another script to write new fasta files with the full length sequences using the def-lines listed in the blast results and a large fasta file used to generate the blastdb the blast results came from.')
parser.add_argument('--blast', required=True, help='BLAST results XML directory')
parser.add_argument('--outdir', required=True, help='Output gets written here')
args = parser.parse_args()
ExtractPeps(args.blast, args.outdir)
| mendezg/DATOL | PepFromBlast.py | Python | gpl-2.0 | 4,106 | [
"BLAST",
"Biopython"
] | c21e7b1b63c0e31d65fae86ae9f67fede8455ab381c1db1f05734f79480fd641 |
"""
Implementations of Restricted Boltzmann Machines and associated sampling
strategies.
"""
# Standard library imports
import logging
# Third-party imports
import numpy
N = numpy
np = numpy
import theano
from theano import tensor
from theano.compat.six.moves import zip as izip
T = tensor
from theano.tensor import nnet
# Local imports
from pylearn2.costs.cost import Cost
from pylearn2.blocks import Block, StackedBlocks
from pylearn2.utils import as_floatX, safe_update, sharedX
from pylearn2.models import Model
from pylearn2.expr.nnet import inverse_sigmoid_numpy
from pylearn2.linear.matrixmul import MatrixMul
from pylearn2.space import VectorSpace
from pylearn2.utils import safe_union
from pylearn2.utils.exc import reraise_as
from pylearn2.utils.rng import make_np_rng, make_theano_rng
# Silence Theano's warning about the (long-fixed) sum/div dimshuffle bug.
theano.config.warn.sum_div_dimshuffle_bug = False
# Module-level logger named after this module.
logger = logging.getLogger(__name__)
# Debug toggle: change `if 0` to `if 1` to fall back to the slow pure-Python
# shared_randomstreams RNG. By default the fast MRG31k3p stream is used.
if 0:
    logger.warning('using SLOW rng')
    RandomStreams = tensor.shared_randomstreams.RandomStreams
else:
    import theano.sandbox.rng_mrg
    RandomStreams = theano.sandbox.rng_mrg.MRG_RandomStreams
def training_updates(visible_batch, model, sampler, optimizer):
    """
    Combine together updates from various sources for RBM training.

    Parameters
    ----------
    visible_batch : tensor_like
        Theano symbolic representing a minibatch on the visible units,
        with the first dimension indexing training examples and the second
        indexing data dimensions.
    model : object
        An instance of `RBM` or a derived class, or one implementing
        the RBM interface.
    sampler : object
        An instance of `Sampler` or a derived class, or one implementing
        the sampler interface.
    optimizer : object
        An instance of `_Optimizer` or a derived class, or one implementing
        the optimizer interface (typically an `_SGDOptimizer`).

    Returns
    -------
    updates : dict
        Merged dictionary mapping shared variables to their symbolic
        update expressions: the optimizer's gradient updates plus the
        sampler's persistent-chain updates.
    """
    # TODO: Optimizer is deprecated; this is the only functionality still
    # requiring it (hence the leading underscore on _Optimizer /
    # _SGDOptimizer). The code should eventually be refactored so that
    # training_algorithms.sgd is used instead.
    # Negative-phase state transitions come from the persistent sampler.
    chain_updates = sampler.updates()
    # SML gradient: positive phase from the data minibatch, negative phase
    # from the sampler's current particles (pre-update state).
    grads = model.ml_gradients(visible_batch, sampler.particles)
    # Gradient-descent updates first, then fold in the chain updates.
    all_updates = optimizer.updates(gradients=grads)
    safe_update(all_updates, chain_updates)
    return all_updates
class Sampler(object):
    """
    Base class for sampling strategies layered on top of an RBM.

    A sampler may retain state between calls, e.g. the negative particles
    used by Persistent Contrastive Divergence.

    Parameters
    ----------
    rbm : object
        An instance of `RBM` or a derived class, or one implementing
        the `gibbs_step_for_v` interface.
    particles : numpy.ndarray
        Initial state for the set of persistent Markov chain particles
        that will be updated at every step of learning.
    rng : RandomState object
        NumPy random number generator used to seed the Theano
        RandomStreams object used in training.
    """

    def __init__(self, rbm, particles, rng):
        self.rbm = rbm
        # Draw a seed from the NumPy RNG to initialize the Theano stream.
        np_rng = make_np_rng(rng, which_method="randn")
        theano_seed = int(np_rng.randint(2 ** 30))
        self.s_rng = make_theano_rng(theano_seed, which_method="binomial")
        self.particles = sharedX(particles, name='particles')

    def updates(self):
        """
        Get the dictionary of updates for the sampler's persistent state
        at each step.

        Returns
        -------
        updates : dict
            Dictionary with shared variable instances as keys and symbolic
            expressions indicating how they should be updated as values.

        Notes
        -----
        Stub in the `Sampler` base class; subclasses must override.
        """
        raise NotImplementedError()
class BlockGibbsSampler(Sampler):
    """
    Implements a persistent Markov chain based on block gibbs sampling
    for use with Persistent Contrastive
    Divergence, a.k.a. stochastic maximum likelhiood, as described in [1].

    .. [1] T. Tieleman. "Training Restricted Boltzmann Machines using
       approximations to the likelihood gradient". Proceedings of the 25th
       International Conference on Machine Learning, Helsinki, Finland,
       2008. http://www.cs.toronto.edu/~tijmen/pcd/pcd.pdf

    Parameters
    ----------
    rbm : object
        An instance of `RBM` or a derived class, or one implementing
        the `gibbs_step_for_v` interface.
    particles : ndarray
        An initial state for the set of persistent Markov chain particles
        that will be updated at every step of learning.
    rng : RandomState object
        NumPy random number generator object used to initialize a
        RandomStreams object used in training.
    steps : int, optional
        Number of Gibbs steps to run the Markov chain for at each
        iteration.
    particles_clip : None or (min, max) pair, optional
        The values of the returned particles will be clipped between
        min and max.
    """

    def __init__(self, rbm, particles, rng, steps=1, particles_clip=None):
        super(BlockGibbsSampler, self).__init__(rbm, particles, rng)
        self.steps = steps
        self.particles_clip = particles_clip

    def updates(self, particles_clip=None):
        """
        Get the dictionary of updates for the sampler's persistent state
        at each step.

        Parameters
        ----------
        particles_clip : unused
            Kept only for backward compatibility of the signature; the
            clipping range actually applied is ``self.particles_clip``
            (set in the constructor).

        Returns
        -------
        updates : dict
            Dictionary with shared variable instances as keys and symbolic
            expressions indicating how they should be updated as values.
        """
        steps = self.steps
        particles = self.particles
        # TODO: do this with scan?
        # `range` instead of the Python-2-only `xrange`: `steps` is small,
        # so materializing the list is harmless, and this runs on Python 3.
        for i in range(steps):
            particles, _locals = self.rbm.gibbs_step_for_v(
                particles,
                self.s_rng
            )
            assert particles.type.dtype == self.particles.type.dtype
            if self.particles_clip is not None:
                p_min, p_max = self.particles_clip
                # The clipped values should still have the same type
                dtype = particles.dtype
                p_min = tensor.as_tensor_variable(p_min)
                if p_min.dtype != dtype:
                    p_min = tensor.cast(p_min, dtype)
                p_max = tensor.as_tensor_variable(p_max)
                if p_max.dtype != dtype:
                    p_max = tensor.cast(p_max, dtype)
                particles = tensor.clip(particles, p_min, p_max)
        # Lazily create the h_sample shared variable the first time through,
        # so the update dictionary below always has a target to write to.
        if not hasattr(self.rbm, 'h_sample'):
            self.rbm.h_sample = sharedX(numpy.zeros((0, 0)), 'h_sample')
        return {
            self.particles: particles,
            # TODO: self.rbm.h_sample is never used, why is that here?
            # Moreover, it does not make sense for things like ssRBM.
            self.rbm.h_sample: _locals['h_mean']
        }
class RBM(Block, Model):
    """
    A base interface for RBMs, implementing the binary-binary case.

    Parameters
    ----------
    nvis : int, optional
        Number of visible units in the model.
        (Specifying this implies that the model acts on a vector,
        i.e. it sets vis_space = pylearn2.space.VectorSpace(nvis) )
    nhid : int, optional
        Number of hidden units in the model.
        (Specifying this implies that the model acts on a vector)
    vis_space : pylearn2.space.Space, optional
        Space object describing what kind of vector space the RBM acts
        on. Don't specify if you used nvis / hid
    hid_space : pylearn2.space.Space, optional
        Space object describing what kind of vector space the RBM's
        hidden units live in. Don't specify if you used nvis / nhid
    transformer : pylearn2.linear.LinearTransform, optional
        The linear map from visible to hidden units; required iff
        vis_space / hid_space are given.
    irange : float, optional
        The size of the initial interval around 0 for weights.
    rng : RandomState object or seed, optional
        NumPy RandomState object to use when initializing parameters
        of the model, or (integer) seed to use to create one.
    init_bias_vis : array_like, optional
        Initial value of the visible biases, broadcasted as necessary.
    init_bias_vis_marginals : pylearn2.datasets.dataset.Dataset or None
        Optional. Dataset used to initialize the visible biases to the
        inverse sigmoid of the data marginals
    init_bias_hid : array_like, optional
        initial value of the hidden biases, broadcasted as necessary.
    base_lr : float, optional
        The base learning rate
    anneal_start : int, optional
        Number of steps after which to start annealing on a 1/t schedule
    nchains : int, optional
        Number of negative chains
    sml_gibbs_steps : int, optional
        Number of gibbs steps to take per update
    random_patches_src : pylearn2.datasets.dataset.Dataset or None
        Optional. Dataset from which to draw random patches in order to
        initialize the weights. Patches will be multiplied by irange.
    monitor_reconstruction : bool, optional
        If True, will request a monitoring channel to monitor
        reconstruction error
        NOTE(review): this flag is currently accepted but never stored or
        read anywhere in this class — confirm before relying on it.

    Notes
    -----
    The `RBM` class is redundant now that we have a `DBM` class, since
    an RBM is just a DBM with one hidden layer. Users of pylearn2 should
    use single-layer DBMs when possible. Not all RBM functionality has
    been ported to the DBM framework yet, so this is not always possible.
    (Examples: spike-and-slab RBMs, score matching, denoising score matching)
    pylearn2 developers should not add new features to the RBM class or
    add new RBM subclasses. pylearn2 developers should only add documentation
    and bug fixes to the RBM class and subclasses. pylearn2 developers should
    finish porting all RBM functionality to the DBM framework, then turn
    the RBM class into a thin wrapper around the DBM class that allocates
    a single layer DBM.
    """

    def __init__(self, nvis = None, nhid = None,
                 vis_space = None,
                 hid_space = None,
                 transformer = None,
                 irange=0.5, rng=None, init_bias_vis = None,
                 init_bias_vis_marginals = None, init_bias_hid=0.0,
                 base_lr = 1e-3, anneal_start = None, nchains = 100,
                 sml_gibbs_steps = 1,
                 random_patches_src = None,
                 monitor_reconstruction = False):
        Model.__init__(self)
        Block.__init__(self)
        if init_bias_vis_marginals is not None:
            assert init_bias_vis is None
            X = init_bias_vis_marginals.X
            assert X.min() >= 0.0
            assert X.max() <= 1.0
            marginals = X.mean(axis=0)
            # rescale the marginals a bit to avoid NaNs from sigmoid^-1(0/1)
            init_bias_vis = inverse_sigmoid_numpy(.01 + .98 * marginals)
        if init_bias_vis is None:
            init_bias_vis = 0.0
        rng = make_np_rng(rng, 1001, which_method="uniform")
        self.rng = rng
        if vis_space is None:
            # if we don't specify things in terms of spaces and a transformer,
            # assume dense matrix multiplication and work off of nvis, nhid
            assert hid_space is None
            assert transformer is None or isinstance(transformer, MatrixMul)
            assert nvis is not None
            assert nhid is not None
            if transformer is None:
                if random_patches_src is None:
                    W = rng.uniform(-irange, irange, (nvis, nhid))
                else:
                    if hasattr(random_patches_src, '__array__'):
                        W = irange * random_patches_src.T
                        assert W.shape == (nvis, nhid)
                    else:
                        W = irange * random_patches_src.get_batch_design(
                            nhid).T
                self.transformer = MatrixMul(sharedX(
                    W,
                    name='W',
                    borrow=True
                ))
            else:
                self.transformer = transformer
            self.vis_space = VectorSpace(nvis)
            self.hid_space = VectorSpace(nhid)
        else:
            assert hid_space is not None
            assert transformer is not None
            assert nvis is None
            assert nhid is None
            self.vis_space = vis_space
            self.hid_space = hid_space
            self.transformer = transformer
        try:
            b_vis = self.vis_space.get_origin()
            b_vis += init_bias_vis
        except ValueError:
            reraise_as(ValueError("bad shape or value for init_bias_vis"))
        self.bias_vis = sharedX(b_vis, name='bias_vis', borrow=True)
        try:
            b_hid = self.hid_space.get_origin()
            b_hid += init_bias_hid
        except ValueError:
            reraise_as(ValueError('bad shape or value for init_bias_hid'))
        self.bias_hid = sharedX(b_hid, name='bias_hid', borrow=True)
        # The patch source is only needed at construction time; don't
        # serialize it with the model.
        self.random_patches_src = random_patches_src
        self.register_names_to_del(['random_patches_src'])
        self.__dict__.update(nhid=nhid, nvis=nvis)
        self._params = safe_union(self.transformer.get_params(),
                                  [self.bias_vis, self.bias_hid])
        self.base_lr = base_lr
        self.anneal_start = anneal_start
        self.nchains = nchains
        self.sml_gibbs_steps = sml_gibbs_steps

    def get_default_cost(self):
        """
        Raises
        ------
        NotImplementedError
            Always; the RBM predates the Cost-based training algorithms.
        """
        raise NotImplementedError("The RBM class predates the current "
                "Cost-based training algorithms (SGD and BGD). To train "
                "the RBM with PCD, use DefaultTrainingAlgorithm rather "
                "than SGD or BGD. Some RBM subclassess may also be "
                "trained with SGD or BGD by using the "
                "Cost classes defined in pylearn2.costs.ebm_estimation. "
                "Note that it is also possible to make an RBM by allocating "
                "a DBM with only one hidden layer. The DBM class is newer "
                "and supports training with SGD / BGD. In the long run we "
                "should remove the old RBM class and turn it into a wrapper "
                "around the DBM class that makes a 1-layer DBM.")

    def get_input_dim(self):
        """
        Returns
        -------
        dim : int
            The number of elements in the input, if the input is a vector.
        """
        if not isinstance(self.vis_space, VectorSpace):
            raise TypeError("Can't describe " + str(type(self.vis_space))
                    + " as a dimensionality number.")
        return self.vis_space.dim

    def get_output_dim(self):
        """
        Returns
        -------
        dim : int
            The number of elements in the output, if the output is a vector.
        """
        if not isinstance(self.hid_space, VectorSpace):
            raise TypeError("Can't describe " + str(type(self.hid_space))
                    + " as a dimensionality number.")
        return self.hid_space.dim

    def get_input_space(self):
        """
        Returns
        -------
        space : pylearn2.space.Space
            The space the visible units live in.
        """
        return self.vis_space

    def get_output_space(self):
        """
        Returns
        -------
        space : pylearn2.space.Space
            The space the hidden units live in.
        """
        return self.hid_space

    def get_params(self):
        """
        Returns
        -------
        params : list
            A fresh list (defensive copy) of the model's shared parameters.
        """
        return list(self._params)

    def get_weights(self, borrow=False):
        """
        Returns
        -------
        weights : ndarray
            The current value of the weight matrix.
        """
        # The transformer holds exactly one parameter: the weight matrix.
        weights, = self.transformer.get_params()
        return weights.get_value(borrow=borrow)

    def get_weights_topo(self):
        """
        Returns
        -------
        weights : ndarray
            The weights in topological (image-shaped) format, as produced
            by the transformer.
        """
        return self.transformer.get_weights_topo()

    def get_weights_format(self):
        """
        Returns
        -------
        format : list
            ['v', 'h']: rows index visible units, columns hidden units.
        """
        return ['v', 'h']

    def get_monitoring_channels(self, data):
        """
        Build monitoring channels summarizing biases, mean hidden
        activations and reconstruction error for the given data batch.
        """
        V = data
        # Fixed seed so the monitored reconstruction error is repeatable
        # across monitoring passes.
        theano_rng = make_theano_rng(None, 42, which_method="binomial")
        H = self.mean_h_given_v(V)
        h = H.mean(axis=0)
        return { 'bias_hid_min' : T.min(self.bias_hid),
                 'bias_hid_mean' : T.mean(self.bias_hid),
                 'bias_hid_max' : T.max(self.bias_hid),
                 'bias_vis_min' : T.min(self.bias_vis),
                 'bias_vis_mean' : T.mean(self.bias_vis),
                 'bias_vis_max': T.max(self.bias_vis),
                 'h_min' : T.min(h),
                 'h_mean': T.mean(h),
                 'h_max' : T.max(h),
                 'reconstruction_error' : self.reconstruction_error(V,
                     theano_rng) }

    def get_monitoring_data_specs(self):
        """
        Get the data_specs describing the data for get_monitoring_channel.

        This implementation returns specification corresponding to unlabeled
        inputs.

        Returns
        -------
        specs : (space, source) pair for unlabeled input data.
        """
        return (self.get_input_space(), self.get_input_source())

    def ml_gradients(self, pos_v, neg_v):
        """
        Get the contrastive gradients given positive and negative phase
        visible units.

        Parameters
        ----------
        pos_v : tensor_like
            Theano symbolic representing a minibatch on the visible units,
            with the first dimension indexing training examples and the
            second indexing data dimensions (usually actual training data).
        neg_v : tensor_like
            Theano symbolic representing a minibatch on the visible units,
            with the first dimension indexing training examples and the
            second indexing data dimensions (usually reconstructions of the
            data or sampler particles from a persistent Markov chain).

        Returns
        -------
        grads : list
            List of Theano symbolic variables representing gradients with
            respect to model parameters, in the same order as returned by
            `params()`.

        Notes
        -----
        `pos_v` and `neg_v` need not have the same first dimension, i.e.
        minibatch size.
        """
        # taking the mean over each term independently allows for different
        # mini-batch sizes in the positive and negative phase.
        ml_cost = (self.free_energy_given_v(pos_v).mean() -
                   self.free_energy_given_v(neg_v).mean())
        # The visible configurations are data/particles, not parameters:
        # treat them as constants so no gradient flows into them.
        grads = tensor.grad(ml_cost, self.get_params(),
                            consider_constant=[pos_v, neg_v])
        return grads

    def train_batch(self, dataset, batch_size):
        """
        A default learning rule based on SML. Draws one design-matrix
        batch from `dataset` and performs a single update.
        """
        self.learn_mini_batch(dataset.get_batch_design(batch_size))
        return True

    def learn_mini_batch(self, X):
        """
        A default learning rule based on SML. Compiles the update
        function lazily on first use.
        """
        if not hasattr(self, 'learn_func'):
            self.redo_theano()
        rval = self.learn_func(X)
        return rval

    def redo_theano(self):
        """
        Compiles the theano function for the default learning rule
        """
        init_names = dir(self)
        minibatch = tensor.matrix()
        optimizer = _SGDOptimizer(self, self.base_lr, self.anneal_start)
        # (fixed) the original had a redundant double assignment here:
        # `sampler = sampler = BlockGibbsSampler(...)`.
        # Chains start at 0.5, the maximum-entropy state for binary units.
        sampler = BlockGibbsSampler(self, 0.5 + np.zeros((
            self.nchains, self.get_input_dim())), self.rng,
            steps=self.sml_gibbs_steps)
        updates = training_updates(visible_batch=minibatch, model=self,
                                   sampler=sampler, optimizer=optimizer)
        self.learn_func = theano.function([minibatch], updates=updates)
        # Anything redo_theano added (compiled functions etc.) must not be
        # pickled with the model.
        final_names = dir(self)
        self.register_names_to_del([name for name in final_names
                                    if name not in init_names])

    def gibbs_step_for_v(self, v, rng):
        """
        Do a round of block Gibbs sampling given visible configuration

        Parameters
        ----------
        v : tensor_like
            Theano symbolic representing the hidden unit states for a batch
            of training examples (or negative phase particles), with the
            first dimension indexing training examples and the second
            indexing data dimensions.
        rng : RandomStreams object
            Random number generator to use for sampling the hidden and
            visible units.

        Returns
        -------
        v_sample : tensor_like
            Theano symbolic representing the new visible unit state after one
            round of Gibbs sampling.
        locals : dict
            Contains the following auxiliary state as keys (all symbolics
            except shape tuples):

            * `h_mean`: the returned value from `mean_h_given_v`
            * `h_mean_shape`: shape tuple indicating the size of
              `h_mean` and `h_sample`
            * `h_sample`: the stochastically sampled hidden units
            * `v_mean_shape`: shape tuple indicating the shape of
              `v_mean` and `v_sample`
            * `v_mean`: the returned value from `mean_v_given_h`
            * `v_sample`: the stochastically sampled visible units
        """
        h_mean = self.mean_h_given_v(v)
        assert h_mean.type.dtype == v.type.dtype
        # For binary hidden units
        # TODO: factor further to extend to other kinds of hidden units
        #       (e.g. spike-and-slab)
        h_sample = rng.binomial(size=h_mean.shape, n=1, p=h_mean,
                                dtype=h_mean.type.dtype)
        assert h_sample.type.dtype == v.type.dtype
        # v_mean is always based on h_sample, not h_mean, because we don't
        # want h transmitting more than one bit of information per unit.
        v_mean = self.mean_v_given_h(h_sample)
        assert v_mean.type.dtype == v.type.dtype
        v_sample = self.sample_visibles([v_mean], v_mean.shape, rng)
        assert v_sample.type.dtype == v.type.dtype
        return v_sample, locals()

    def sample_visibles(self, params, shape, rng):
        """
        Stochastically sample the visible units given hidden unit
        configurations for a set of training examples.

        Parameters
        ----------
        params : list
            List of the necessary parameters to sample :math:`p(v|h)`. In the
            case of a binary-binary RBM this is a single-element list
            containing the symbolic representing :math:`p(v|h)`, as returned
            by `mean_v_given_h`.
        shape : tuple
            Shape of the uniform noise tensor to draw.
        rng : RandomStreams object
            Theano random stream used to draw the noise.

        Returns
        -------
        vprime : tensor_like
            Theano symbolic representing stochastic samples from :math:`p(v|h)`
        """
        v_mean = params[0]
        # Bernoulli sampling via the inverse-CDF trick: u < p(v=1|h).
        return as_floatX(rng.uniform(size=shape) < v_mean)

    def input_to_h_from_v(self, v):
        """
        Compute the affine function (linear map plus bias) that serves as
        input to the hidden layer in an RBM.

        Parameters
        ----------
        v : tensor_like or list of tensor_likes
            Theano symbolic (or list thereof) representing the one or several
            minibatches on the visible units, with the first dimension
            indexing training examples and the second indexing data dimensions.

        Returns
        -------
        a : tensor_like or list of tensor_likes
            Theano symbolic (or list thereof) representing the input to each
            hidden unit for each training example.
        """
        if isinstance(v, tensor.Variable):
            return self.bias_hid + self.transformer.lmul(v)
        else:
            return [self.input_to_h_from_v(vis) for vis in v]

    def input_to_v_from_h(self, h):
        """
        Compute the affine function (linear map plus bias) that serves as
        input to the visible layer in an RBM.

        Parameters
        ----------
        h : tensor_like or list of tensor_likes
            Theano symbolic (or list thereof) representing the one or several
            minibatches on the hidden units, with the first dimension
            indexing training examples and the second indexing data dimensions.

        Returns
        -------
        a : tensor_like or list of tensor_likes
            Theano symbolic (or list thereof) representing the input to each
            visible unit for each row of h.
        """
        if isinstance(h, tensor.Variable):
            return self.bias_vis + self.transformer.lmul_T(h)
        else:
            return [self.input_to_v_from_h(hid) for hid in h]

    def upward_pass(self, v):
        """
        Wrapper around mean_h_given_v method.  Called when RBM is accessed
        by mlp.HiddenLayer.
        """
        return self.mean_h_given_v(v)

    def mean_h_given_v(self, v):
        """
        Compute the mean activation of the hidden units given visible unit
        configurations for a set of training examples.

        Parameters
        ----------
        v : tensor_like or list of tensor_likes
            Theano symbolic (or list thereof) representing the hidden unit
            states for a batch (or several) of training examples, with the
            first dimension indexing training examples and the second
            indexing data dimensions.

        Returns
        -------
        h : tensor_like or list of tensor_likes
            Theano symbolic (or list thereof) representing the mean
            (deterministic) hidden unit activations given the visible units.
        """
        if isinstance(v, tensor.Variable):
            return nnet.sigmoid(self.input_to_h_from_v(v))
        else:
            return [self.mean_h_given_v(vis) for vis in v]

    def mean_v_given_h(self, h):
        """
        Compute the mean activation of the visibles given hidden unit
        configurations for a set of training examples.

        Parameters
        ----------
        h : tensor_like or list of tensor_likes
            Theano symbolic (or list thereof) representing the hidden unit
            states for a batch (or several) of training examples, with the
            first dimension indexing training examples and the second
            indexing hidden units.

        Returns
        -------
        vprime : tensor_like or list of tensor_likes
            Theano symbolic (or list thereof) representing the mean
            (deterministic) reconstruction of the visible units given the
            hidden units.
        """
        if isinstance(h, tensor.Variable):
            return nnet.sigmoid(self.input_to_v_from_h(h))
        else:
            return [self.mean_v_given_h(hid) for hid in h]

    def free_energy_given_v(self, v):
        """
        Calculate the free energy of a visible unit configuration by
        marginalizing over the hidden units.

        Parameters
        ----------
        v : tensor_like
            Theano symbolic representing the hidden unit states for a batch
            of training examples, with the first dimension indexing training
            examples and the second indexing data dimensions.

        Returns
        -------
        f : tensor_like
            1-dimensional tensor (vector) representing the free energy
            associated with each row of v.
        """
        sigmoid_arg = self.input_to_h_from_v(v)
        # F(v) = -v.b_vis - sum_j softplus(W_j.v + b_hid_j)
        return (-tensor.dot(v, self.bias_vis) -
                nnet.softplus(sigmoid_arg).sum(axis=1))

    def free_energy(self, V):
        """
        Alias for `free_energy_given_v`.
        """
        return self.free_energy_given_v(V)

    def free_energy_given_h(self, h):
        """
        Calculate the free energy of a hidden unit configuration by
        marginalizing over the visible units.

        Parameters
        ----------
        h : tensor_like
            Theano symbolic representing the hidden unit states, with the
            first dimension indexing training examples and the second
            indexing data dimensions.

        Returns
        -------
        f : tensor_like
            1-dimensional tensor (vector) representing the free energy
            associated with each row of v.
        """
        sigmoid_arg = self.input_to_v_from_h(h)
        return (-tensor.dot(h, self.bias_hid) -
                nnet.softplus(sigmoid_arg).sum(axis=1))

    def __call__(self, v):
        """
        Forward propagate (symbolic) input through this module, obtaining
        a representation to pass on to layers above.

        This just aliases the `mean_h_given_v()` function for syntactic
        sugar/convenience.
        """
        return self.mean_h_given_v(v)

    def reconstruction_error(self, v, rng):
        """
        Compute the mean-squared error (mean over examples, sum over units)
        across a minibatch after a Gibbs step starting from the training data.

        Parameters
        ----------
        v : tensor_like
            Theano symbolic representing the hidden unit states for a batch
            of training examples, with the first dimension indexing training
            examples and the second indexing data dimensions.
        rng : RandomStreams object
            Random number generator to use for sampling the hidden and
            visible units.

        Returns
        -------
        mse : tensor_like
            0-dimensional tensor (essentially a scalar) indicating the mean
            reconstruction error across the minibatch.

        Notes
        -----
        The reconstruction used to assess error samples only the hidden
        units. For the visible units, it uses the conditional mean. No sampling
        of the visible units is done, to reduce noise in the estimate.
        """
        sample, _locals = self.gibbs_step_for_v(v, rng)
        return ((_locals['v_mean'] - v) ** 2).sum(axis=1).mean()
class GaussianBinaryRBM(RBM):
    """
    An RBM with Gaussian visible units and binary hidden units.

    Parameters
    ----------
    energy_function_class : class
        Energy function class to instantiate; must provide
        `supports_vector_sigma`, `score`, `mean_H_given_V`,
        `mean_V_given_H` and `free_energy`.
    nvis : int, optional
        Number of visible units in the model.
    nhid : int, optional
        Number of hidden units in the model.
    vis_space : pylearn2.space.Space, optional
        Space the visible units live in (alternative to nvis).
    hid_space : pylearn2.space.Space, optional
        Space the hidden units live in (alternative to nhid).
    irange : float, optional
        The size of the initial interval around 0 for weights.
    rng : RandomState object or seed, optional
        NumPy RandomState object to use when initializing parameters
        of the model, or (integer) seed to use to create one.
    mean_vis : bool, optional
        Don't actually sample visibles; make sample method simply return
        mean.
    init_sigma : float or numpy.ndarray, optional
        Initial value of the sigma variable. If init_sigma is a scalar
        and sigma is not, will be broadcasted.
    learn_sigma : bool, optional
        If True, sigma_driver is added to the learned parameters.
    sigma_lr_scale : float, optional
        Scale factor relating the learned sigma_driver to the effective
        sigma (sigma = sigma_driver * sigma_lr_scale).
    init_bias_hid : scalar or 1-d array of length `nhid`
        Initial value for the biases on hidden units.
    min_sigma, max_sigma : float, float, optional
        Elements of sigma are clipped to this range during learning
    """
    def __init__(self, energy_function_class,
                 nvis = None,
                 nhid = None,
                 vis_space = None,
                 hid_space = None,
                 transformer = None,
                 irange=0.5, rng=None,
                 mean_vis=False, init_sigma=2., learn_sigma=False,
                 sigma_lr_scale=1., init_bias_hid=0.0,
                 min_sigma = .1, max_sigma = 10.):
        super(GaussianBinaryRBM, self).__init__(nvis = nvis, nhid = nhid,
                                                transformer = transformer,
                                                vis_space = vis_space,
                                                hid_space = hid_space,
                                                irange = irange, rng = rng,
                                                init_bias_hid = init_bias_hid)
        self.learn_sigma = learn_sigma
        self.init_sigma = init_sigma
        self.sigma_lr_scale = float(sigma_lr_scale)
        # Per-visible-unit sigma vector if supported, otherwise one scalar.
        if energy_function_class.supports_vector_sigma():
            base = N.ones(nvis)
        else:
            base = 1
        # sigma_driver is the learned quantity; the effective sigma is
        # rescaled by sigma_lr_scale so its learning rate can be tuned
        # independently of the other parameters.
        self.sigma_driver = sharedX(
            base * init_sigma / self.sigma_lr_scale,
            name='sigma_driver',
            borrow=True
        )
        self.sigma = self.sigma_driver * self.sigma_lr_scale
        self.min_sigma = min_sigma
        self.max_sigma = max_sigma
        if self.learn_sigma:
            self._params.append(self.sigma_driver)
        self.mean_vis = mean_vis
        # All energy computations are delegated to this object.
        self.energy_function = energy_function_class(
                    transformer = self.transformer,
                    sigma=self.sigma,
                    bias_vis=self.bias_vis,
                    bias_hid=self.bias_hid
                )
    def _modify_updates(self, updates):
        """
        Clip updates to sigma_driver so that the effective sigma stays
        within [min_sigma, max_sigma].
        """
        if self.sigma_driver in updates:
            assert self.learn_sigma
            # The clip bounds are divided by sigma_lr_scale because they
            # apply to the driver, not the rescaled sigma.
            updates[self.sigma_driver] = T.clip(
                updates[self.sigma_driver],
                self.min_sigma / self.sigma_lr_scale,
                self.max_sigma / self.sigma_lr_scale
            )
    def score(self, V):
        """
        Delegate the score (gradient of log-probability wrt V) to the
        energy function.
        """
        return self.energy_function.score(V)
    def P_H_given_V(self, V):
        """
        Delegate P(h | v) to the energy function.
        """
        return self.energy_function.mean_H_given_V(V)
    def mean_h_given_v(self, v):
        """
        Alias for `P_H_given_V` (matches the base-class interface).
        """
        return self.P_H_given_V(v)
    def mean_v_given_h(self, h):
        """
        Compute the mean activation of the visibles given hidden unit
        configurations for a set of training examples.

        Parameters
        ----------
        h : tensor_like
            Theano symbolic representing the hidden unit states for a batch
            of training examples, with the first dimension indexing training
            examples and the second indexing hidden units.

        Returns
        -------
        vprime : tensor_like
            Theano symbolic representing the mean (deterministic)
            reconstruction of the visible units given the hidden units.
        """
        return self.energy_function.mean_V_given_H(h)
        #return self.bias_vis + self.sigma * tensor.dot(h, self.weights.T)
    def free_energy_given_v(self, V):
        """
        Calculate the free energy of a visible unit configuration by
        marginalizing over the hidden units.

        Parameters
        ----------
        v : tensor_like
            Theano symbolic representing the hidden unit states for a batch
            of training examples, with the first dimension indexing training
            examples and the second indexing data dimensions.

        Returns
        -------
        f : tensor_like
            1-dimensional tensor representing the free energy of the visible
            unit configuration for each example in the batch
        """
        # The string literal below is intentionally dead code: a previous
        # inline implementation kept for reference. The computation is now
        # delegated to the energy function.
        """hid_inp = self.input_to_h_from_v(v)
        squared_term = ((self.bias_vis - v) ** 2.) / (2. * self.sigma)
        rval = squared_term.sum(axis=1) - nnet.softplus(hid_inp).sum(axis=1)
        assert len(rval.type.broadcastable) == 1"""
        return self.energy_function.free_energy(V)
    def free_energy(self, V):
        """
        Alias for `free_energy_given_v`; delegates to the energy function.
        """
        return self.energy_function.free_energy(V)
    def sample_visibles(self, params, shape, rng):
        """
        Stochastically sample the visible units given hidden unit
        configurations for a set of training examples.

        Parameters
        ----------
        params : list
            List of the necessary parameters to sample :math:`p(v|h)`.
            In the case of a Gaussian-binary RBM this is a single-element
            list containing the conditional mean.
        shape : tuple
            Shape of the Gaussian noise tensor to draw.
        rng : RandomStreams object
            Theano random stream used to draw the noise.

        Returns
        -------
        vprime : tensor_like
            Theano symbolic representing stochastic samples from
            :math:`p(v|h)`

        Notes
        -----
        If `mean_vis` is specified as `True` in the constructor, this is
        equivalent to a call to `mean_v_given_h`.
        """
        v_mean = params[0]
        if self.mean_vis:
            return v_mean
        else:
            # zero mean, std sigma noise
            zero_mean = rng.normal(size=shape) * self.sigma
            return zero_mean + v_mean
class mu_pooled_ssRBM(RBM):
"""
.. todo::
WRITEME
Parameters
----------
alpha : WRITEME
Vector of length nslab, diagonal precision term on s.
b : WRITEME
Vector of length nhid, hidden unit bias.
B : WRITEME
Vector of length nvis, diagonal precision on v. Lambda in ICML2011
paper.
Lambda : WRITEME
Matrix of shape nvis x nhid, whose i-th column encodes a diagonal
precision on v, conditioned on h_i. phi in ICML2011 paper.
log_alpha : WRITEME
Vector of length nslab, precision on s.
mu : WRITEME
Vector of length nslab, mean parameter on s.
W : WRITEME
Matrix of shape nvis x nslab, weights of the nslab linear filters s.
"""
def __init__(self, nvis, nhid, n_s_per_h,
batch_size,
alpha0, alpha_irange,
b0,
B0,
Lambda0, Lambda_irange,
mu0,
W_irange=None,
rng=None):
rng = make_np_rng(rng, 1001, which_method="rand")
self.nhid = nhid
self.nslab = nhid * n_s_per_h
self.n_s_per_h = n_s_per_h
self.nvis = nvis
self.batch_size = batch_size
# configure \alpha: precision parameter on s
alpha_init = numpy.zeros(self.nslab) + alpha0
if alpha_irange > 0:
alpha_init += (2 * rng.rand(self.nslab) - 1) * alpha_irange
self.log_alpha = sharedX(numpy.log(alpha_init), name='log_alpha')
self.alpha = tensor.exp(self.log_alpha)
self.alpha.name = 'alpha'
self.mu = sharedX(
numpy.zeros(self.nslab) + mu0,
name='mu', borrow=True)
self.b = sharedX(
numpy.zeros(self.nhid) + b0,
name='b', borrow=True)
if W_irange is None:
# Derived closed to Xavier Glorot's magic formula
W_irange = 2 / numpy.sqrt(nvis * nhid)
self.W = sharedX(
(.5 - rng.rand(self.nvis, self.nslab)) * 2 * W_irange,
name='W', borrow=True)
# THE BETA IS IGNORED DURING TRAINING - FIXED AT MARGINAL DISTRIBUTION
self.B = sharedX(numpy.zeros(self.nvis) + B0, name='B', borrow=True)
if Lambda_irange > 0:
L = (rng.rand(self.nvis, self.nhid) * Lambda_irange
+ Lambda0)
else:
L = numpy.zeros((self.nvis, self.nhid)) + Lambda0
self.Lambda = sharedX(L, name='Lambda', borrow=True)
self._params = [
self.mu,
self.B,
self.Lambda,
self.W,
self.b,
self.log_alpha]
#def ml_gradients(self, pos_v, neg_v):
# inherited version is OK.
def gibbs_step_for_v(self, v, rng):
"""
.. todo::
WRITEME
"""
# Sometimes, the number of examples in the data set is not a
# multiple of self.batch_size.
batch_size = v.shape[0]
# sample h given v
h_mean = self.mean_h_given_v(v)
h_mean_shape = (batch_size, self.nhid)
h_sample = rng.binomial(size=h_mean_shape,
n = 1, p = h_mean, dtype = h_mean.dtype)
# sample s given (v,h)
s_mu, s_var = self.mean_var_s_given_v_h1(v)
s_mu_shape = (batch_size, self.nslab)
s_sample = s_mu + rng.normal(size=s_mu_shape) * tensor.sqrt(s_var)
#s_sample=(s_sample.reshape()*h_sample.dimshuffle(0,1,'x')).flatten(2)
# sample v given (s,h)
v_mean, v_var = self.mean_var_v_given_h_s(h_sample, s_sample)
v_mean_shape = (batch_size, self.nvis)
v_sample = rng.normal(size=v_mean_shape) * tensor.sqrt(v_var) + v_mean
del batch_size
return v_sample, locals()
## TODO?
def sample_visibles(self, params, shape, rng):
"""
.. todo::
WRITEME
"""
raise NotImplementedError('mu_pooled_ssRBM.sample_visibles')
def input_to_h_from_v(self, v):
"""
.. todo::
WRITEME
"""
D = self.Lambda
alpha = self.alpha
def sum_s(x):
return x.reshape((
-1,
self.nhid,
self.n_s_per_h)).sum(axis=2)
return tensor.add(
self.b,
-0.5 * tensor.dot(v * v, D),
sum_s(self.mu * tensor.dot(v, self.W)),
sum_s(0.5 * tensor.sqr(tensor.dot(v, self.W)) / alpha))
#def mean_h_given_v(self, v):
# inherited version is OK:
# return nnet.sigmoid(self.input_to_h_from_v(v))
    def mean_var_v_given_h_s(self, h, s):
        """
        Conditional mean and variance of the visibles given (h, s).

        Parameters
        ----------
        h : tensor_like
            Hidden samples, shape (batch_size, nhid).
        s : tensor_like
            Slab samples, shape (batch_size, nslab), where
            nslab == nhid * n_s_per_h.

        Returns
        -------
        (v_mu, v_var) : tuple of tensor_like
            Mean and variance of v, each of shape (batch_size, nvis).
        """
        # Precision of v is B plus the Lambda contribution of active hiddens.
        v_var = 1 / (self.B + tensor.dot(h, self.Lambda.T))
        # Regroup the flat slab vector by hidden unit: (batch, nhid, n_s_per_h)
        s3 = s.reshape((
            -1,
            self.nhid,
            self.n_s_per_h))
        # Gate each slab group by its hidden unit (broadcast h over slabs).
        hs = h.dimshuffle(0, 1, 'x') * s3
        v_mu = tensor.dot(hs.flatten(2), self.W.T) * v_var
        return v_mu, v_var
def mean_var_s_given_v_h1(self, v):
"""
.. todo::
WRITEME
"""
alpha = self.alpha
return (self.mu + tensor.dot(v, self.W) / alpha,
1.0 / alpha)
## TODO?
def mean_v_given_h(self, h):
"""
.. todo::
WRITEME
"""
raise NotImplementedError('mu_pooled_ssRBM.mean_v_given_h')
    def free_energy_given_v(self, v):
        """
        Free energy of a batch of visible configurations.

        Computed as the quadratic B-term on v minus the softplus of the
        hidden-unit inputs (the h and s variables are analytically
        marginalized out by `input_to_h_from_v`).

        Parameters
        ----------
        v : tensor_like
            Visible batch, shape (batch_size, nvis).

        Returns
        -------
        tensor_like
            Free energy per example, shape (batch_size,).
        """
        sigmoid_arg = self.input_to_h_from_v(v)
        return tensor.add(
            0.5 * (self.B * (v ** 2)).sum(axis=1),
            -tensor.nnet.softplus(sigmoid_arg).sum(axis=1))
#def __call__(self, v):
# inherited version is OK
#def reconstruction_error:
# inherited version should be OK
#def params(self):
# inherited version is OK.
def build_stacked_RBM(nvis, nhids, batch_size, vis_type='binary',
                      input_mean_vis=None, irange=1e-3, rng=None):
    """
    Allocate a StackedBlocks containing RBMs.

    The visible units of the input RBM can be either binary or gaussian,
    the other ones are all binary.

    Parameters
    ----------
    nvis : int
        Number of visible units of the first (input) RBM.
    nhids : list of int
        Hidden-layer sizes; one RBM is built per entry, each feeding the next.
    batch_size : int
        Batch size passed to every RBM constructor.
    vis_type : str
        'binary' or 'gaussian'; selects the type of the first RBM only.
    input_mean_vis : bool, optional
        Required (True/False) when ``vis_type == 'gaussian'``, must be None
        otherwise; forwarded as ``mean_vis`` to GaussianBinaryRBM.
    irange : float
        Weight-initialization range for every RBM.
    rng : RandomState, optional
        Random number generator shared by all RBMs.

    Notes
    -----
    Note from IG:
    This method doesn't seem to work correctly with Gaussian RBMs.
    In general, this is a difficult function to support, because it
    needs to pass the write arguments to the constructor of many kinds
    of RBMs. It would probably be better to just construct an instance
    of pylearn2.models.mlp.MLP with its hidden layers set to instances
    of pylearn2.models.mlp.RBM_Layer. If anyone is working on this kind
    of problem, a PR replacing this function with a helper function to
    make such an MLP would be very welcome.
    """
    # TODO: not sure this is the right way of dealing with mean_vis.
    layers = []
    assert vis_type in ['binary', 'gaussian']
    if vis_type == 'binary':
        assert input_mean_vis is None
    elif vis_type == 'gaussian':
        assert input_mean_vis in (True, False)

    # The number of visible units in each layer is the initial input
    # size and the first k-1 hidden unit sizes.
    nviss = [nvis] + nhids[:-1]
    seq = izip(
        xrange(len(nhids)),
        nhids,
        nviss,
    )
    for k, nhid, nvis in seq:
        if k == 0 and vis_type == 'gaussian':
            rbm = GaussianBinaryRBM(nvis=nvis, nhid=nhid,
                                    batch_size=batch_size,
                                    irange=irange,
                                    rng=rng,
                                    mean_vis=input_mean_vis)
        else:
            # BUG FIX: this previously passed ``nvis - nvis`` (i.e. 0) as the
            # positional visible-unit count, creating RBMs with no visible
            # units. Pass the actual layer size instead.
            rbm = RBM(nvis=nvis, nhid=nhid,
                      batch_size=batch_size,
                      irange=irange,
                      rng=rng)
        layers.append(rbm)

    # Create the stack
    return StackedBlocks(layers)
class L1_ActivationCost(Cost):
    """
    L1 penalty on the deviation of the mean hidden activation from a
    target value, with a dead zone of half-width ``eps``.

    Parameters
    ----------
    target : float
        Desired mean activation for each hidden unit.
    eps : float
        Tolerance: deviations smaller than ``eps`` incur no cost.
    coeff : float
        Multiplier applied to the mean excess deviation.
    """

    def __init__(self, target, eps, coeff):
        # Store all constructor arguments as attributes in one shot.
        self.__dict__.update(locals())
        del self.self

    def expr(self, model, data, ** kwargs):
        """
        Return the symbolic cost: coeff * mean(max(|E[h] - target| - eps, 0)).

        Parameters
        ----------
        model : Model
            Must expose ``P_H_given_V``.
        data : tensor_like
            A batch matching the model's input space.
        """
        self.get_data_specs(model)[0].validate(data)
        X = data
        H = model.P_H_given_V(X)
        # Mean activation of each hidden unit over the batch.
        h = H.mean(axis=0)
        err = abs(h - self.target)
        # Only deviations beyond the eps tolerance contribute.
        dead = T.maximum(err - self.eps, 0.)
        assert dead.ndim == 1
        rval = self.coeff * dead.mean()
        return rval

    def get_data_specs(self, model):
        """
        Return the (space, source) pair this cost consumes: the model's
        input space and input source.
        """
        return (model.get_input_space(), model.get_input_source())
# The following functionality was deprecated, but is evidently
# still needed to make the RBM work
class _Optimizer(object):
"""
Basic abstract class for computing parameter updates of a model.
"""
def updates(self):
"""Return symbolic updates to apply."""
raise NotImplementedError()
class _SGDOptimizer(_Optimizer):
    """
    Compute updates by stochastic gradient descent on mini-batches.

    Supports constant learning rates, or decreasing like 1/t after an initial
    period.

    Parameters
    ----------
    params : object or list
        Either a Model object with a .get_params() method, or a list of
        parameters to be optimized.
    base_lr : float
        The base learning rate before annealing or parameter-specific
        scaling.
    anneal_start : int, optional
        Number of steps after which to start annealing the learning
        rate at a 1/t schedule, where t is the number of stochastic
        gradient updates.
    use_adagrad : bool, optional
        'adagrad' adaptive learning rate scheme is used. If set to True,
        base_lr is used as e0.
    kwargs : dict
        Parameter-specific ``<name>_lr`` and ``<name>_clip`` settings.

    Notes
    -----
    The formula to compute the effective learning rate on a parameter is:
    <paramname>_lr * max(0.0, min(base_lr, lr_anneal_start/(iteration+1)))

    Parameter-specific learning rates can be set by passing keyword
    arguments <name>_lr, where name is the .name attribute of a given
    parameter.

    Parameter-specific bounding values can be specified by passing
    keyword arguments <param>_clip, which should be a (min, max) pair.

    Adagrad is recommended with sparse inputs. It normalizes the base
    learning rate of a parameter theta_i by the accumulated 2-norm of its
    gradient: e{ti} = e0 / sqrt( sum_t (dL_t / dtheta_i)^2 )
    """

    def __init__(self, params, base_lr, anneal_start=None, use_adagrad=False,
                 ** kwargs):
        if hasattr(params, '__iter__'):
            self.params = params
        elif hasattr(params, 'get_params') and hasattr(
                params.get_params, '__call__'):
            self.params = params.get_params()
        else:
            raise ValueError("SGDOptimizer couldn't figure out what to do "
                             "with first argument: '%s'" % str(params))
        # Identity test with ``is None`` (was ``== None``): PEP 8, and safe
        # against objects whose __eq__ broadcasts or misbehaves.
        if anneal_start is None:
            self.anneal_start = None
        else:
            self.anneal_start = as_floatX(anneal_start)

        # Create accumulators and epsilon0's
        self.use_adagrad = use_adagrad
        if self.use_adagrad:
            self.accumulators = {}
            self.e0s = {}
            for param in self.params:
                self.accumulators[param] = theano.shared(
                    value=as_floatX(0.), name='acc_%s' % param.name)
                self.e0s[param] = as_floatX(base_lr)

        # Set up the clipping values
        self.clipping_values = {}
        # Keep track of names already seen
        clip_names_seen = set()
        for parameter in self.params:
            clip_name = '%s_clip' % parameter.name
            if clip_name in kwargs:
                if clip_name in clip_names_seen:
                    logger.warning('In SGDOptimizer, at least two parameters '
                                   'have the same name. Both will be affected '
                                   'by the keyword argument '
                                   '{0}.'.format(clip_name))
                clip_names_seen.add(clip_name)
                p_min, p_max = kwargs[clip_name]
                assert p_min <= p_max
                self.clipping_values[parameter] = (p_min, p_max)

        # Check that no ..._clip keyword is being ignored
        for clip_name in clip_names_seen:
            kwargs.pop(clip_name)
        # Plain iteration over the dict replaces py2-only ``iterkeys()`` and
        # behaves identically on both Python 2 and 3.
        for kw in kwargs:
            if kw[-5:] == '_clip':
                logger.warning('In SGDOptimizer, keyword argument {0} '
                               'will be ignored, because no parameter '
                               'was found with name {1}.'.format(kw, kw[:-5]))

        self.learning_rates_setup(base_lr, **kwargs)

    def learning_rates_setup(self, base_lr, **kwargs):
        """
        Initializes parameter-specific learning rate dictionary and shared
        variables for the annealed base learning rate and iteration number.

        Parameters
        ----------
        base_lr : float
            The base learning rate before annealing or parameter-specific
            scaling.
        kwargs : dict
            Parameter-specific ``<name>_lr`` scales.

        Notes
        -----
        Parameter-specific learning rates can be set by passing keyword
        arguments <name>_lr, where name is the .name attribute of a given
        parameter.
        """
        # Take care of learning rate scales for individual parameters
        self.learning_rates = {}
        # Base learning rate per example.
        self.base_lr = theano._asarray(base_lr, dtype=theano.config.floatX)

        # Keep track of names already seen
        lr_names_seen = set()
        for parameter in self.params:
            lr_name = '%s_lr' % parameter.name
            if lr_name in lr_names_seen:
                logger.warning('In SGDOptimizer, '
                               'at least two parameters have the same name. '
                               'Both will be affected by the keyword argument '
                               '{0}.'.format(lr_name))
            lr_names_seen.add(lr_name)

            thislr = kwargs.get(lr_name, 1.)
            self.learning_rates[parameter] = sharedX(thislr, lr_name)

        # Verify that no ..._lr keyword argument is ignored
        for lr_name in lr_names_seen:
            if lr_name in kwargs:
                kwargs.pop(lr_name)
        for kw in kwargs:
            if kw[-3:] == '_lr':
                logger.warning('In SGDOptimizer, keyword argument {0} '
                               'will be ignored, because no parameter '
                               'was found with name {1}.'.format(kw, kw[:-3]))

        # A shared variable for storing the iteration number.
        self.iteration = sharedX(theano._asarray(0, dtype='int32'),
                                 name='iter')

        # A shared variable for storing the annealed base learning rate, used
        # to lower the learning rate gradually after a certain amount of time.
        self.annealed = sharedX(base_lr, 'annealed')

    def learning_rate_updates(self, gradients):
        """
        Compute a dictionary of shared variable updates related to annealing
        the learning rate.

        Parameters
        ----------
        gradients : list of tensor_likes
            Gradients, aligned with self.params (used by adagrad only).

        Returns
        -------
        updates : dict
            A dictionary with the shared variables representing SGD metadata
            as keys and a symbolic expression of how they are to be updated as
            values.
        """
        ups = {}

        if self.use_adagrad:
            learn_rates = []
            for param, gp in zip(self.params, gradients):
                # Accumulate squared gradient norms; the effective rate is
                # e0 / sqrt(accumulated norm).
                acc = self.accumulators[param]
                ups[acc] = acc + (gp ** 2).sum()
                learn_rates.append(self.e0s[param] / (ups[acc] ** .5))
        else:
            # Annealing coefficient. Here we're using a formula of
            # min(base_lr, anneal_start / (iteration + 1))
            if self.anneal_start is None:
                annealed = sharedX(self.base_lr)
            else:
                frac = self.anneal_start / (self.iteration + 1.)
                annealed = tensor.minimum(
                    as_floatX(frac),
                    self.base_lr  # maximum learning rate
                )

            # Update the shared variable for the annealed learning rate.
            ups[self.annealed] = annealed
            ups[self.iteration] = self.iteration + 1

            # Calculate the learning rates for each parameter, in the order
            # they appear in self.params
            learn_rates = [annealed * self.learning_rates[p] for p in
                           self.params]
        return ups, learn_rates

    def updates(self, gradients):
        """
        Return symbolic updates to apply given a set of gradients
        on the parameters being optimized.

        Parameters
        ----------
        gradients : list of tensor_likes
            List of symbolic gradients for the parameters contained
            in self.params, in the same order as in self.params.

        Returns
        -------
        updates : dict
            A dictionary with the shared variables in self.params as keys
            and a symbolic expression of how they are to be updated each
            SGD step as values.

        Notes
        -----
        `cost_updates` is a convenient helper function that takes all
        necessary gradients with respect to a given symbolic cost.
        """
        ups = {}
        # Add the learning rate/iteration updates
        l_ups, learn_rates = self.learning_rate_updates(gradients)
        safe_update(ups, l_ups)

        # Get the updates from sgd_updates, a PyLearn library function.
        p_up = dict(self.sgd_updates(self.params, gradients, learn_rates))

        # Add the things in p_up to ups
        safe_update(ups, p_up)

        # Clip the values if needed.
        # We do not want the clipping values to force an upcast
        # of the update: updates should have the same type as params
        # (``items()`` replaces py2-only ``iteritems()``; equivalent here).
        for param, (p_min, p_max) in self.clipping_values.items():
            p_min = tensor.as_tensor(p_min)
            p_max = tensor.as_tensor(p_max)
            dtype = param.dtype
            if p_min.dtype != dtype:
                p_min = tensor.cast(p_min, dtype)
            if p_max.dtype != dtype:
                p_max = tensor.cast(p_max, dtype)
            ups[param] = tensor.clip(ups[param], p_min, p_max)

        # Return the updates dictionary.
        return ups

    def cost_updates(self, cost):
        """
        Return symbolic updates to apply given a cost function.

        Parameters
        ----------
        cost : tensor_like
            Symbolic cost with respect to which the gradients of
            the parameters should be taken. Should be 0-dimensional
            (scalar valued).

        Returns
        -------
        updates : dict
            A dictionary with the shared variables in self.params as keys
            and a symbolic expression of how they are to be updated each
            SGD step as values.
        """
        grads = [tensor.grad(cost, p) for p in self.params]
        return self.updates(gradients=grads)

    def sgd_updates(self, params, grads, stepsizes):
        """
        Return a list of (pairs) that can be used
        as updates in theano.function to
        implement stochastic gradient descent.

        Parameters
        ----------
        params : list of Variable
            variables to adjust in order to minimize some cost
        grads : list of Variable
            the gradient on each param (with respect to some cost)
        stepsizes : symbolic scalar or list of one symbolic scalar per param
            step by this amount times the negative gradient on each iteration
        """
        # If stepsizes is a single scalar, broadcast it to one per parameter.
        try:
            iter(stepsizes)
        except Exception:
            stepsizes = [stepsizes for p in params]
        if len(params) != len(grads):
            raise ValueError('params and grads have different lens')
        updates = [(p, p - step * gp) for (step, p, gp)
                   in zip(stepsizes, params, grads)]
        return updates

    def sgd_momentum_updates(self, params, grads, stepsizes, momentum=0.9):
        """
        Like :meth:`sgd_updates`, but with classical momentum: each
        parameter keeps a heading h updated as
        h <- m*h - (1-m)*grad, and steps along p <- p + step*h.

        Parameters
        ----------
        params : list of Variable
            Variables to adjust.
        grads : list of Variable
            Gradient on each param.
        stepsizes : scalar or list of scalars
            Step size(s), broadcast to one per parameter if scalar.
        momentum : scalar or list of scalars
            Momentum coefficient(s), broadcast likewise.
        """
        # if stepsizes is just a scalar, expand it to match params
        try:
            iter(stepsizes)
        except Exception:
            stepsizes = [stepsizes for p in params]
        try:
            iter(momentum)
        except Exception:
            momentum = [momentum for p in params]
        if len(params) != len(grads):
            raise ValueError('params and grads have different lens')
        headings = [theano.shared(numpy.zeros_like(p.get_value(borrow=True)))
                    for p in params]
        updates = []
        for s, p, gp, m, h in zip(stepsizes, params, grads, momentum,
                                  headings):
            updates.append((p, p + s * h))
            updates.append((h, m * h - (1.0 - m) * gp))
        return updates
| kastnerkyle/pylearn2 | pylearn2/models/rbm.py | Python | bsd-3-clause | 58,387 | [
"Gaussian"
] | 189a6b7eadb429e03f212a96018635fc29d237bd6173ebb45ee964881785a3a5 |
import os
from setuptools import setup
def get_version():
    """
    Read the package version out of ``treetime/__init__.py``.

    Scans for the first line starting with ``version`` and strips the
    surrounding quotes from the value after ``=``. Falls back to
    "0.0.0" if no such line exists.
    """
    version = "0.0.0"
    with open('treetime/__init__.py') as handle:
        for line in handle:
            if line.startswith('version'):
                version = line.split('=')[-1].strip()[1:-1]
                break
    return version
# The PyPI long description is the README, rendered as Markdown.
with open("README.md", "r") as fh:
    long_description = fh.read()

setup(
    name = "phylo-treetime",
    version = get_version(),
    author = "Pavel Sagulenko, Emma Hodcroft, and Richard Neher",
    author_email = "richard.neher@unibas.ch",
    description = ("Maximum-likelihood phylodynamic inference"),
    long_description = long_description,
    long_description_content_type="text/markdown",
    license = "MIT",
    keywords = "Time-stamped phylogenies, phylogeography, virus evolution",
    url = "https://github.com/neherlab/treetime",
    packages=['treetime'],
    # biopython 1.77 and 1.78 are explicitly excluded — presumably
    # incompatible releases; confirm against the project changelog.
    install_requires = [
        'biopython>=1.67,!=1.77,!=1.78',
        'numpy>=1.10.4',
        'pandas>=0.17.1',
        'scipy>=0.16.1'
    ],
    # matplotlib is capped at the 2.x series only for Python < 3.6.
    extras_require = {
        ':python_version < "3.6"':['matplotlib>=2.0, ==2.*'],
        ':python_version >= "3.6"':['matplotlib>=2.0'],
    },
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Topic :: Scientific/Engineering :: Bio-Informatics",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8"
    ],
    # Command-line entry point installed into the user's PATH.
    scripts=['bin/treetime']
)
| neherlab/treetime | setup.py | Python | mit | 1,687 | [
"Biopython"
] | 6835b5b0768e5c697c9c0ae9329dd72ea0b1606a242ceb5fec24913ecdaeecfa |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
import os
import h5py
import brainstorm as bs
from brainstorm.data_iterators import Minibatches
from brainstorm.handlers import PyCudaHandler
from time import time
# Seed the global RNG so repeated runs are reproducible.
bs.global_rnd.set_seed(42)

# ---------------------------- Set up Iterators ----------------------------- #
# MNIST is read from $BRAINSTORM_DATA_DIR/MNIST.hdf5 (default: ./data).
data_dir = os.environ.get('BRAINSTORM_DATA_DIR', 'data')
data_file = os.path.join(data_dir, 'MNIST.hdf5')
ds = h5py.File(data_file, 'r')['normalized_split']
x_tr, y_tr = ds['training']['default'][:], ds['training']['targets'][:]
x_va, y_va = ds['validation']['default'][:], ds['validation']['targets'][:]

batch_size = 100
getter_tr = Minibatches(batch_size, default=x_tr, targets=y_tr)
getter_va = Minibatches(batch_size, default=x_va, targets=y_va)

# ----------------------------- Set up Network ------------------------------ #
# 28x28x1 input, 10-way softmax output; the output projection is named 'FC'
# so its weights can be targeted by the L2-norm constraint below.
inp, fc = bs.tools.get_in_out_layers('classification', (28, 28, 1), 10, projection_name='FC')
# Two ReLU hidden layers of 1200 units with dropout (0.2 on the input,
# 0.5 between hidden layers).
network = bs.Network.from_layer(
    inp >>
    bs.layers.Dropout(drop_prob=0.2) >>
    bs.layers.FullyConnected(1200, name='Hid1', activation='rel') >>
    bs.layers.Dropout(drop_prob=0.5) >>
    bs.layers.FullyConnected(1200, name='Hid2', activation='rel') >>
    bs.layers.Dropout(drop_prob=0.5) >>
    fc
)

# Run on the GPU; comment out the next line to fall back to the default
# (CPU) handler.
network.set_handler(PyCudaHandler())
network.initialize(bs.initializers.Gaussian(0.01))
network.set_weight_modifiers({"FC": bs.value_modifiers.ConstrainL2Norm(1)})

# ----------------------------- Set up Trainer ------------------------------ #
trainer = bs.Trainer(bs.training.MomentumStepper(learning_rate=0.1, momentum=0.9))
trainer.add_hook(bs.hooks.ProgressBar())
scorers = [bs.scorers.Accuracy(out_name='Output.outputs.predictions')]
trainer.add_hook(bs.hooks.MonitorScores('valid_getter', scorers,
                                        name='validation'))
# Snapshot the weights whenever validation accuracy improves.
trainer.add_hook(bs.hooks.SaveBestNetwork('validation.Accuracy',
                                          filename='mnist_pi_best.hdf5',
                                          name='best weights',
                                          criterion='max'))
trainer.add_hook(bs.hooks.StopAfterEpoch(20))

# -------------------------------- Train ------------------------------------ #
start_time = time()
trainer.train(network, getter_tr, valid_getter=getter_va)
print("The training took %.4f seconds." % (time() - start_time))
print("Best validation accuracy:", max(trainer.logs["validation"]["Accuracy"]))
| pinae/MNIST-Brainstorm | train.py | Python | gpl-3.0 | 2,537 | [
"Gaussian"
] | 7477e4bba5ad6fb506284227c65698e332eb06c1edf9a91abf7a501160ce6bc2 |
"""
This module finds diffusion paths through a structure based on a given
potential field.
If you use PathFinder algorithm for your research, please consider citing the
following work::
Ziqin Rong, Daniil Kitchaev, Pieremanuele Canepa, Wenxuan Huang, Gerbrand
Ceder, The Journal of Chemical Physics 145 (7), 074112
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import numpy as np
import numpy.linalg as la
import scipy.signal
import scipy.stats
from scipy.interpolate import interp1d
import math
import six
from abc import ABCMeta, abstractmethod
from pymatgen.core.structure import Structure
from pymatgen.core.lattice import Lattice
from pymatgen.core.sites import *
from pymatgen.core.periodic_table import *
from pymatgen.io.vasp.inputs import Poscar
from pymatgen.io.vasp.outputs import VolumetricData, Chgcar
__author__ = "Daniil Kitchaev"
__version__ = "1.0"
__maintainer__ = "Daniil Kitchaev, Ziqin Rong"
__email__ = "dkitch@mit.edu, rongzq08@mit.edu"
__status__ = "Development"
__date__ = "March 17, 2015"
class NEBPathfinder:
    def __init__(self, start_struct, end_struct, relax_sites, v, n_images=20):
        """
        General pathfinder for interpolating between two structures, where the
        interpolating path is calculated with the elastic band method with
        respect to the given static potential for sites whose indices are given
        in relax_sites, and is linear otherwise.

        Args:
            start_struct, end_struct: Endpoint structures to interpolate
            relax_sites: List of site indices whose interpolation paths should
                be relaxed
            v: Static potential field to use for the elastic band relaxation
            n_images: Number of interpolation images to generate
        """
        self.__s1 = start_struct
        self.__s2 = end_struct
        self.__relax_sites = relax_sites
        self.__v = v
        self.__n_images = n_images
        self.__images = None
        self.interpolate()

    def interpolate(self):
        """
        Finds a set of n_images from self.s1 to self.s2, where all sites except
        for the ones given in relax_sites, the interpolation is linear (as in
        pymatgen.core.structure.interpolate), and for the site indices given
        in relax_sites, the path is relaxed by the elastic band method within
        the static potential V.
        """
        images = self.__s1.interpolate(self.__s2, nimages=self.__n_images,
                                       interpolate_lattices=False)
        for site_i in self.__relax_sites:
            start_f = images[0].sites[site_i].frac_coords
            end_f = images[-1].sites[site_i].frac_coords

            # Relax this site's path in discrete grid coordinates, then
            # translate each image's site onto the relaxed path.
            path = NEBPathfinder.string_relax(
                NEBPathfinder.__f2d(start_f, self.__v),
                NEBPathfinder.__f2d(end_f, self.__v),
                self.__v, n_images=(self.__n_images + 1),
                dr=[self.__s1.lattice.a / self.__v.shape[0],
                    self.__s1.lattice.b / self.__v.shape[1],
                    self.__s1.lattice.c / self.__v.shape[2]])
            for image_i, image in enumerate(images):
                image.translate_sites(site_i,
                                      NEBPathfinder.__d2f(path[image_i],
                                                          self.__v) -
                                      image.sites[site_i].frac_coords,
                                      frac_coords=True, to_unit_cell=True)
        self.__images = images

    @property
    def images(self):
        """
        Returns a list of structures interpolating between the start and
        endpoint structures.
        """
        return self.__images

    def plot_images(self, outfile):
        """
        Generates a POSCAR with the calculated diffusion path with respect to
        the first endpoint.

        :param outfile: Output file for the POSCAR
        """
        sum_struct = self.__images[0].sites
        for image in self.__images:
            for site_i in self.__relax_sites:
                sum_struct.append(PeriodicSite(image.sites[site_i].specie,
                                               image.sites[site_i].frac_coords,
                                               self.__images[0].lattice,
                                               to_unit_cell=True,
                                               coords_are_cartesian=False))
        sum_struct = Structure.from_sites(sum_struct, validate_proximity=False)
        p = Poscar(sum_struct)
        p.write_file(outfile)

    @staticmethod
    def string_relax(start, end, V, n_images=25, dr=None, h=3.0, k=0.17,
                     min_iter=100, max_iter=10000, max_tol=5e-6):
        """
        Implements path relaxation via the elastic band method. In general, the
        method is to define a path by a set of points (images) connected with
        bands with some elasticity constant k. The images then relax along the
        forces found in the potential field V, counterbalanced by the elastic
        response of the elastic band. In general the endpoints of the band can
        be allowed to relax also to their local minima, but in this calculation
        they are kept fixed.

        Args:
            start, end: Endpoints of the path calculation given in discrete
                coordinates with respect to the grid in V
            V: potential field through which to calculate the path
            n_images: number of images used to define the path. In general
                anywhere from 20 to 40 seems to be good.
            dr: Conversion ratio from discrete coordinates to real coordinates
                for each of the three coordinate vectors
            h: Step size for the relaxation. h = 0.1 works reliably, but is
                slow. h=10 diverges with large gradients but for the types of
                gradients seen in CHGCARs, works pretty reliably
            k: Elastic constant for the band (in real units, not discrete)
            min_iter, max_iter: Number of optimization steps the string will
                take before exiting (even if unconverged)
            max_tol: Convergence threshold such that if the string moves by
                less than max_tol in a step, and at least min_iter steps have
                passed, the algorithm will terminate. Depends strongly on the
                size of the gradients in V, but 5e-6 works reasonably well for
                CHGCARs.
        """
        #
        # This code is based on the MATLAB example provided by
        # Prof. Eric Vanden-Eijnden of NYU
        # (http://www.cims.nyu.edu/~eve2/main.htm)
        #
        print(
            "Getting path from {} to {} (coords wrt V grid)".format(start, end))

        # Set parameters
        if not dr:
            dr = np.array(
                [1.0 / V.shape[0], 1.0 / V.shape[1], 1.0 / V.shape[2]])
        else:
            dr = np.array(dr, dtype=float)
        keff = k * dr * n_images
        h0 = h

        # Initialize string: a straight line from start to end, then
        # reparametrized to equal arc-length spacing.
        g1 = np.linspace(0, 1, n_images)
        s0 = start
        s1 = end
        s = np.array([g * (s1 - s0) for g in g1]) + s0
        ds = s - np.roll(s, 1, axis=0)
        ds[0] = (ds[0] - ds[0])
        ls = np.cumsum(la.norm(ds, axis=1))
        ls = ls / ls[-1]
        fi = interp1d(ls, s, axis=0)
        s = fi(g1)

        # Evaluate initial distances (for elastic equilibrium)
        ds0_plus = s - np.roll(s, 1, axis=0)
        ds0_minus = s - np.roll(s, -1, axis=0)
        ds0_plus[0] = (ds0_plus[0] - ds0_plus[0])
        ds0_minus[-1] = (ds0_minus[-1] - ds0_minus[-1])

        # Evaluate potential gradient outside the loop, as potential does not
        # change per step in this approximation.
        dV = np.gradient(V)

        # Evolve string
        for step in range(0, max_iter):
            if step > min_iter:
                # Gradually decay step size to prevent oscillations
                h = h0 * np.exp(-2.0 * (step - min_iter) / max_iter)
            else:
                h = h0
            # Calculate forces acting on string
            d = V.shape
            s0 = s
            # BUG FIX: each gradient component must be scaled by its own grid
            # spacing; previously all three were divided by dr[0], which is
            # wrong for anisotropic grids / non-cubic cells.
            edV = np.array([[dV[0][int(pt[0]) % d[0]][int(pt[1]) % d[1]][
                                 int(pt[2]) % d[2]] / dr[0],
                             dV[1][int(pt[0]) % d[0]][int(pt[1]) % d[1]][
                                 int(pt[2]) % d[2]] / dr[1],
                             dV[2][int(pt[0]) % d[0]][int(pt[1]) % d[1]][
                                 int(pt[2]) % d[2]] / dr[2]] for pt in s])
            # if(step % 100 == 0):
            #    print(edV)

            # Update according to force due to potential and string elasticity
            ds_plus = s - np.roll(s, 1, axis=0)
            ds_minus = s - np.roll(s, -1, axis=0)
            ds_plus[0] = (ds_plus[0] - ds_plus[0])
            ds_minus[-1] = (ds_minus[-1] - ds_minus[-1])
            Fpot = edV
            Fel = keff * (la.norm(ds_plus) - la.norm(ds0_plus)) * (
                ds_plus / la.norm(ds_plus))
            Fel += keff * (la.norm(ds_minus) - la.norm(ds0_minus)) * (
                ds_minus / la.norm(ds_minus))
            s -= h * (Fpot + Fel)

            # Fix endpoints
            s[0] = s0[0]
            s[-1] = s0[-1]

            # Reparametrize string so images stay evenly spaced along it.
            ds = s - np.roll(s, 1, axis=0)
            ds[0] = (ds[0] - ds[0])
            ls = np.cumsum(la.norm(ds, axis=1))
            ls = ls / ls[-1]
            fi = interp1d(ls, s, axis=0)
            s = fi(g1)

            tol = la.norm((s - s0) * dr) / n_images / h

            if tol > 1e10:
                raise ValueError(
                    "Pathfinding failed, path diverged! Consider reducing h to "
                    "avoid divergence.")

            if step > min_iter and tol < max_tol:
                print("Converged at step {}".format(step))
                break

            if step % 100 == 0:
                print("Step {} - ds = {}".format(step, tol))
        return s

    @staticmethod
    def __f2d(frac_coords, v):
        """
        Converts fractional coordinates to discrete coordinates with respect to
        the grid size of v
        """
        # frac_coords = frac_coords % 1
        return np.array([int(frac_coords[0] * v.shape[0]),
                         int(frac_coords[1] * v.shape[1]),
                         int(frac_coords[2] * v.shape[2])])

    @staticmethod
    def __d2f(disc_coords, v):
        """
        Converts a point given in discrete coordinates withe respect to the
        grid in v to fractional coordinates.
        """
        return np.array([disc_coords[0] / v.shape[0],
                         disc_coords[1] / v.shape[1],
                         disc_coords[2] / v.shape[2]])
class StaticPotential(six.with_metaclass(ABCMeta)):
    """
    Defines a general static potential for diffusion calculations. Implements
    grid-rescaling and smearing for the potential grid. Also provides a
    function to normalize the potential from 0 to 1 (recommended).
    """

    def __init__(self, struct, pot):
        # struct: the Structure the grid is defined on (lattice used for
        # real-space distances in gaussian_smear).
        # pot: 3D numpy array holding the potential on a regular grid.
        self.__v = pot
        self.__s = struct

    def get_v(self):
        """
        Returns the potential
        """
        return self.__v

    def normalize(self):
        """
        Sets the potential range 0 to 1.
        """
        self.__v = self.__v - np.amin(self.__v)
        self.__v = self.__v / np.amax(self.__v)

    def rescale_field(self, new_dim):
        """
        Changes the discretization of the potential field by linear
        interpolation. This is necessary if the potential field obtained from
        DFT is strangely skewed, or is too fine or coarse. Obeys periodic
        boundary conditions at the edges of the cell. Alternatively useful for
        mixing potentials that originally are on different grids.

        :param new_dim: tuple giving the numpy shape of the new grid
        """
        v_dim = self.__v.shape
        # Pad one wrapped plane on each high edge so interpolation near the
        # boundary sees the periodic image.
        padded_v = np.lib.pad(self.__v, ((0, 1), (0, 1), (0, 1)), mode='wrap')
        ogrid_list = np.array([list(c) for c in list(
            np.ndindex(v_dim[0] + 1, v_dim[1] + 1, v_dim[2] + 1))])
        v_ogrid = padded_v.reshape(
            ((v_dim[0] + 1) * (v_dim[1] + 1) * (v_dim[2] + 1), -1))
        ngrid_a, ngrid_b, ngrid_c = np.mgrid[0: v_dim[0]: v_dim[0] / new_dim[0],
                                             0: v_dim[1]: v_dim[1] / new_dim[1],
                                             0: v_dim[2]: v_dim[2] / new_dim[2]]
        v_ngrid = scipy.interpolate.griddata(ogrid_list, v_ogrid,
                                             (ngrid_a, ngrid_b, ngrid_c),
                                             method='linear').reshape(
            (new_dim[0], new_dim[1], new_dim[2]))
        self.__v = v_ngrid

    def gaussian_smear(self, r):
        """
        Applies an isotropic Gaussian smear of width (standard deviation) r to
        the potential field. This is necessary to avoid finding paths through
        narrow minima or nodes that may exist in the field (although any
        potential or charge distribution generated from GGA should be
        relatively smooth anyway). The smearing obeys periodic boundary
        conditions at the edges of the cell.

        :param r - Smearing width in cartesian coordinates, in the same units
            as the structure lattice vectors
        """
        # Since scaling factor in fractional coords is not isotropic, have to
        # have different radii in 3 directions
        a_lat = self.__s.lattice.a
        b_lat = self.__s.lattice.b
        c_lat = self.__s.lattice.c

        # Conversion factors for discretization of v
        v_dim = self.__v.shape
        r_frac = (r / a_lat, r / b_lat, r / c_lat)
        r_disc = (int(math.ceil(r_frac[0] * v_dim[0])),
                  int(math.ceil(r_frac[1] * v_dim[1])),
                  int(math.ceil(r_frac[2] * v_dim[2])))

        # Apply smearing
        # Gaussian filter: kernel entries hold the real-space distance from
        # the kernel center (in units of r), converted to a normal pdf below.
        gauss_dist = np.zeros(
            (r_disc[0] * 4 + 1, r_disc[1] * 4 + 1, r_disc[2] * 4 + 1))
        for g_a in np.arange(-2.0 * r_disc[0], 2.0 * r_disc[0] + 1, 1.0):
            for g_b in np.arange(-2.0 * r_disc[1], 2.0 * r_disc[1] + 1, 1.0):
                for g_c in np.arange(-2.0 * r_disc[2], 2.0 * r_disc[2] + 1,
                                     1.0):
                    g = np.array(
                        [g_a / v_dim[0], g_b / v_dim[1], g_c / v_dim[2]]).T
                    gauss_dist[int(g_a + r_disc[0])][int(g_b + r_disc[1])][
                        int(g_c + r_disc[2])] = la.norm(
                        np.dot(self.__s.lattice.matrix, g)) / r
        gauss = scipy.stats.norm.pdf(gauss_dist)
        gauss = gauss / np.sum(gauss, dtype=float)
        # NOTE(review): the array is padded by r_disc per side, but the kernel
        # spans 4*r_disc + 1 points, so a 'valid' convolution yields an output
        # 2*r_disc smaller than self.__v along each axis — verify the intended
        # output shape (pad of 2*r_disc would preserve it).
        padded_v = np.pad(self.__v, (
            (r_disc[0], r_disc[0]), (r_disc[1], r_disc[1]), (r_disc[2], r_disc[2])),
            mode='wrap')
        smeared_v = scipy.signal.convolve(padded_v, gauss, mode='valid')
        self.__v = smeared_v
class ChgcarPotential(StaticPotential):
    '''
    Implements a potential field based on the charge density output from VASP.
    '''

    def __init__(self, chgcar, smear=False, normalize=True):
        """
        :param chgcar: Chgcar object based on a VASP run of the structure of
            interest (Chgcar.from_file("CHGCAR"))
        :param smear: Whether or not to apply a Gaussian smearing to the
            potential
        :param normalize: Whether or not to normalize the potential to range
            from 0 to 1
        """
        v = chgcar.data['total']
        # Divide out the number of grid points — presumably converting the
        # CHGCAR convention to a per-grid-point density; confirm against the
        # pymatgen Chgcar documentation.
        v = v / (v.shape[0] * v.shape[1] * v.shape[2])
        StaticPotential.__init__(self, chgcar.structure, v)
        if smear:
            self.gaussian_smear(2.0)
        if normalize:
            self.normalize()
class FreeVolumePotential(StaticPotential):
    '''
    Implements a potential field based on geometric distances from atoms in
    the structure - basically, the potential is lower at points farther away
    from any atoms in the structure.
    '''

    def __init__(self, struct, dim, smear=False, normalize=True):
        """
        :param struct: Unit cell on which to base the potential
        :param dim: Grid size for the potential
        :param smear: Whether or not to apply a Gaussian smearing to the
            potential
        :param normalize: Whether or not to normalize the potential to range
            from 0 to 1
        """
        self.__s = struct
        v = FreeVolumePotential.__add_gaussians(struct, dim)
        StaticPotential.__init__(self, struct, v)
        if smear:
            self.gaussian_smear(2.0)
        if normalize:
            self.normalize()

    @staticmethod
    def __add_gaussians(s, dim, r=1.5):
        # For every grid point, record the distance to the nearest site
        # (in units of r); the normal pdf of that distance then gives a
        # potential peaked at atoms and decaying into free volume.
        gauss_dist = np.zeros(dim)
        for a_d in np.arange(0.0, dim[0], 1.0):
            for b_d in np.arange(0.0, dim[1], 1.0):
                for c_d in np.arange(0.0, dim[2], 1.0):
                    coords_f = np.array(
                        [a_d / dim[0], b_d / dim[1], c_d / dim[2]])
                    # Nearest-site distance: sphere search of radius a, sorted
                    # by distance, take the closest hit.
                    d_f = sorted(s.get_sites_in_sphere(coords_f, s.lattice.a),
                                 key=lambda x: x[1])[0][1]
                    # print(d_f)
                    gauss_dist[int(a_d)][int(b_d)][int(c_d)] = d_f / r
        v = scipy.stats.norm.pdf(gauss_dist)
        return v
class MixedPotential(StaticPotential):
    """
    Implements a potential that is a weighted sum of some other potentials
    """

    def __init__(self, potentials, coefficients, smear=False, normalize=True):
        """
        Args:
            potentials: List of objects extending the StaticPotential
                superclass
            coefficients: Mixing weights for the elements of the potentials
                list
            smear: Whether or not to apply a Gaussian smearing to the potential
            normalize: Whether or not to normalize the potential to range from
                0 to 1
        """
        v = potentials[0].get_v() * coefficients[0]
        # BUG FIX: ``potentials[0].__s`` is name-mangled inside this class to
        # ``_MixedPotential__s`` and raises AttributeError, because the
        # structure is stored by StaticPotential.__init__ under the mangled
        # name ``_StaticPotential__s``. Access that name explicitly.
        s = potentials[0]._StaticPotential__s
        for i in range(1, len(potentials)):
            v += potentials[i].get_v() * coefficients[i]
        StaticPotential.__init__(self, s, v)
        if smear:
            self.gaussian_smear(2.0)
        if normalize:
            self.normalize()
| xhqu1981/pymatgen | pymatgen/analysis/path_finder.py | Python | mit | 18,284 | [
"Gaussian",
"VASP",
"pymatgen"
] | 16aa4826724a42dc92d8924c95b5736c1ba7d03e0e6ed8f9cf353e7d56241803 |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Gromacs(CMakePackage):
    """GROMACS (GROningen MAchine for Chemical Simulations) is a molecular
    dynamics package primarily designed for simulations of proteins, lipids
    and nucleic acids. It was originally developed in the Biophysical
    Chemistry department of University of Groningen, and is now maintained
    by contributors in universities and research centers across the world.
    GROMACS is one of the fastest and most popular software packages
    available and can run on CPUs as well as GPUs. It is free, open source
    released under the GNU General Public License. Starting from version 4.6,
    GROMACS is released under the GNU Lesser General Public License.
    """
    homepage = 'http://www.gromacs.org'
    url = 'http://ftp.gromacs.org/gromacs/gromacs-5.1.2.tar.gz'
    git = 'https://github.com/gromacs/gromacs.git'
    # Known releases (with their download checksums), newest first.
    version('develop', branch='master')
    version('2018.2', '7087462bb08393aec4ce3192fa4cd8df')
    version('2018.1', '7ee393fa3c6b7ae351d47eae2adf980e')
    version('2018', '6467ffb1575b8271548a13abfba6374c')
    version('2016.5', 'f41807e5b2911ccb547a3fd11f105d47')
    version('2016.4', '19c8b5c85f3ec62df79d2249a3c272f8')
    version('2016.3', 'e9e3a41bd123b52fbcc6b32d09f8202b')
    version('5.1.5', '831fe741bcd9f1612155dffc919885f2')
    version('5.1.4', 'ba2e34d59b3982603b4935d650c08040')
    version('5.1.2', '614d0be372f1a6f1f36382b7a6fcab98')
    # User-selectable build options; each is translated into a CMake flag
    # in cmake_args() below.
    variant('mpi', default=True, description='Activate MPI support')
    variant('shared', default=True,
            description='Enables the build of shared libraries')
    variant(
        'double', default=False,
        description='Produces a double precision version of the executables')
    variant('plumed', default=False, description='Enable PLUMED support')
    variant('cuda', default=False, description='Enable CUDA support')
    variant('build_type', default='RelWithDebInfo',
            description='The build type to build',
            values=('Debug', 'Release', 'RelWithDebInfo', 'MinSizeRel',
                    'Reference', 'RelWithAssert', 'Profile'))
    # PLUMED's MPI setting must match ours, hence the paired constraints.
    depends_on('mpi', when='+mpi')
    depends_on('plumed+mpi', when='+plumed+mpi')
    depends_on('plumed~mpi', when='+plumed~mpi')
    depends_on('fftw')
    # Newer GROMACS requires a newer CMake minimum.
    depends_on('cmake@2.8.8:3.9.99', type='build')
    depends_on('cmake@3.4.3:3.9.99', type='build', when='@2018:')
    depends_on('cuda', when='+cuda')
    def patch(self):
        # PLUMED carries its own per-release patch for GROMACS; apply it
        # when the variant is enabled.
        if '+plumed' in self.spec:
            self.spec['plumed'].package.apply_patch(self)
    def cmake_args(self):
        """Translate the enabled variants into GROMACS CMake options."""
        options = []
        if '+mpi' in self.spec:
            options.append('-DGMX_MPI:BOOL=ON')
        if '+double' in self.spec:
            options.append('-DGMX_DOUBLE:BOOL=ON')
        if '~shared' in self.spec:
            options.append('-DBUILD_SHARED_LIBS:BOOL=OFF')
        if '+cuda' in self.spec:
            options.append('-DGMX_GPU:BOOL=ON')
            # Point the build at spack's CUDA installation.
            options.append('-DCUDA_TOOLKIT_ROOT_DIR:STRING=' +
                           self.spec['cuda'].prefix)
        return options
| mfherbst/spack | var/spack/repos/builtin/packages/gromacs/package.py | Python | lgpl-2.1 | 4,322 | [
"Gromacs"
] | 3347ec9508baed197390aefe30c6324bd15b5f07d49fe8074c99a6f04b4a2422 |
# This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for the Earth and Planetary Sciences
# Copyright (C) 2012 - 2015 by the BurnMan team, released under the GNU
# GPL v2 or later.
from __future__ import absolute_import
from __future__ import print_function
import warnings
import numpy as np
from .material import Material, material_property
from . import eos
from .tools import copy_documentation
class Mineral(Material):
    """
    This is the base class for all minerals. States of the mineral
    can only be queried after setting the pressure and temperature
    using set_state(). The method for computing properties of
    the material is set using set_method(). This is done during
    initialisation if the param 'equation_of_state' has been defined.
    The method can be overridden later by the user.
    This class is available as ``burnman.Mineral``.
    If deriving from this class, set the properties in self.params
    to the desired values. For more complicated materials you
    can overwrite set_state(), change the params and then call
    set_state() from this class.
    All the material parameters are expected to be in plain SI units. This
    means that the elastic moduli should be in Pascals and NOT Gigapascals,
    and the Debye temperature should be in K not C. Additionally, the
    reference volume should be in m^3/(mol molecule) and not in unit cell
    volume and 'n' should be the number of atoms per molecule. Frequently in
    the literature the reference volume is given in Angstrom^3 per unit cell.
    To convert this to m^3/(mol of molecule) you should multiply by 10^(-30) *
    N_a / Z, where N_a is Avogadro's number and Z is the number of formula units per
    unit cell. You can look up Z in many places, including www.mindat.org
    """
    def __init__(self):
        Material.__init__(self)
        # Subclasses may define params / property_modifiers before calling
        # this constructor; only supply empty defaults when they have not.
        if 'params' not in self.__dict__:
            self.params = {}
        if 'property_modifiers' not in self.__dict__:
            self.property_modifiers = []
        self.method = None
        if 'equation_of_state' in self.params:
            self.set_method(self.params['equation_of_state'])
        if 'name' in self.params:
            self.name = self.params['name']
    def set_method(self, equation_of_state):
        """
        Set the equation of state to be used for this mineral.
        Takes a string corresponding to any of the predefined
        equations of state: 'bm2', 'bm3', 'mgd2', 'mgd3', 'slb2', 'slb3',
        'mt', 'hp_tmt', or 'cork'. Alternatively, you can pass a user defined
        class which derives from the equation_of_state base class.
        After calling set_method(), any existing derived properties
        (e.g., elastic parameters or thermodynamic potentials) will be out
        of date, so set_state() will need to be called again.
        """
        if equation_of_state is None:
            self.method = None
            return
        new_method = eos.create(equation_of_state)
        # NOTE(review): the current method is rebuilt from the mineral's
        # designed EoS before the comparison below, so the warning compares
        # the new method against the intended one -- presumably deliberate;
        # confirm before simplifying.
        if self.method is not None and 'equation_of_state' in self.params:
            self.method = eos.create(self.params['equation_of_state'])
        if type(new_method).__name__ == 'instance':
            raise Exception(
                "Please derive your method from object (see python old style classes)")
        if self.method is not None and type(new_method) is not type(self.method):
            # Warn user that they are changing the EoS
            warnings.warn('Warning, you are changing the method to ' + new_method.__class__.__name__ +
                          ' even though the material is designed to be used with the method ' +
                          self.method.__class__.__name__ +
                          '. This does not overwrite any mineral attributes', stacklevel=2)
        self.reset()
        self.method = new_method
        # Validate the params object on the requested EOS.
        try:
            self.method.validate_parameters(self.params)
        except Exception as e:
            # BUG FIX: Exception.message does not exist on Python 3 (this
            # file targets 2/3 via __future__ imports); str(e) yields the
            # message on both.
            print('Mineral ' + self.to_string() +
                  ' failed to validate parameters with message : \" ' + str(e) + '\"')
            raise
        # Invalidate the cache upon resetting the method
        self.reset()
    def to_string(self):
        """
        Returns the name of the mineral class
        """
        return "'" + self.__class__.__module__.replace(".minlib_", ".") + "." + self.__class__.__name__ + "'"
    def debug_print(self, indent=""):
        # Print this mineral's name, indented for nesting inside composites.
        print("%s%s" % (indent, self.to_string()))
    def unroll(self):
        # A single mineral unrolls to itself with unit molar fraction.
        return ([self], [1.0])
    @copy_documentation(Material.set_state)
    def set_state(self, pressure, temperature):
        Material.set_state(self, pressure, temperature)
        # Property modifiers: corrections applied on top of the raw EoS
        # values, evaluated at this (P, T) state.
        self._property_modifiers = eos.property_modifiers.calculate_property_modifications(
            self)
        if self.method is None:
            raise AttributeError(
                "no method set for mineral, or equation_of_state given in mineral.params")
    """
    Properties from equations of state
    We choose the P, T properties (e.g. Gibbs(P, T) rather than Helmholtz(V, T)),
    as it allows us to more easily apply corrections to the free energy
    """
    @material_property
    @copy_documentation(Material.molar_gibbs)
    def molar_gibbs(self):
        return self.method.gibbs_free_energy(self.pressure, self.temperature, self.molar_volume, self.params) \
            + self._property_modifiers['G']
    @material_property
    def _molar_volume_unmodified(self):
        # Volume straight from the EoS, before property-modifier corrections.
        return self.method.volume(self.pressure, self.temperature, self.params)
    @material_property
    @copy_documentation(Material.molar_volume)
    def molar_volume(self):
        # V = dG/dP, so the modifier contribution is dGdP.
        return self._molar_volume_unmodified \
            + self._property_modifiers['dGdP']
    @material_property
    @copy_documentation(Material.molar_entropy)
    def molar_entropy(self):
        # S = -dG/dT, hence the subtraction of the modifier term.
        return self.method.entropy(self.pressure, self.temperature, self.molar_volume, self.params) \
            - self._property_modifiers['dGdT']
    @material_property
    @copy_documentation(Material.isothermal_bulk_modulus)
    def isothermal_bulk_modulus(self):
        K_T_orig = self.method.isothermal_bulk_modulus(
            self.pressure, self.temperature,
            self.molar_volume, self.params)
        # K_T = -V dP/dV = V / (dV/dP); the modifier adds d2GdP2 to dV/dP.
        return self.molar_volume \
            / ((self._molar_volume_unmodified / K_T_orig) - self._property_modifiers['d2GdP2'])
    @material_property
    @copy_documentation(Material.heat_capacity_p)
    def heat_capacity_p(self):
        # Cp = -T d2G/dT2, so the modifier contribution enters with -T.
        return self.method.heat_capacity_p(self.pressure, self.temperature,
                                           self.molar_volume, self.params) \
            - self.temperature * self._property_modifiers['d2GdT2']
    @material_property
    @copy_documentation(Material.thermal_expansivity)
    def thermal_expansivity(self):
        # alpha = (1/V) dV/dT; combine the EoS value (scaled by the
        # unmodified volume) with the d2GdPdT modifier term.
        return (
            (self.method.thermal_expansivity(self.pressure, self.temperature,
                                             self.molar_volume, self.params)
             * self._molar_volume_unmodified)
            + self._property_modifiers['d2GdPdT']) / self.molar_volume
    @material_property
    @copy_documentation(Material.shear_modulus)
    def shear_modulus(self):
        G = self.method.shear_modulus(self.pressure, self.temperature, self.molar_volume, self.params)
        # A zero shear modulus usually means the EoS has no shear
        # implementation (or the material is a liquid); warn rather than fail.
        if G < np.finfo('float').eps:
            warnings.warn('Warning, shear modulus is zero. '
                          'If this mineral is not a liquid, then shear modulus calculations '
                          'for the {0} equation of state have not been implemented.'.format(self.method.__class__.__name__), stacklevel=2)
        return G
    """
    Properties from mineral parameters,
    Legendre transformations
    or Maxwell relations
    """
    @material_property
    @copy_documentation(Material.molar_mass)
    def molar_mass(self):
        if 'molar_mass' in self.params:
            return self.params['molar_mass']
        else:
            # BUG FIX: to_string is a method; previously the bound method
            # object itself was concatenated with a str, which raised
            # TypeError instead of the intended ValueError message.
            raise ValueError(
                "No molar_mass parameter for mineral " + self.to_string() + ".")
    @material_property
    @copy_documentation(Material.density)
    def density(self):
        return self.molar_mass / self.molar_volume
    @material_property
    @copy_documentation(Material.internal_energy)
    def internal_energy(self):
        # U = G - PV + TS (Legendre transformation from Gibbs).
        return self.molar_gibbs - self.pressure * self.molar_volume + self.temperature * self.molar_entropy
    @material_property
    @copy_documentation(Material.molar_helmholtz)
    def molar_helmholtz(self):
        # F = G - PV.
        return self.molar_gibbs - self.pressure * self.molar_volume
    @material_property
    @copy_documentation(Material.molar_enthalpy)
    def molar_enthalpy(self):
        # H = G + TS.
        return self.molar_gibbs + self.temperature * self.molar_entropy
    @material_property
    @copy_documentation(Material.adiabatic_bulk_modulus)
    def adiabatic_bulk_modulus(self):
        # At T -> 0 the adiabatic and isothermal moduli coincide (and
        # Cp/Cv would be ill-conditioned), so return K_T directly.
        if self.temperature < 1.e-10:
            return self.isothermal_bulk_modulus
        else:
            return self.isothermal_bulk_modulus * self.heat_capacity_p / self.heat_capacity_v
    @material_property
    @copy_documentation(Material.isothermal_compressibility)
    def isothermal_compressibility(self):
        return 1. / self.isothermal_bulk_modulus
    @material_property
    @copy_documentation(Material.adiabatic_compressibility)
    def adiabatic_compressibility(self):
        return 1. / self.adiabatic_bulk_modulus
    @material_property
    @copy_documentation(Material.p_wave_velocity)
    def p_wave_velocity(self):
        # Vp = sqrt((K_S + 4/3 G) / rho).
        return np.sqrt((self.adiabatic_bulk_modulus + 4. / 3. *
                        self.shear_modulus) / self.density)
    @material_property
    @copy_documentation(Material.bulk_sound_velocity)
    def bulk_sound_velocity(self):
        return np.sqrt(self.adiabatic_bulk_modulus / self.density)
    @material_property
    @copy_documentation(Material.shear_wave_velocity)
    def shear_wave_velocity(self):
        return np.sqrt(self.shear_modulus / self.density)
    @material_property
    @copy_documentation(Material.grueneisen_parameter)
    def grueneisen_parameter(self):
        return self.method.grueneisen_parameter(self.pressure, self.temperature, self.molar_volume, self.params)
    @material_property
    @copy_documentation(Material.heat_capacity_v)
    def heat_capacity_v(self):
        # Cv = Cp - V T alpha^2 K_T (standard thermodynamic identity).
        return self.heat_capacity_p - self.molar_volume * self.temperature \
            * self.thermal_expansivity * self.thermal_expansivity \
            * self.isothermal_bulk_modulus
| ian-r-rose/burnman | burnman/mineral.py | Python | gpl-2.0 | 10,662 | [
"Avogadro"
] | 6efcfeaa1b45f97e74a2ff20d8a43a3223b8871a921dece311ff077a43b3e44c |
import pytest
import graphviz
from graphviz import parameters
# The four module-level parameter verifiers exercised by the parametrized
# tests below: engine, format, renderer, and formatter checks.
VERIFY_FUNCS = [parameters.verify_engine,
                parameters.verify_format,
                parameters.verify_renderer,
                parameters.verify_formatter]
@pytest.mark.parametrize(
    'cls', [graphviz.Graph, graphviz.Digraph, graphviz.Source])
def test_parameters(cls, engine='patchwork', format='tiff',
                    renderer='map', formatter='core'):
    """Engine/format/renderer/formatter survive construction and copy()."""
    # graphviz.Source requires a positional source-string argument.
    args = [''] if cls is graphviz.Source else []
    dot = cls(*args,
              engine=engine, format=format,
              renderer=renderer, formatter=formatter)
    assert isinstance(dot, cls)
    assert type(dot) is cls
    assert dot.engine == engine
    assert dot.format == format
    assert dot.renderer == renderer
    assert dot.formatter == formatter
    dot_copy = dot.copy()
    assert dot_copy is not dot
    assert isinstance(dot_copy, cls)
    assert type(dot_copy) is cls
    # BUG FIX: these four must be checked on the copy; previously engine and
    # format were re-asserted on the original object, so a copy() that
    # dropped them would have passed.
    assert dot_copy.engine == engine
    assert dot_copy.format == format
    assert dot_copy.renderer == renderer
    assert dot_copy.formatter == formatter
@pytest.mark.parametrize(
    'verify_func', VERIFY_FUNCS)
def test_verify_parameter_raises_unknown(verify_func):
    """Each verifier rejects a value outside its known set with ValueError."""
    with pytest.raises(ValueError, match=r'unknown .*\(must be .*one of'):
        verify_func('Brian!')
@pytest.mark.parametrize(
    'verify_func', VERIFY_FUNCS)
def test_verify_parameter_none_required_false_passes(verify_func):
    """None is accepted (and returned as None) when the value is optional."""
    assert verify_func(None, required=False) is None
@pytest.mark.parametrize(
    'verify_func', VERIFY_FUNCS)
def test_verify_parameter_none_required_raises_missing(verify_func):
    """None triggers a 'missing' ValueError when a value is required."""
    with pytest.raises(ValueError, match=r'missing'):
        verify_func(None, required=True)
| xflr6/graphviz | tests/test_parameters.py | Python | mit | 1,718 | [
"Brian"
] | 436f1218645b0f5f3936d57310db0cca08a4aadb108af20e34f76b78ea2baae9 |
"""
Flightplan loader plugin for X-Plane 10 v1.0.0
This plugin let you type your flight plan into the FMS.
For more informations, please refer to the README.md file.
This file is released under the GNU GPL v2 licence.
Author: Claudio Nicolotti - https://github.com/nico87/
If you want to update this plugin or create your own fork please visit https://github.com/nico87/xp-plugins-fmsloader
"""
from XPLMDefs import *
from XPLMProcessing import *
from XPLMDataAccess import *
from XPLMUtilities import *
from XPLMPlanes import *
from XPLMNavigation import *
from SandyBarbourUtilities import *
from PythonScriptMessaging import *
from XPLMPlugin import *
from XPLMMenus import *
from XPWidgetDefs import *
from XPWidgets import *
from XPStandardWidgets import *
import re
VERSION = "1.0.0"
class PythonInterface:
    """X-Plane python plugin that types a flight plan into the X-Plane FMS.

    Shows a small widget window where the user enters a space-separated
    route (airport/VOR/NDB/DME/fix identifiers, or raw coordinate points of
    the form N12345E012345) and pushes it into the simulator's FMC.
    """
    def XPluginStart(self):
        """Plugin entry point: register identity and build the plugins menu."""
        self.Name = "FMS Loader - " + VERSION
        self.Sig = "claudionicolotti.xplane.fmsloader"
        self.Desc = "FMS Loader Tool"
        self.window = False          # widget window not created yet
        self.ufmcPlansPath = False
        # Main menu
        self.Cmenu = self.mmenuCallback
        self.mPluginItem = XPLMAppendMenuItem(XPLMFindPluginsMenu(), 'FMS Loader', 0, 1)
        self.mMain = XPLMCreateMenu(self, 'FMS Loader', XPLMFindPluginsMenu(), self.mPluginItem, self.Cmenu, 0)
        self.mNewPlan = XPLMAppendMenuItem(self.mMain, 'Insert plan', False, 1)
        return self.Name, self.Sig, self.Desc
    def XPluginStop(self):
        """Tear down the menu and the widget window (if one was created)."""
        XPLMDestroyMenu(self, self.mMain)
        if (self.window):
            XPDestroyWidget(self, self.WindowWidget, 1)
        pass
    def XPluginEnable(self):
        # Nothing to do on enable; report success to the plugin manager.
        return 1
    def XPluginDisable(self):
        pass
    def XPluginReceiveMessage(self, inFromWho, inMessage, inParam):
        pass
    def mmenuCallback(self, menuRef, menuItem):
        """Menu handler: create the entry window, or re-show a hidden one."""
        # Start/Stop menuitem
        if menuItem == self.mNewPlan:
            if (not self.window):
                self.CreateWindow(221, 640, 900, 45)
                self.window = True
            else:
                if(not XPIsWidgetVisible(self.WindowWidget)):
                    XPSetWidgetDescriptor(self.errorCaption, '')
                    XPShowWidget(self.WindowWidget)
                    XPSetKeyboardFocus(self.routeInput)
    def CreateWindow(self, x, y, w, h):
        """Build the widget window: route text field, load button, error caption.

        (x, y) is the top-left corner in screen coordinates; w/h give the
        nominal size (the window is extended 100px downwards).
        """
        x2 = x + w
        y2 = y - h - 100
        Buffer = "FMS Loader"
        # Create the Main Widget window
        self.WindowWidget = XPCreateWidget(x, y, x2, y2, 1, Buffer, 1, 0, xpWidgetClass_MainWindow)
        # Config Sub Window, style
        subw = XPCreateWidget(x+10, y-30, x2-20 + 10, y2+40 -25, 1, "", 0,self.WindowWidget, xpWidgetClass_SubWindow)
        XPSetWidgetProperty(subw, xpProperty_SubWindowType, xpSubWindowStyle_SubWindow)
        x += 25
        y -= 20
        # Add Close Box decorations to the Main Widget
        XPSetWidgetProperty(self.WindowWidget, xpProperty_MainWindowHasCloseBoxes, 1)
        # Load route button
        self.RouteButton = XPCreateWidget(x2 - 150, y-50, x2-70, y-72, 1, "To XP FMC", 0, self.WindowWidget, xpWidgetClass_Button)
        x2 = 0
        XPSetWidgetProperty(self.RouteButton, xpProperty_ButtonType, xpPushButton)
        # Route input
        self.routeInput = XPCreateWidget(x+20, y-50, x+660, y-72, 1, "", 0, self.WindowWidget, xpWidgetClass_TextField)
        XPSetWidgetProperty(self.routeInput, xpProperty_TextFieldType, xpTextEntryField)
        XPSetWidgetProperty(self.routeInput, xpProperty_Enabled, 1)
        y -= 40
        # Error caption
        self.errorCaption = XPCreateWidget(x+20, y-70, x+300, y-90, 1, '', 0, self.WindowWidget, xpWidgetClass_Caption)
        # Register our widget handler
        self.WindowHandlerrCB = self.WindowHandler
        XPAddWidgetCallback(self, self.WindowWidget, self.WindowHandlerrCB)
        # set focus
        XPSetKeyboardFocus(self.routeInput)
        pass
    def WindowHandler(self, inMessage, inWidget, inParam1, inParam2):
        """Widget callback: hide on close; parse and load the route on button push.

        Returns 1 when the message was handled, 0 otherwise.
        """
        if (inMessage == xpMessage_CloseButtonPushed):
            if (self.window):
                XPHideWidget(self.WindowWidget)
            return 1
        # Handle any button pushes
        if (inMessage == xpMsg_PushButtonPressed):
            XPSetWidgetDescriptor(self.errorCaption, '')
            buff = []
            XPGetWidgetDescriptor(self.routeInput, buff, 256)
            param = buff[0].strip().split(' ')
            if (inParam1 == self.RouteButton and len(param) > 0):
                # Get the actual coordinates; used to disambiguate navaids by
                # picking the match nearest to the previous waypoint.
                latDataRef = XPLMFindDataRef("sim/flightmodel/position/lat_ref")
                lonDataRef = XPLMFindDataRef("sim/flightmodel/position/lon_ref")
                lat = XPLMGetDataf(latDataRef)
                lon = XPLMGetDataf(lonDataRef)
                # Clear the current flight plan
                XPLMSetDestinationFMSEntry(0)
                XPLMSetDisplayedFMSEntry(0)
                for r in range(XPLMCountFMSEntries(), 0, -1):
                    XPLMClearFMSEntry(r)
                # Load the navaids
                i = 0
                # Group 1: N|S, group 2: latitude digits,
                # group 3: E|W, group 4: longitude digits.
                pattern = re.compile('(N|S)([0-9]{5})(E|W)([0-9]{5})')
                for navaid in param:
                    result = pattern.match(navaid)
                    if (result):
                        # It's a custom point
                        lat = float(result.group(2)) / 100
                        if (result.group(1) == "S"):
                            lat = lat * -1
                        lon = float(result.group(4)) / 100
                        # BUG FIX: the hemisphere letter for longitude is in
                        # group(3) (E|W); group(1) is N|S and could never
                        # equal "W", so western longitudes stayed positive.
                        if (result.group(3) == "W"):
                            lon = lon * -1
                        XPLMSetFMSEntryLatLon(i, lat, lon, 0)
                        i += 1
                    else:
                        # It's a regular navaid; choose the lookup type from
                        # the identifier length (4 = airport, 3 = radio aid).
                        # NAVAID_TYPES = xplm_Nav_Airport + xplm_Nav_NDB + xplm_Nav_VOR + xplm_Nav_Fix + xplm_Nav_DME
                        type = xplm_Nav_Fix
                        if (len(navaid) == 4):
                            type = xplm_Nav_Airport
                        elif (len(navaid) == 3):
                            type = xplm_Nav_NDB + xplm_Nav_VOR + xplm_Nav_DME
                        nref = XPLMFindNavAid(None, navaid, lat, lon, None, type)
                        found = False
                        while (nref != XPLM_NAV_NOT_FOUND and not found):
                            # Get some infos
                            xlat = []
                            xlon = []
                            outID = []
                            XPLMGetNavAidInfo(nref, None, xlat, xlon, None, None, None, outID, None, None)
                            if (outID[0] == navaid):
                                lat = xlat[0]
                                lon = xlon[0]
                                XPLMSetFMSEntryInfo(i, nref, 0)
                                found = True
                                i += 1
                            else:
                                SandyBarbourPrint("Navaid not found (1) " + navaid + " (" + outID[0] + " was found)")
                                nref = XPLMGetNextNavAid(nref)
                # Set destination
                XPLMSetDestinationFMSEntry(0)
                XPLMSetDisplayedFMSEntry(0)
                return 1
        return 0
| nico87/xp-plugins-fmsloader | PI_FMSLoader.py | Python | gpl-2.0 | 7,464 | [
"VisIt"
] | 105514b77740c328ab8438b62c66762a91d3752db1c8104485e139ed24d765c4 |
# coding: utf-8
# # Using Python to Access NCEI Archived NEXRAD Level 2 Data
# This notebook shows how to access the THREDDS Data Server (TDS) instance that is serving up archived NEXRAD Level 2 data hosted on Amazon S3. The TDS provides a mechanism to query for available data files, as well as provides access to the data as native volume files, through OPeNDAP, and using its own CDMRemote protocol. Since we're using Python, we can take advantage of Unidata's Siphon package, which provides an easy API for talking to THREDDS servers.
#
# **NOTE:** Due to data charges, the TDS instance in AWS only allows access to .edu domains. For other users interested in using Siphon to access radar data, you can access recent (2 weeks') data by changing the server URL below to: http://thredds.ucar.edu/thredds/radarServer/nexrad/level2/IDD/
#
# **But first!**
# Bookmark these resources for when you want to use Siphon later!
# + [latest Siphon documentation](http://siphon.readthedocs.org/en/latest/)
# + [Siphon github repo](https://github.com/Unidata/siphon)
# + [TDS documentation](http://www.unidata.ucar.edu/software/thredds/current/tds/TDS.html)
# ## Downloading the single latest volume
#
# Just a bit of initial set-up to use inline figures and quiet some warnings.
# In[1]:
import matplotlib
import warnings
warnings.filterwarnings("ignore", category=matplotlib.cbook.MatplotlibDeprecationWarning)
get_ipython().magic(u'matplotlib inline')
# First we'll create an instance of RadarServer to point to the appropriate radar server access URL.
# In[2]:
# The S3 URL did not work for me, despite .edu domain
#url = 'http://thredds-aws.unidata.ucar.edu/thredds/radarServer/nexrad/level2/S3/'
#Trying motherlode URL
url = 'http://thredds.ucar.edu/thredds/radarServer/nexrad/level2/IDD/'
from siphon.radarserver import RadarServer
rs = RadarServer(url)
# Next, we'll create a new query object to help request the data. Using the chaining methods, let's ask for the latest data at the radar KLVX (Louisville, KY). We see that when the query is represented as a string, it shows the encoded URL.
# In[3]:
from datetime import datetime, timedelta
query = rs.query()
query.stations('KLVX').time(datetime.utcnow())
# We can use the RadarServer instance to check our query, to make sure we have required parameters and that we have chosen valid station(s) and variable(s)
#
# In[4]:
rs.validate_query(query)
# Make the request, which returns an instance of TDSCatalog; this handles parsing the returned XML information.
# In[5]:
catalog = rs.get_catalog(query)
# We can look at the datasets on the catalog to see what data we found by the query. We find one volume in the return, since we asked for the volume nearest to a single time.
# In[6]:
catalog.datasets
# We can pull that dataset out of the dictionary and look at the available access URLs. We see URLs for OPeNDAP, CDMRemote, and HTTPServer (direct download).
# In[7]:
ds = list(catalog.datasets.values())[0]
ds.access_urls
# We'll use the CDMRemote reader in Siphon and pass it the appropriate access URL.
# In[8]:
from siphon.cdmr import Dataset
data = Dataset(ds.access_urls['CdmRemote'])
# We define some helper functions to make working with the data easier. One takes the raw data and converts it to floating point values with the missing data points appropriately marked. The other helps with converting the polar coordinates (azimuth and range) to Cartesian (x and y).
# In[9]:
import numpy as np
def raw_to_masked_float(var, data):
    """Convert raw packed counts from *var* into masked, scaled float values.

    When the variable carries a truthy ``_Unsigned`` attribute, the signed
    raw values are re-interpreted as unsigned bytes (0-255). Zero counts
    mark missing gates and are masked out before the linear unpacking
    transform (scale_factor / add_offset) is applied.
    """
    if var._Unsigned:
        # Map the signed range [-128, 127] onto the unsigned range [0, 255].
        data = data & 255
    # Zero is the sentinel for "no data" -- mask those points.
    masked = np.ma.array(data, mask=data == 0)
    # Undo the packing: value = raw * scale + offset.
    return masked * var.scale_factor + var.add_offset
def polar_to_cartesian(az, rng):
    """Convert radar azimuth (degrees clockwise from north) and range to x, y.

    Returns two arrays of shape (len(az), len(rng)): x (east) and y (north).
    """
    theta = np.deg2rad(az)[:, None]
    # North-referenced azimuth: x uses sin, y uses cos.
    east = rng * np.sin(theta)
    north = rng * np.cos(theta)
    return east, north
# The CDMRemote reader provides an interface that is almost identical to the usual python NetCDF interface. We pull out the variables we need for azimuth and range, as well as the data itself.
# In[10]:
sweep = 0
ref_var = data.variables['Reflectivity_HI']
ref_data = ref_var[sweep]
rng = data.variables['distanceR_HI'][:]
az = data.variables['azimuthR_HI'][sweep]
# Then convert the raw data to floating point values and the polar coordinates to Cartesian.
# In[11]:
ref = raw_to_masked_float(ref_var, ref_data)
x, y = polar_to_cartesian(az, rng)
# MetPy is a Python package for meteorology (Documentation: http://metpy.readthedocs.org and GitHub: http://github.com/MetPy/MetPy). We import MetPy and use it to get the colortable and value mapping information for the NWS Reflectivity data.
# In[12]:
from metpy.plots import ctables # For NWS colortable
ref_norm, ref_cmap = ctables.registry.get_with_steps('NWSReflectivity', 5, 5)
# Finally, we plot them up using matplotlib and cartopy. We create a helper function for making a map to keep things simpler later.
# In[13]:
import matplotlib.pyplot as plt
import cartopy
def new_map(fig, lon, lat):
    """Add a Lambert-conformal map axes centered on (lon, lat) to *fig*.

    The projection is centered on the radar site so that plot coordinates
    are distances relative to the radar. Coastlines and US state borders
    are drawn as reference layers. Returns the new axes.
    """
    # Radar-centered projection: x/y in the plot are relative to the site.
    proj = cartopy.crs.LambertConformal(central_longitude=lon, central_latitude=lat)
    ax = fig.add_subplot(1, 1, 1, projection=proj)
    # Reference layers: coastlines first, then state borders on top.
    ax.coastlines('50m', 'black', linewidth=2, zorder=2)
    states = cartopy.feature.NaturalEarthFeature(
        category='cultural', name='admin_1_states_provinces_lines',
        scale='50m', facecolor='none')
    ax.add_feature(states, edgecolor='black', linewidth=1, zorder=3)
    return ax
# ## Download a collection of historical data
# This time we'll make a query based on a longitude, latitude point and using a time range.
# In[14]:
query = rs.query()
#dt = datetime(2012, 10, 29, 15) # Our specified time
dt = datetime(2016, 6, 8, 18) # Our specified time
query.lonlat_point(-73.687, 41.175).time_range(dt, dt + timedelta(hours=1))
# The specified longitude, latitude are in NY and the TDS helpfully finds the closest station to that point. We can see that for this time range we obtained multiple datasets.
# In[15]:
cat = rs.get_catalog(query)
cat.datasets
# Grab the first dataset so that we can get the longitude and latitude of the station and make a map for plotting. We'll go ahead and specify some longitude and latitude bounds for the map.
# In[16]:
ds = list(cat.datasets.values())[0]
data = Dataset(ds.access_urls['CdmRemote'])
# Pull out the data of interest
sweep = 0
rng = data.variables['distanceR_HI'][:]
az = data.variables['azimuthR_HI'][sweep]
ref_var = data.variables['Reflectivity_HI']
# Convert data to float and coordinates to Cartesian
ref = raw_to_masked_float(ref_var, ref_var[sweep])
x, y = polar_to_cartesian(az, rng)
# Use the function to make a new map and plot a colormapped view of the data
# In[17]:
fig = plt.figure(figsize=(10, 10))
ax = new_map(fig, data.StationLongitude, data.StationLatitude)
# Set limits in lat/lon space
ax.set_extent([-77, -70, 38, 42])
# Add ocean and land background
ocean = cartopy.feature.NaturalEarthFeature('physical', 'ocean', scale='50m',
edgecolor='face',
facecolor=cartopy.feature.COLORS['water'])
land = cartopy.feature.NaturalEarthFeature('physical', 'land', scale='50m',
edgecolor='face',
facecolor=cartopy.feature.COLORS['land'])
ax.add_feature(ocean, zorder=-1)
ax.add_feature(land, zorder=-1)
#ax = new_map(fig, data.StationLongitude, data.StationLatitude)
ax.pcolormesh(x, y, ref, cmap=ref_cmap, norm=ref_norm, zorder=0);
# Now we can loop over the collection of returned datasets and plot them. As we plot, we collect the returned plot objects so that we can use them to make an animated plot. We also add a timestamp for each plot.
# In[18]:
# Build one (mesh, timestamp) pair per radar volume; these artists feed the
# ArtistAnimation below.
meshes = []
for item in sorted(cat.datasets.items()):
    # After looping over the list of sorted datasets, pull the actual Dataset object out
    # of our list of items and access over CDMRemote
    ds = item[1]
    data = Dataset(ds.access_urls['CdmRemote'])
    # Pull out the data of interest (sweep index 0 of the HI-resolution fields)
    sweep = 0
    rng = data.variables['distanceR_HI'][:]
    az = data.variables['azimuthR_HI'][sweep]
    ref_var = data.variables['Reflectivity_HI']
    # Convert data to float and coordinates to Cartesian
    ref = raw_to_masked_float(ref_var, ref_var[sweep])
    x, y = polar_to_cartesian(az, rng)
    # Plot the data and the timestamp
    mesh = ax.pcolormesh(x, y, ref, cmap=ref_cmap, norm=ref_norm, zorder=0)
    text = ax.text(0.65, 0.03, data.time_coverage_start, transform=ax.transAxes,
                   fontdict={'size':16})
    # Collect the things we've plotted so we can animate
    meshes.append((mesh, text))
# Using matplotlib, we can take a collection of ``Artists`` that have been plotted and turn them into an animation. With matplotlib 1.5 (1.5-rc2 is available now!), this animation can be converted to HTML5 video viewable in the notebook.
# In[19]:
# Set up matplotlib to do the conversion to HTML5 video
import matplotlib
matplotlib.rcParams['animation.html'] = 'html5'
# Create an animation
from matplotlib.animation import ArtistAnimation
ArtistAnimation(fig, meshes)
# In[ ]:
| rsignell-usgs/notebook | NEXRAD/THREDDS_Radar_Server.py | Python | mit | 9,611 | [
"NetCDF"
] | f6840ce42c20ef3224edbcaf7785b48c95123495d6fb3445b20198af90247846 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright 2012-2014 Brian May
#
# This file is part of python-tldap.
#
# python-tldap is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# python-tldap is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with python-tldap If not, see <http://www.gnu.org/licenses/>.
import six
import tldap
import tldap.schemas.rfc
import tldap.transaction
import tldap.exceptions
import tldap.modlist
import tldap.test.slapd
import tldap.tests.base as base
import tldap.tests.schemas as test_schemas
import ldap3
import ldap3.core.exceptions
# Module-level handle for the test LDAP server; presumably assigned by the
# test setup machinery -- TODO confirm against tldap.test.slapd usage.
server = None
# Short alias for ldap3's "entry does not exist" result exception.
NO_SUCH_OBJECT = ldap3.core.exceptions.LDAPNoSuchObjectResult
class BackendTest(base.LdapTestCase):
    """Low-level backend tests for the tldap connection layer.

    Exercises add/modify/delete/rename operations and, in particular, the
    transactional semantics (commit, explicit rollback, rollback on
    exception, rollback forced via ``c.fail()``) against a live test
    slapd server provided by the base test case.
    """

    def get(self, c, base):
        """
        Return the attribute dict of the single entry at DN *base*.

        Performs a base-scope search on connection *c* and asserts that
        exactly one entry was found.  A missing DN surfaces as
        NO_SUCH_OBJECT raised by the search itself.
        """
        result_data = list(c.search(base, ldap3.SEARCH_SCOPE_BASE_OBJECT))
        no_results = len(result_data)
        self.assertEqual(no_results, 1)
        # result tuple is (dn, attribute_dict); only the attributes matter here
        return result_data[0][1]

    def test_check_password(self):
        """check_password returns True for the correct bind password and
        False (rather than raising) for a wrong one."""
        result = tldap.connection.check_password(
            'cn=Manager,dc=python-ldap,dc=org',
            'password'
        )
        self.assertEqual(result, True)
        result = tldap.connection.check_password(
            'cn=Manager,dc=python-ldap,dc=org',
            'password2'
        )
        self.assertEqual(result, False)

    def test_transactions(self):
        """End-to-end exercise of transactional behaviour on the raw
        connection: every mutating operation (add, modify with
        ADD/REPLACE/DELETE, rename, move, delete) is tried both with a
        rollback (explicit, via exception, or via ``c.fail()`` during
        commit) and with a successful commit, checking directory state
        after each."""
        c = tldap.connection
        modlist = tldap.modlist.addModlist({
            'givenName': ["Tux"],
            'sn': ["Torvalds"],
            'cn': ["Tux Torvalds"],
            'telephoneNumber': ["000"],
            'mail': ["tuz@example.org"],
            'o': ["Linux Rules"],
            'userPassword': ["silly"],
            'objectClass': [
                'top', 'person', 'organizationalPerson', 'inetOrgPerson'],
        })
        c.add("ou=People, dc=python-ldap,dc=org", modlist)
        # test explicit roll back
        with tldap.transaction.commit_on_success():
            c.add("uid=tux, ou=People, dc=python-ldap,dc=org", modlist)
            c.modify("uid=tux, ou=People, dc=python-ldap,dc=org", {
                'sn': (ldap3.MODIFY_REPLACE, "Gates")})
            c.rollback()
        self.assertRaises(NO_SUCH_OBJECT, self.get, c,
                          "uid=tux, ou=People, dc=python-ldap,dc=org")
        # test roll back on exception
        try:
            with tldap.transaction.commit_on_success():
                c.add("uid=tux, ou=People, dc=python-ldap,dc=org", modlist)
                c.modify("uid=tux, ou=People, dc=python-ldap,dc=org", {
                    'sn': (ldap3.MODIFY_REPLACE, "Gates")})
                raise RuntimeError("testing failure")
        except RuntimeError:
            pass
        self.assertRaises(NO_SUCH_OBJECT, self.get, c,
                          "uid=tux, ou=People, dc=python-ldap,dc=org")
        # test success commits
        with tldap.transaction.commit_on_success():
            c.add("uid=tux, ou=People, dc=python-ldap,dc=org", modlist)
            c.modify("uid=tux, ou=People, dc=python-ldap,dc=org", {
                'sn': (ldap3.MODIFY_REPLACE, "Gates")})
        self.assertEqual(self.get(
            c, "uid=tux, ou=People, dc=python-ldap,dc=org")['sn'], [b"Gates"])
        self.assertEqual(self.get(
            c, "uid=tux, ou=People, dc=python-ldap,dc=org")['telephoneNumber'],
            [b"000"])
        # test deleting attribute *of new object* with rollback
        try:
            with tldap.transaction.commit_on_success():
                c.modify("uid=tux, ou=People, dc=python-ldap,dc=org", {
                    "telephoneNumber": (ldap3.MODIFY_DELETE, ['000'])})
                self.assertRaises(KeyError, lambda: self.get(
                    c, "uid=tux, ou=People, dc=python-ldap,dc=org")[
                    'telephoneNumber'])
                c.fail()  # raises TestFailure during commit causing rollback
                c.commit()
        except tldap.exceptions.TestFailure:
            pass
        else:
            self.fail("Exception not generated")
        self.assertEqual(self.get(
            c, "uid=tux, ou=People, dc=python-ldap,dc=org")['telephoneNumber'],
            [b"000"])
        # test deleting attribute *of new object* with success
        with tldap.transaction.commit_on_success():
            c.modify("uid=tux, ou=People, dc=python-ldap,dc=org", {
                "telephoneNumber": (ldap3.MODIFY_DELETE, [])})
            self.assertRaises(KeyError, lambda: self.get(
                c, "uid=tux, ou=People, dc=python-ldap,dc=org")[
                'telephoneNumber'])
        self.assertRaises(KeyError, lambda: self.get(
            c, "uid=tux, ou=People, dc=python-ldap,dc=org")['telephoneNumber'])
        # test adding attribute with rollback
        try:
            with tldap.transaction.commit_on_success():
                c.modify("uid=tux, ou=People, dc=python-ldap,dc=org", {
                    "telephoneNumber": (ldap3.MODIFY_ADD, ["111"])})
                self.assertEqual(self.get(
                    c, "uid=tux, ou=People, dc=python-ldap,dc=org")[
                    'telephoneNumber'],
                    [b"111"])
                # adding a value that already exists must raise
                self.assertRaises(
                    ldap3.core.exceptions.LDAPAttributeOrValueExistsResult,
                    lambda: c.modify(
                        "uid=tux, ou=People, dc=python-ldap,dc=org", {
                            'telephoneNumber': (ldap3.MODIFY_ADD, ["111"])})
                )
                c.fail()  # raises TestFailure during commit causing rollback
                c.commit()
        except tldap.exceptions.TestFailure:
            pass
        else:
            self.fail("Exception not generated")
        self.assertRaises(KeyError, lambda: self.get(
            c, "uid=tux, ou=People, dc=python-ldap,dc=org")['telephoneNumber'])
        # test adding attribute with success
        with tldap.transaction.commit_on_success():
            c.modify("uid=tux, ou=People, dc=python-ldap,dc=org", {
                'telephoneNumber': (ldap3.MODIFY_ADD, ["111"])})
            self.assertEqual(self.get(
                c, "uid=tux, ou=People, dc=python-ldap,dc=org")[
                'telephoneNumber'], [b"111"])
            self.assertRaises(
                ldap3.core.exceptions.LDAPAttributeOrValueExistsResult,
                lambda: c.modify(
                    "uid=tux, ou=People, dc=python-ldap,dc=org", {
                        'telephoneNumber': (ldap3.MODIFY_ADD, ["111"])})
            )
        self.assertEqual(self.get(
            c, "uid=tux, ou=People, dc=python-ldap,dc=org")['telephoneNumber'],
            [b"111"])
        # test search scopes
        c.add("ou=Groups, dc=python-ldap,dc=org", {
            "objectClass": ["top", "organizationalunit"]
        })
        r = c.search(
            "uid=tux, ou=People, dc=python-ldap,dc=org",
            ldap3.SEARCH_SCOPE_BASE_OBJECT, "(uid=tux)")
        self.assertEqual(len(list(r)), 1)
        r = c.search(
            "ou=People, dc=python-ldap,dc=org",
            ldap3.SEARCH_SCOPE_BASE_OBJECT, "(uid=tux)")
        self.assertEqual(len(list(r)), 0)
        r = c.search(
            "dc=python-ldap,dc=org",
            ldap3.SEARCH_SCOPE_BASE_OBJECT, "(uid=tux)")
        self.assertEqual(len(list(r)), 0)
        r = c.search(
            "ou=Groups, dc=python-ldap,dc=org",
            ldap3.SEARCH_SCOPE_BASE_OBJECT, "(uid=tux)")
        self.assertEqual(len(list(r)), 0)
        r = c.search(
            "dc=python-ldap,dc=org",
            ldap3.SEARCH_SCOPE_BASE_OBJECT, "(uid=tux)")
        self.assertEqual(len(list(r)), 0)
        r = c.search(
            "uid=tux, ou=People, dc=python-ldap,dc=org",
            ldap3.SEARCH_SCOPE_SINGLE_LEVEL, "(uid=tux)")
        self.assertEqual(len(list(r)), 0)
        r = c.search(
            "ou=People, dc=python-ldap,dc=org",
            ldap3.SEARCH_SCOPE_SINGLE_LEVEL, "(uid=tux)")
        self.assertEqual(len(list(r)), 1)
        r = c.search(
            "dc=python-ldap,dc=org",
            ldap3.SEARCH_SCOPE_SINGLE_LEVEL, "(uid=tux)")
        self.assertEqual(len(list(r)), 0)
        r = c.search(
            "ou=Groups, dc=python-ldap,dc=org",
            ldap3.SEARCH_SCOPE_SINGLE_LEVEL, "(uid=tux)")
        self.assertEqual(len(list(r)), 0)
        r = c.search(
            "dc=python-ldap,dc=org",
            ldap3.SEARCH_SCOPE_BASE_OBJECT, "(uid=tux)")
        self.assertEqual(len(list(r)), 0)
        r = c.search(
            "uid=tux, ou=People, dc=python-ldap,dc=org",
            ldap3.SEARCH_SCOPE_WHOLE_SUBTREE, "(uid=tux)")
        self.assertEqual(len(list(r)), 1)
        r = c.search(
            "ou=People, dc=python-ldap,dc=org",
            ldap3.SEARCH_SCOPE_WHOLE_SUBTREE, "(uid=tux)")
        self.assertEqual(len(list(r)), 1)
        r = c.search(
            "dc=python-ldap,dc=org",
            ldap3.SEARCH_SCOPE_WHOLE_SUBTREE, "(uid=tux)")
        self.assertEqual(len(list(r)), 1)
        r = c.search(
            "ou=Groups, dc=python-ldap,dc=org",
            ldap3.SEARCH_SCOPE_WHOLE_SUBTREE, "(uid=tux)")
        self.assertEqual(len(list(r)), 0)
        r = c.search(
            "dc=python-ldap,dc=org",
            ldap3.SEARCH_SCOPE_BASE_OBJECT, "(uid=tux)")
        self.assertEqual(len(list(r)), 0)
        # test replacing attribute with rollback
        try:
            with tldap.transaction.commit_on_success():
                c.modify("uid=tux, ou=People, dc=python-ldap,dc=org", {
                    "telephoneNumber": (ldap3.MODIFY_REPLACE, ["222"])})
                self.assertEqual(self.get(
                    c, "uid=tux, ou=People, dc=python-ldap,dc=org")[
                    'telephoneNumber'],
                    [b"222"])
                c.fail()  # raises TestFailure during commit causing rollback
                c.commit()
        except tldap.exceptions.TestFailure:
            pass
        else:
            self.fail("Exception not generated")
        self.assertEqual(self.get(
            c, "uid=tux, ou=People, dc=python-ldap,dc=org")['telephoneNumber'],
            [b"111"])
        # test replacing attribute with success
        with tldap.transaction.commit_on_success():
            c.modify("uid=tux, ou=People, dc=python-ldap,dc=org", {
                'telephoneNumber': (ldap3.MODIFY_REPLACE, "222")})
            self.assertEqual(self.get(
                c, "uid=tux, ou=People, dc=python-ldap,dc=org")[
                'telephoneNumber'],
                [b"222"])
        self.assertEqual(self.get(
            c, "uid=tux, ou=People, dc=python-ldap,dc=org")['telephoneNumber'],
            [b"222"])
        # test deleting attribute value with rollback
        try:
            with tldap.transaction.commit_on_success():
                c.modify("uid=tux, ou=People, dc=python-ldap,dc=org", {
                    "telephoneNumber": (ldap3.MODIFY_DELETE, "222")})
                self.assertRaises(KeyError, lambda: self.get(
                    c, "uid=tux, ou=People, dc=python-ldap,dc=org")[
                    'telephoneNumber'])
                c.fail()  # raises TestFailure during commit causing rollback
                c.commit()
        except tldap.exceptions.TestFailure:
            pass
        else:
            self.fail("Exception not generated")
        self.assertEqual(self.get(
            c, "uid=tux, ou=People, dc=python-ldap,dc=org")['telephoneNumber'],
            [b"222"])
        # test deleting attribute value with success
        with tldap.transaction.commit_on_success():
            c.modify("uid=tux, ou=People, dc=python-ldap,dc=org", {
                "telephoneNumber": (ldap3.MODIFY_DELETE, "222")})
            self.assertRaises(KeyError, lambda: self.get(
                c,
                "uid=tux, ou=People, dc=python-ldap,dc=org")['telephoneNumber']
            )
        self.assertRaises(KeyError, lambda: self.get(
            c, "uid=tux, ou=People, dc=python-ldap,dc=org")['telephoneNumber'])
        # test success when 3rd statement fails; need to roll back 2nd and 1st
        # statements
        try:
            with tldap.transaction.commit_on_success():
                c.modify("uid=tux, ou=People, dc=python-ldap,dc=org", {
                    "sn": (ldap3.MODIFY_REPLACE, "Milkshakes")})
                self.assertEqual(self.get(
                    c, "uid=tux, ou=People, dc=python-ldap,dc=org")['sn'],
                    [b"Milkshakes"])
                c.modify("uid=tux, ou=People, dc=python-ldap,dc=org", {
                    "sn": (ldap3.MODIFY_REPLACE, "Bannas")})
                self.assertEqual(self.get(
                    c, "uid=tux, ou=People, dc=python-ldap,dc=org")['sn'],
                    [b"Bannas"])
                c.fail()  # raises TestFailure during commit causing rollback
                c.commit()
        except tldap.exceptions.TestFailure:
            pass
        else:
            self.fail("Exception not generated")
        self.assertEqual(self.get(
            c, "uid=tux, ou=People, dc=python-ldap,dc=org")['sn'], [b"Gates"])
        # test rename with rollback
        try:
            with tldap.transaction.commit_on_success():
                c.rename(
                    "uid=tux, ou=People, dc=python-ldap,dc=org", 'uid=tuz')
                c.modify("uid=tuz, ou=People, dc=python-ldap,dc=org", {
                    "sn": (ldap3.MODIFY_REPLACE, "Tuz")})
                self.assertRaises(NO_SUCH_OBJECT, self.get, c,
                                  "uid=tux, ou=People, dc=python-ldap,dc=org")
                self.assertEqual(self.get(
                    c, "uid=tuz, ou=People, dc=python-ldap,dc=org")['sn'],
                    [b"Tuz"])
                c.fail()  # raises TestFailure during commit causing rollback
                c.commit()
        except tldap.exceptions.TestFailure:
            pass
        else:
            self.fail("Exception not generated")
        self.assertEqual(self.get(
            c, "uid=tux, ou=People, dc=python-ldap,dc=org")['sn'], [b"Gates"])
        self.assertRaises(NO_SUCH_OBJECT, self.get, c,
                          "uid=tuz, ou=People, dc=python-ldap,dc=org")
        # test rename with success
        with tldap.transaction.commit_on_success():
            c.rename("uid=tux, ou=People, dc=python-ldap,dc=org", 'uid=tuz')
            c.modify("uid=tuz, ou=People, dc=python-ldap,dc=org", {
                'sn': (ldap3.MODIFY_REPLACE, "Tuz")})
            self.assertRaises(NO_SUCH_OBJECT, self.get, c,
                              "uid=tux, ou=People, dc=python-ldap,dc=org")
            self.assertEqual(self.get(
                c, "uid=tuz, ou=People, dc=python-ldap,dc=org")['sn'],
                [b"Tuz"])
        self.assertRaises(NO_SUCH_OBJECT, self.get, c,
                          "uid=tux, ou=People, dc=python-ldap,dc=org")
        self.assertEqual(self.get(
            c, "uid=tuz, ou=People, dc=python-ldap,dc=org")['sn'], [b"Tuz"])
        # test rename back with success
        with tldap.transaction.commit_on_success():
            c.modify("uid=tuz, ou=People, dc=python-ldap,dc=org", {
                'sn': (ldap3.MODIFY_REPLACE, "Gates")})
            c.rename("uid=tuz, ou=People, dc=python-ldap,dc=org", 'uid=tux')
            self.assertEqual(self.get(
                c, "uid=tux, ou=People, dc=python-ldap,dc=org")['sn'],
                [b"Gates"])
            self.assertRaises(NO_SUCH_OBJECT, self.get, c,
                              "uid=tuz, ou=People, dc=python-ldap,dc=org")
        self.assertEqual(self.get(
            c, "uid=tux, ou=People, dc=python-ldap,dc=org")['sn'], [b"Gates"])
        self.assertRaises(NO_SUCH_OBJECT, self.get, c,
                          "uid=tuz, ou=People, dc=python-ldap,dc=org")
        # test rename with success (change RDN attribute uid -> cn)
        with tldap.transaction.commit_on_success():
            c.rename("uid=tux, ou=People, dc=python-ldap,dc=org", 'cn=tux')
            c.modify("cn=tux, ou=People, dc=python-ldap,dc=org", {
                'sn': (ldap3.MODIFY_REPLACE, "Tuz")})
            self.assertRaises(NO_SUCH_OBJECT, self.get, c,
                              "uid=tux, ou=People, dc=python-ldap,dc=org")
            self.assertEqual(self.get(
                c, "cn=tux, ou=People, dc=python-ldap,dc=org")['sn'], [b"Tuz"])
        self.assertRaises(NO_SUCH_OBJECT, self.get, c,
                          "uid=tux, ou=People, dc=python-ldap,dc=org")
        self.assertEqual(self.get(
            c, "cn=tux, ou=People, dc=python-ldap,dc=org")['sn'], [b"Tuz"])
        # test rename back with success
        with tldap.transaction.commit_on_success():
            c.modify("cn=tux, ou=People, dc=python-ldap,dc=org", {
                'sn': (ldap3.MODIFY_REPLACE, "Gates")})
            c.rename("cn=tux, ou=People, dc=python-ldap,dc=org", 'uid=tux')
            self.assertEqual(self.get(
                c, "uid=tux, ou=People, dc=python-ldap,dc=org")['sn'],
                [b"Gates"])
            self.assertRaises(NO_SUCH_OBJECT, self.get, c,
                              "cn=tux, ou=People, dc=python-ldap,dc=org")
        self.assertEqual(self.get(
            c, "uid=tux, ou=People, dc=python-ldap,dc=org")['sn'], [b"Gates"])
        self.assertRaises(NO_SUCH_OBJECT, self.get, c,
                          "cn=tux, ou=People, dc=python-ldap,dc=org")
        # test rename with success (RDN becomes an existing cn value)
        with tldap.transaction.commit_on_success():
            c.rename("uid=tux, ou=People, dc=python-ldap,dc=org",
                     'cn=Tux Torvalds')
            c.modify("cn=Tux Torvalds, ou=People, dc=python-ldap,dc=org", {
                'sn': (ldap3.MODIFY_REPLACE, "Tuz")})
            self.assertRaises(NO_SUCH_OBJECT, self.get, c,
                              "uid=tux, ou=People, dc=python-ldap,dc=org")
            self.assertEqual(self.get(
                c, "cn=Tux Torvalds, ou=People, dc=python-ldap,dc=org")['sn'],
                [b"Tuz"])
        self.assertRaises(NO_SUCH_OBJECT, self.get, c,
                          "uid=tux, ou=People, dc=python-ldap,dc=org")
        self.assertEqual(self.get(
            c, "cn=Tux Torvalds, ou=People, dc=python-ldap,dc=org")['sn'],
            [b"Tuz"])
        # test rename back with success
        with tldap.transaction.commit_on_success():
            c.modify("cn=Tux Torvalds, ou=People, dc=python-ldap,dc=org", {
                'sn': (ldap3.MODIFY_REPLACE, "Gates")})
            c.modify("cn=Tux Torvalds, ou=People, dc=python-ldap,dc=org", {
                "cn": (ldap3.MODIFY_ADD, ["meow"])})
            c.rename(
                "cn=Tux Torvalds, ou=People, dc=python-ldap,dc=org", 'uid=tux')
            c.modify("uid=Tux, ou=People, dc=python-ldap,dc=org", {
                "cn": (ldap3.MODIFY_REPLACE, "Tux Torvalds")})
            self.assertEqual(self.get(
                c, "uid=tux, ou=People, dc=python-ldap,dc=org")['sn'],
                [b"Gates"])
            self.assertEqual(self.get(
                c, "uid=tux, ou=People, dc=python-ldap,dc=org")['cn'],
                [b"Tux Torvalds"])
            self.assertRaises(
                NO_SUCH_OBJECT, self.get, c,
                "cn=Tux Torvalds, ou=People, dc=python-ldap,dc=org")
        self.assertEqual(self.get(
            c, "uid=tux, ou=People, dc=python-ldap,dc=org")['sn'], [b"Gates"])
        self.assertEqual(self.get(
            c, "uid=tux, ou=People, dc=python-ldap,dc=org")['cn'],
            [b"Tux Torvalds"])
        self.assertRaises(NO_SUCH_OBJECT, self.get, c,
                          "cn=Tux Torvalds, ou=People, dc=python-ldap,dc=org")
        # test move with rollback
        self.get(c, "ou=People, dc=python-ldap,dc=org")
        self.get(c, "ou=Groups, dc=python-ldap,dc=org")
        self.get(c, "uid=tux, ou=People, dc=python-ldap,dc=org")
        self.assertRaises(NO_SUCH_OBJECT, self.get, c,
                          "uid=tux, ou=Group, dc=python-ldap,dc=org")
        try:
            with tldap.transaction.commit_on_success():
                c.rename(
                    "uid=tux, ou=People, dc=python-ldap,dc=org",
                    "uid=tux", "ou=Groups, dc=python-ldap,dc=org")
                self.assertRaises(NO_SUCH_OBJECT, self.get, c,
                                  "uid=tux, ou=People, dc=python-ldap,dc=org")
                self.get(c, "uid=tux, ou=Groups, dc=python-ldap,dc=org")
                c.fail()  # raises TestFailure during commit causing rollback
                c.commit()
        except tldap.exceptions.TestFailure:
            pass
        else:
            self.fail("Exception not generated")
        self.assertEqual(self.get(
            c, "uid=tux, ou=People, dc=python-ldap,dc=org")['sn'], [b"Gates"])
        self.assertRaises(NO_SUCH_OBJECT, self.get, c,
                          "uid=tux, ou=Groups, dc=python-ldap,dc=org")
        # test move with success
        self.get(c, "uid=tux, ou=People, dc=python-ldap,dc=org")
        with tldap.transaction.commit_on_success():
            c.rename(
                "uid=tux, ou=People, dc=python-ldap,dc=org",
                "uid=tux", "ou=Groups, dc=python-ldap,dc=org")
            self.assertRaises(NO_SUCH_OBJECT, self.get, c,
                              "uid=tux, ou=People, dc=python-ldap,dc=org")
            self.get(c, "uid=tux, ou=Groups, dc=python-ldap,dc=org")
        self.assertRaises(NO_SUCH_OBJECT, self.get, c,
                          "uid=tux, ou=People, dc=python-ldap,dc=org")
        self.get(c, "uid=tux, ou=Groups, dc=python-ldap,dc=org")
        # test move back
        with tldap.transaction.commit_on_success():
            c.rename(
                "uid=tux, ou=Groups, dc=python-ldap,dc=org",
                "uid=tux", "ou=People, dc=python-ldap,dc=org")
            self.get(c, "uid=tux, ou=People, dc=python-ldap,dc=org")
            self.assertRaises(NO_SUCH_OBJECT, self.get, c,
                              "uid=tux, ou=Groups, dc=python-ldap,dc=org")
        self.get(c, "uid=tux, ou=People, dc=python-ldap,dc=org")
        self.assertRaises(NO_SUCH_OBJECT, self.get, c,
                          "uid=tux, ou=Groups, dc=python-ldap,dc=org")
        # test roll back on error of delete and add of same user
        try:
            with tldap.transaction.commit_on_success():
                c.delete("uid=tux, ou=People, dc=python-ldap,dc=org")
                self.assertRaises(NO_SUCH_OBJECT, self.get, c,
                                  "uid=tux, ou=People, dc=python-ldap,dc=org")
                # c.add("uid=tux, ou=People, dc=python-ldap,dc=org", modlist)
                c.fail()  # raises TestFailure during commit causing rollback
                c.commit()
        except tldap.exceptions.TestFailure:
            pass
        else:
            self.fail("Exception not generated")
        self.assertEqual(self.get(
            c, "uid=tux, ou=People, dc=python-ldap,dc=org")['sn'], [b"Gates"])
        # test delete and add same user
        with tldap.transaction.commit_on_success():
            c.delete("uid=tux, ou=People, dc=python-ldap,dc=org")
            self.assertRaises(NO_SUCH_OBJECT, self.get, c,
                              "uid=tux, ou=People, dc=python-ldap,dc=org")
            c.add("uid=tux, ou=People, dc=python-ldap,dc=org", modlist)
        self.assertEqual(self.get(
            c, "uid=tux, ou=People, dc=python-ldap,dc=org")['sn'],
            [b"Torvalds"])
        # test delete
        with tldap.transaction.commit_on_success():
            c.delete("uid=tux, ou=People, dc=python-ldap,dc=org")
        self.assertRaises(NO_SUCH_OBJECT, self.get, c,
                          "uid=tux, ou=People, dc=python-ldap,dc=org")
class ModelTest(base.LdapTestCase):
    """Tests for the high-level tldap model/ORM layer (create, save,
    rename, delete, transactions, queries and related-object managers)
    against a live test slapd server."""

    def test_transactions(self):
        """Mirror of BackendTest.test_transactions at the model level:
        every mutating operation on a ``person`` object is exercised with
        both a rollback (explicit, via exception, or via ``c.fail()``
        during commit) and a successful commit."""
        organizationalUnit = tldap.schemas.rfc.organizationalUnit
        organizationalUnit.objects.create(
            dn="ou=People, dc=python-ldap,dc=org")
        organizationalUnit.objects.create(
            dn="ou=Groups, dc=python-ldap,dc=org")
        c = tldap.connection
        person = test_schemas.person
        DoesNotExist = person.DoesNotExist
        AlreadyExists = person.AlreadyExists
        get = person.objects.get
        get_or_create = person.objects.get_or_create
        create = person.objects.create
        kwargs = {
            'givenName': "Tux",
            'sn': "Torvalds",
            'cn': "Tux Torvalds",
            'telephoneNumber': "000",
            'mail': "tuz@example.org",
            'o': "Linux Rules",
            'userPassword': "silly",
        }
        # test explicit roll back
        with tldap.transaction.commit_on_success():
            p = create(uid="tux", **kwargs)
            p.sn = "Gates"
            p.save()
            c.rollback()
        self.assertRaises(DoesNotExist, get, uid="tux")
        # test roll back on exception
        try:
            with tldap.transaction.commit_on_success():
                p = create(uid="tux", **kwargs)
                p.sn = "Gates"
                p.save()
                raise RuntimeError("testing failure")
        except RuntimeError:
            pass
        self.assertRaises(DoesNotExist, get, uid="tux")
        # test success commits
        with tldap.transaction.commit_on_success():
            p = create(uid="tux", **kwargs)
            p.sn = "Gates"
            p.save()
        self.assertEqual(get(uid="tux").sn, "Gates")
        self.assertEqual(get(uid="tux").telephoneNumber, "000")
        # test deleting attribute
        p, created = get_or_create(uid="tux")
        self.assertEqual(created, False)
        p.telephoneNumber = None
        # test deleting attribute *of new object* with rollback
        try:
            with tldap.transaction.commit_on_success():
                p.save()
                self.assertEqual(get(uid="tux").telephoneNumber, None)
                c.fail()  # raises TestFailure during commit causing rollback
                c.commit()
        except tldap.exceptions.TestFailure:
            pass
        else:
            self.fail("Exception not generated")
        self.assertEqual(get(uid="tux").telephoneNumber, "000")
        # test deleting attribute *of new object* with success
        with tldap.transaction.commit_on_success():
            p.save()
            self.assertEqual(get(uid="tux").telephoneNumber, None)
        self.assertEqual(get(uid="tux").telephoneNumber, None)
        # test adding attribute
        p, created = get_or_create(uid="tux")
        self.assertEqual(created, False)
        p.telephoneNumber = "111"
        # test adding attribute with rollback
        try:
            with tldap.transaction.commit_on_success():
                p.save()
                self.assertEqual(get(uid="tux").telephoneNumber, "111")
                c.fail()  # raises TestFailure during commit causing rollback
                c.commit()
        except tldap.exceptions.TestFailure:
            pass
        else:
            self.fail("Exception not generated")
        self.assertEqual(get(uid="tux").telephoneNumber, None)
        # test adding attribute with success
        with tldap.transaction.commit_on_success():
            p.save()
            self.assertEqual(get(uid="tux").telephoneNumber, "111")
        self.assertEqual(get(uid="tux").telephoneNumber, "111")
        # test replacing attribute
        p, created = get_or_create(uid="tux")
        self.assertEqual(created, False)
        p.telephoneNumber = "222"
        # test replacing attribute with rollback
        try:
            with tldap.transaction.commit_on_success():
                p.save()
                self.assertEqual(get(uid="tux").telephoneNumber, "222")
                c.fail()  # raises TestFailure during commit causing rollback
                c.commit()
        except tldap.exceptions.TestFailure:
            pass
        else:
            self.fail("Exception not generated")
        self.assertEqual(get(uid="tux").telephoneNumber, "111")
        # test replacing attribute with success
        with tldap.transaction.commit_on_success():
            p.save()
            self.assertEqual(get(uid="tux").telephoneNumber, "222")
        self.assertEqual(get(uid="tux").telephoneNumber, "222")
        # test deleting attribute
        p, created = get_or_create(uid="tux")
        self.assertEqual(created, False)
        p.telephoneNumber = None
        # test deleting attribute *of new object* with rollback
        try:
            with tldap.transaction.commit_on_success():
                p.save()
                self.assertEqual(get(uid="tux").telephoneNumber, None)
                c.fail()  # raises TestFailure during commit causing rollback
                c.commit()
        except tldap.exceptions.TestFailure:
            pass
        else:
            self.fail("Exception not generated")
        self.assertEqual(get(uid="tux").telephoneNumber, "222")
        # test deleting attribute *of new object* with success
        with tldap.transaction.commit_on_success():
            p.save()
            self.assertEqual(get(uid="tux").telephoneNumber, None)
        self.assertEqual(get(uid="tux").telephoneNumber, None)
        # test success when 3rd statement fails; need to roll back 2nd and 1st
        # statements
        try:
            with tldap.transaction.commit_on_success():
                p = get(uid="tux")
                p.sn = "Milkshakes"
                p.save()
                self.assertEqual(get(uid="tux").sn, "Milkshakes")
                p.sn = "Bannas"
                p.save()
                self.assertEqual(get(uid="tux").sn, "Bannas")
                self.assertRaises(AlreadyExists,
                                  lambda: p.save(force_add=True))
                c.fail()  # raises TestFailure during commit causing rollback
                c.commit()
        except tldap.exceptions.TestFailure:
            pass
        else:
            self.fail("Exception not generated")
        self.assertEqual(get(uid="tux").sn, "Gates")
        # test delete and add same user
        with tldap.transaction.commit_on_success():
            p = get(uid="tux")
            p.delete()
            self.assertRaises(DoesNotExist, get, uid="tux")
            p.save()
            self.assertEqual(get(uid="tux").sn, "Gates")
        self.assertEqual(get(uid="tux").sn, "Gates")
        # test rename with rollback
        try:
            with tldap.transaction.commit_on_success():
                p = get(uid="tux")
                p.rename(uid='tuz')
                p.sn = "Tuz"
                p.save()
                self.assertRaises(DoesNotExist, get, uid="tux")
                self.assertEqual(get(uid="tuz").sn, "Tuz")
                c.fail()  # raises TestFailure during commit causing rollback
                c.commit()
        except tldap.exceptions.TestFailure:
            pass
        else:
            self.fail("Exception not generated")
        self.assertEqual(get(uid="tux").sn, "Gates")
        self.assertRaises(DoesNotExist, get, uid="tuz")
        # test rename with success
        with tldap.transaction.commit_on_success():
            p = get(uid="tux")
            p.rename(uid='tuz')
            p.sn = "Tuz"
            p.save()
            self.assertRaises(DoesNotExist, get, uid="tux")
            self.assertEqual(get(uid="tuz").sn, "Tuz")
        self.assertRaises(DoesNotExist, get, uid="tux")
        self.assertEqual(get(uid="tuz").sn, "Tuz")
        # test rename back with success
        with tldap.transaction.commit_on_success():
            p = get(uid="tuz")
            p.rename(uid='tux')
            p.sn = "Gates"
            p.save()
            self.assertEqual(get(uid="tux").sn, "Gates")
            self.assertRaises(DoesNotExist, get, uid="tuz")
        self.assertEqual(get(uid="tux").sn, "Gates")
        self.assertRaises(DoesNotExist, get, uid="tuz")
        # test move with rollback
        try:
            with tldap.transaction.commit_on_success():
                p = get(uid="tux")
                p.rename("ou=Groups, dc=python-ldap,dc=org")
                self.assertRaises(DoesNotExist, get, uid="tux")
                c = tldap.connection
                groups = person.objects.base_dn(
                    "ou=Groups, dc=python-ldap,dc=org")
                groups.get(uid="tux")
                c.fail()  # raises TestFailure during commit causing rollback
                c.commit()
        except tldap.exceptions.TestFailure:
            pass
        else:
            self.fail("Exception not generated")
        self.assertEqual(get(uid="tux").sn, "Gates")
        groups = person.objects.base_dn("ou=Groups, dc=python-ldap,dc=org")
        self.assertRaises(DoesNotExist, groups.get, uid="tux")
        # test move with success
        with tldap.transaction.commit_on_success():
            p = get(uid="tux")
            p.rename("ou=Groups, dc=python-ldap,dc=org")
            self.assertRaises(DoesNotExist, get, uid="tux")
            groups = person.objects.base_dn("ou=Groups, dc=python-ldap,dc=org")
            groups.get(uid="tux")
        self.assertRaises(DoesNotExist, get, uid="tux")
        groups = person.objects.base_dn("ou=Groups, dc=python-ldap,dc=org")
        groups.get(uid="tux")
        # test move back with success
        with tldap.transaction.commit_on_success():
            groups = person.objects.base_dn("ou=Groups, dc=python-ldap,dc=org")
            p = groups.get(uid="tux")
            p.rename("ou=People, dc=python-ldap,dc=org")
            self.assertEqual(get(uid="tux").sn, "Gates")
            groups = person.objects.base_dn("ou=Groups, dc=python-ldap,dc=org")
            self.assertRaises(DoesNotExist, groups.get, uid="tux")
        self.assertEqual(get(uid="tux").sn, "Gates")
        groups = person.objects.base_dn("ou=Groups, dc=python-ldap,dc=org")
        self.assertRaises(DoesNotExist, groups.get, uid="tux")
        # hack for testing: allow cn to hold two values temporarily
        for i in p._meta.fields:
            if i.name == "cn":
                i._max_instances = 2
        # test rename with success (RDN changes from uid to cn)
        with tldap.transaction.commit_on_success():
            p = get(uid="tux")
            p.rename(cn='tux')
            self.assertEqual(p.cn, ["Tux Torvalds", "tux"])
        with tldap.transaction.commit_on_success():
            p.sn = "Tuz"
            p.save()
            self.assertRaises(DoesNotExist, get, uid="tux")
            self.assertEqual(
                get(dn="cn=tux, ou=People, dc=python-ldap,dc=org").sn, "Tuz")
            self.assertEqual(
                get(dn="cn=tux, ou=People, dc=python-ldap,dc=org").uid, None)
            self.assertEqual(
                get(dn="cn=tux, ou=People, dc=python-ldap,dc=org").cn,
                ["Tux Torvalds", "tux"])
        self.assertRaises(DoesNotExist, get, uid="tux")
        self.assertEqual(
            get(dn="cn=tux, ou=People, dc=python-ldap,dc=org").sn, "Tuz")
        self.assertEqual(
            get(dn="cn=tux, ou=People, dc=python-ldap,dc=org").uid, None)
        self.assertEqual(get(dn="cn=tux, ou=People, dc=python-ldap,dc=org")
                         .cn, ["Tux Torvalds", "tux"])
        # test rename back with success
        with tldap.transaction.commit_on_success():
            p = get(dn="cn=tux, ou=People, dc=python-ldap,dc=org")
            p.rename(uid='tux')
            self.assertEqual(p.cn, ["Tux Torvalds"])
        with tldap.transaction.commit_on_success():
            p.sn = "Gates"
            p.save()
            self.assertEqual(get(uid="tux").sn, "Gates")
            self.assertRaises(DoesNotExist, get,
                              dn="cn=tux, ou=People, dc=python-ldap,dc=org")
            self.assertEqual(get(uid="tux").uid, "tux")
            self.assertEqual(get(uid="tux").cn, ["Tux Torvalds"])
        self.assertEqual(get(uid="tux").sn, "Gates")
        self.assertRaises(
            DoesNotExist, get, dn="cn=tux, ou=People, dc=python-ldap,dc=org")
        self.assertEqual(get(uid="tux").uid, "tux")
        self.assertEqual(get(uid="tux").cn, ["Tux Torvalds"])
        # test rename with success (new RDN equals the existing cn value)
        with tldap.transaction.commit_on_success():
            p = get(uid="tux")
            p.rename(cn='Tux Torvalds')
            self.assertEqual(p.cn, ["Tux Torvalds"])
        with tldap.transaction.commit_on_success():
            p.sn = "Tuz"
            p.save()
            self.assertRaises(DoesNotExist, get, uid="tux")
            self.assertEqual(get(
                dn="cn=Tux Torvalds, ou=People, dc=python-ldap,dc=org").sn,
                "Tuz")
            self.assertEqual(get(
                dn="cn=Tux Torvalds, ou=People, dc=python-ldap,dc=org").uid,
                None)
            self.assertEqual(get(
                dn="cn=Tux Torvalds, ou=People, dc=python-ldap,dc=org").cn,
                ["Tux Torvalds"])
        self.assertRaises(DoesNotExist, get, uid="tux")
        self.assertEqual(get(
            dn="cn=Tux Torvalds, ou=People, dc=python-ldap,dc=org").sn, "Tuz")
        self.assertEqual(get(
            dn="cn=Tux Torvalds, ou=People, dc=python-ldap,dc=org").uid, None)
        self.assertEqual(get(
            dn="cn=Tux Torvalds, ou=People, dc=python-ldap,dc=org").cn,
            ["Tux Torvalds"])
        # test rename back with success
        with tldap.transaction.commit_on_success():
            p = get(dn="cn=Tux Torvalds, ou=People, dc=python-ldap,dc=org")
            p.cn = ['sss', 'Tux Torvalds']
            p.save()
            p.rename(uid='tux')
            self.assertEqual(p.cn, ["sss"])
        with tldap.transaction.commit_on_success():
            p.sn = "Gates"
            p.cn = ['Tux Torvalds']
            p.save()
            self.assertEqual(get(uid="tux").sn, "Gates")
            self.assertRaises(
                DoesNotExist, get,
                dn="cn=Tux Torvalds, ou=People, dc=python-ldap,dc=org")
            self.assertEqual(get(uid="tux").uid, "tux")
            self.assertEqual(get(uid="tux").cn, ["Tux Torvalds"])
        self.assertEqual(get(uid="tux").sn, "Gates")
        self.assertRaises(
            DoesNotExist, get,
            dn="cn=Tux Torvalds, ou=People, dc=python-ldap,dc=org")
        self.assertEqual(get(uid="tux").uid, "tux")
        self.assertEqual(get(uid="tux").cn, ["Tux Torvalds"])
        # unhack for testing: restore single-valued cn
        for i in p._meta.fields:
            if i.name == "cn":
                i._max_instances = 1
        # test roll back on error of delete and add of same user
        old_p = p
        try:
            with tldap.transaction.commit_on_success():
                p.delete()
                self.assertRaises(DoesNotExist, get, uid="tux")
                p = create(uid="tux", **kwargs)
                self.assertRaises(AlreadyExists, create, uid="tux", **kwargs)
                c.fail()  # raises TestFailure during commit causing rollback
                c.commit()
        except tldap.exceptions.TestFailure:
            pass
        else:
            self.fail("Exception not generated")
        self.assertEqual(get(uid="tux").sn, "Gates")
        # test delete
        with tldap.transaction.commit_on_success():
            old_p.delete()
        self.assertRaises(DoesNotExist, get, uid="tux")
        return

    def test_query(self):
        """Exercise query-set filter construction (tldap.Q combinators and
        LDAP filter escaping) plus related-object managers: secondary
        (memberUid) groups/people, primary group accounts, and the
        managed_by / manager_of relation."""
        organizationalUnit = tldap.schemas.rfc.organizationalUnit
        organizationalUnit.objects.create(
            dn="ou=People, dc=python-ldap,dc=org", ou="People")
        organizationalUnit = tldap.schemas.rfc.organizationalUnit
        organizationalUnit.objects.create(
            dn="ou=Group, dc=python-ldap,dc=org", ou="Group")
        person = test_schemas.person
        group = test_schemas.group
        kwargs = {
            'givenName': "Tux",
            'sn': "Torvalds",
            'cn': "Tux Torvalds",
            'telephoneNumber': "000",
            'mail': "tuz@example.org",
            'o': six.u("Linux Rules £"),
            'userPassword': "silly",
        }
        p1 = person.objects.create(uid="tux", **kwargs)
        p2 = person.objects.create(uid="tuz", **kwargs)
        p = person.objects.get(dn="uid=tux, ou=People, dc=python-ldap,dc=org")
        # non-ASCII attribute values must round-trip unchanged
        self.assertEqual(p.o, six.u("Linux Rules £"))
        g1 = group.objects.create(cn="group1", gidNumber=10, memberUid=["tux"])
        g2 = group.objects.create(
            cn="group2", gidNumber=11, memberUid=["tux", "tuz"])
        # backslash in a value must be escaped in the generated filter
        self.assertEqual(
            person.objects.all()._get_filter(tldap.Q(uid='t\\ux')),
            "(uid=t\\5cux)")
        self.assertEqual(
            person.objects.all()._get_filter(~tldap.Q(uid='tux')),
            "(!(uid=tux))")
        self.assertEqual(
            person.objects.all(
            )._get_filter(tldap.Q(uid='tux') | tldap.Q(uid='tuz')),
            "(|(uid=tux)(uid=tuz))")
        self.assertEqual(
            person.objects.all(
            )._get_filter(tldap.Q() | tldap.Q(uid='tux') | tldap.Q(uid='tuz')),
            "(|(uid=tux)(uid=tuz))")
        self.assertEqual(
            person.objects.all(
            )._get_filter(tldap.Q() & tldap.Q(uid='tux') & tldap.Q(uid='tuz')),
            "(&(uid=tux)(uid=tuz))")
        self.assertEqual(
            person.objects.all()._get_filter(tldap.Q(
                uid='tux') & (tldap.Q(uid='tuz') | tldap.Q(uid='meow'))),
            "(&(uid=tux)(|(uid=tuz)(uid=meow)))")
        person.objects.get(dn="uid=tux,ou=People, dc=python-ldap,dc=org")
        self.assertRaises(person.DoesNotExist, person.objects.get,
                          dn="uid=tuy,ou=People, dc=python-ldap,dc=org")
        person.objects.get(dn="uid=tuz,ou=People, dc=python-ldap,dc=org")
        r = person.objects.filter(
            tldap.Q(dn="uid=tux,ou=People, dc=python-ldap,dc=org") |
            tldap.Q(dn="uid=tuy,ou=People, dc=python-ldap,dc=org") |
            tldap.Q(dn="uid=tuz,ou=People, dc=python-ldap,dc=org"))
        self.assertEqual(len(r), 2)
        r = person.objects.filter(tldap.Q(uid='tux') | tldap.Q(uid='tuz'))
        self.assertEqual(len(r), 2)
        self.assertRaises(person.MultipleObjectsReturned, person.objects.get,
                          tldap.Q(uid='tux') | tldap.Q(uid='tuz'))
        person.objects.get(~tldap.Q(uid='tuz'))
        r = g1.secondary_people.all()
        self.assertEqual(len(r), 1)
        r = g2.secondary_people.all()
        self.assertEqual(len(r), 2)
        r = p1.secondary_groups.all()
        self.assertEqual(len(r), 2)
        r = p2.secondary_groups.all()
        self.assertEqual(len(r), 1)
        p1.secondary_groups.create(cn="drwho", gidNumber=12)
        o, c = p1.secondary_groups.get_or_create(
            cn="startrek", defaults={'gidNumber': 13})
        self.assertEqual(c, True)
        o, c = p1.secondary_groups.get_or_create(
            cn="startrek", defaults={'gidNumber': 13})
        self.assertEqual(c, False)
        g1.secondary_people.create(
            uid="dalek", sn="Exterminate", cn="You will be Exterminated!")
        self.assertEqual(g1.memberUid, ['tux', 'dalek'])
        o, c = g1.secondary_people.get_or_create(
            uid="dalek_leader", sn="Exterminate",
            defaults={'cn': "You will be Exterminated!"})
        self.assertEqual(c, True)
        self.assertEqual(g1.memberUid, ['tux', 'dalek', 'dalek_leader'])
        o, c = g1.secondary_people.get_or_create(
            uid="dalek_leader", sn="Exterminate",
            defaults={'cn': "You will be Exterminated!"})
        self.assertEqual(c, False)
        self.assertEqual(g1.memberUid, ['tux', 'dalek', 'dalek_leader'])
        r = g1.secondary_people.all()
        self.assertEqual(len(r), 3)
        r = g2.secondary_people.all()
        self.assertEqual(len(r), 2)
        r = p1.secondary_groups.all()
        self.assertEqual(len(r), 4)
        r = p2.secondary_groups.all()
        self.assertEqual(len(r), 1)
        u = g1.primary_accounts.create(
            uid="cyberman", sn="Deleted", cn="You will be Deleted!",
            uidNumber=100, homeDirectory="/tmp")
        r = g1.primary_accounts.all()
        self.assertEqual(len(r), 1)
        group = r[0].primary_group.get()
        self.assertEqual(group, g1)
        self.assertEqual(group.memberUid, g1.memberUid)
        group.primary_accounts.add(u)
        # removing an account's primary group would leave it invalid
        self.assertRaises(tldap.exceptions.ValidationError,
                          group.primary_accounts.remove, u)
        r = group.secondary_people.all()
        self.assertEqual(len(r), 3)
        group.secondary_people.clear()
        r = group.secondary_people.all()
        self.assertEqual(len(r), 0)
        group.secondary_people.add(p1)
        r = group.secondary_people.all()
        self.assertEqual(len(r), 1)
        group.secondary_people.remove(p1)
        r = group.secondary_people.all()
        self.assertEqual(len(r), 0)
        u.secondary_groups.add(group)
        r = group.secondary_people.all()
        self.assertEqual(len(r), 1)
        u.secondary_groups.remove(group)
        r = group.secondary_people.all()
        self.assertEqual(len(r), 0)
        u.primary_group = g2
        u.save()
        r = g2.primary_accounts.all()
        self.assertEqual(len(list(r)), 1)
        u.primary_group = None
        self.assertRaises(tldap.exceptions.ValidationError, u.save)
        u1 = person.objects.get(dn="uid=tux,ou=People, dc=python-ldap,dc=org")
        u2 = person.objects.get(dn="uid=tuz,ou=People, dc=python-ldap,dc=org")
        u2.managed_by = u1
        u2.save()
        self.assertEqual(u2.managed_by.get_obj(), u1)
        r = u1.manager_of.all()
        self.assertEqual(len(list(r)), 1)
        r = person.objects.filter(managed_by=u1)
        self.assertEqual(len(list(r)), 1)
        r = person.objects.filter(manager_of=u2)
        self.assertEqual(len(list(r)), 1)
        u1.manager_of.remove(u2)
        self.assertEqual(u2.managed_by.get_obj(), None)
        r = u1.manager_of.all()
        self.assertEqual(len(list(r)), 0)
        u1.manager_of.add(u2)
        self.assertEqual(u2.managed_by.get_obj(), u1)
        r = u1.manager_of.all()
        self.assertEqual(len(list(r)), 1)
class UserAPITest(base.LdapTestCase):
    """Account (user) CRUD and membership tests against the tldap ORM.

    The fixture creates the People/Group organizational units, three
    accounts (testuser1-3, all with gidNumber 10001) and three groups:
    "systems" (gidNumber 10001, one secondary member), "empty"
    (gidNumber 10002, no members) and "full" (gidNumber 10003, all
    three accounts as secondary members).
    """

    def setUp(self):
        # Build the LDAP fixture; creation order matters because the
        # membership tests below assume exactly these group contents.
        super(UserAPITest, self).setUp()
        # Container OUs must exist before any account/group entries.
        organizationalUnit = tldap.schemas.rfc.organizationalUnit
        organizationalUnit.objects.create(
            dn="ou=People, dc=python-ldap,dc=org", ou="People")
        organizationalUnit = tldap.schemas.rfc.organizationalUnit
        organizationalUnit.objects.create(
            dn="ou=Group, dc=python-ldap,dc=org", ou="Group")
        self.account = test_schemas.account
        self.group = test_schemas.group
        account = self.account
        group = self.group
        u1 = account.objects.create(
            uid="testuser1", uidNumber=1000, gidNumber=10001,
            homeDirectory="/tmp", sn='User',
            mail="t.user1@example.com",
            cn="Test User 1")
        u2 = account.objects.create(
            uid="testuser2", uidNumber=1001, gidNumber=10001,
            homeDirectory="/tmp", sn='User',
            mail="t.user2@example.com",
            cn="Test User 2")
        u3 = account.objects.create(
            uid="testuser3", uidNumber=1002, gidNumber=10001,
            homeDirectory="/tmp", sn='User',
            mail="t.user3@example.com",
            cn="Test User 3")
        g1 = group.objects.create(
            cn="systems", gidNumber=10001,
        )
        g1.secondary_accounts = [u1]
        g2 = group.objects.create(
            cn="empty", gidNumber=10002,
            description="Empty Group",
        )
        g2.secondary_accounts = []
        g3 = group.objects.create(
            cn="full", gidNumber=10003,
        )
        g3.secondary_accounts = [u1, u2, u3]

    def test_get_users(self):
        """All three fixture accounts are returned by all()."""
        self.assertEqual(len(self.account.objects.all()), 3)

    def test_get_user(self):
        """Lookup by uid returns the matching entry's attributes."""
        u = self.account.objects.get(uid='testuser3')
        self.assertEqual(u.mail, 't.user3@example.com')

    def test_delete_user(self):
        """Deleting an account removes it from the directory."""
        self.assertEqual(len(self.account.objects.all()), 3)
        u = self.account.objects.get(uid='testuser2')
        u.delete()
        self.assertEqual(len(self.account.objects.all()), 2)

    def test_in_ldap(self):
        """get() succeeds for an existing uid and raises for a missing cn."""
        self.account.objects.get(uid='testuser1')
        self.assertRaises(self.account.DoesNotExist,
            self.account.objects.get, cn='testuser4')

    def test_update_user(self):
        """A changed attribute is persisted by save()."""
        u = self.account.objects.get(uid='testuser1')
        self.assertEqual(u.sn, 'User')
        u.sn = "Bloggs"
        u.save()
        u = self.account.objects.get(uid='testuser1')
        self.assertEqual(u.sn, 'Bloggs')

    def test_update_user_no_modifications(self):
        """Saving with an unchanged attribute value is a harmless no-op."""
        u = self.account.objects.get(uid='testuser1')
        self.assertEqual(u.sn, 'User')
        u.sn = "User"
        u.save()
        u = self.account.objects.get(uid='testuser1')
        self.assertEqual(u.sn, 'User')

#    def test_lock_unlock(self):
#        u = self.account.objects.get(uid='testuser1')
#        u.unlock()
#        u.save()
#
#        u = self.account.objects.get(uid='testuser1')
#        self.assertEqual(u.is_locked(), False)
#        u.lock()
#        u.save()
#
#        u = self.account.objects.get(uid='testuser1')
#        self.assertEqual(u.is_locked(), True)
#
#        u.unlock()
#        u.save()
#        self.assertEqual(u.is_locked(), False)

    def test_user_slice(self):
        """Query results support indexing, slicing and IndexError past the end."""
        self.account.objects.get(uid='testuser1').save()
        users = self.account.objects.filter(
            tldap.Q(cn__contains='nothing') | tldap.Q(cn__contains="user"))
        self.assertEqual(users[0].uid, "testuser1")
        self.assertEqual(users[1].uid, "testuser2")
        self.assertEqual(users[2].uid, "testuser3")
        self.assertRaises(IndexError, users.__getitem__, 3)
        # A slice past the end just stops at the last entry.
        a = iter(users[1:4])
        self.assertEqual(next(a).uid, "testuser2")
        self.assertEqual(next(a).uid, "testuser3")
        self.assertRaises(StopIteration, lambda: next(a))

    def test_user_search(self):
        """cn substring filter matches all three fixture users."""
        self.account.objects.get(uid='testuser1').save()
        users = self.account.objects.filter(cn__contains='User')
        self.assertEqual(len(users), 3)

    def test_user_search_one(self):
        """uid substring filter matching a single user returns one result."""
        self.account.objects.get(uid='testuser1').save()
        users = self.account.objects.filter(uid__contains='testuser1')
        self.assertEqual(len(users), 1)

    def test_user_search_empty(self):
        """A filter matching nothing returns an empty result set."""
        self.account.objects.get(uid='testuser1').save()
        users = self.account.objects.filter(cn__contains='nothing')
        self.assertEqual(len(users), 0)

    def test_user_search_multi(self):
        """OR-ed Q objects combine: empty match OR full match = full match."""
        self.account.objects.get(uid='testuser1').save()
        users = self.account.objects.filter(
            tldap.Q(cn__contains='nothing') | tldap.Q(cn__contains="user"))
        self.assertEqual(len(users), 3)

    def test_get_groups_empty(self):
        """After clear(), neither direction of the M2M sees any groups."""
        u = self.account.objects.get(uid="testuser2")
        u.secondary_groups.clear()
        groups = u.secondary_groups.all()
        self.assertEqual(len(groups), 0)
        groups = self.group.objects.filter(secondary_accounts=u)
        self.assertEqual(len(groups), 0)

    def test_get_groups_one(self):
        """testuser2 is a secondary member of exactly one group ("full")."""
        u = self.account.objects.get(uid="testuser2")
        groups = u.secondary_groups.all()
        self.assertEqual(len(groups), 1)
        groups = self.group.objects.filter(secondary_accounts=u)
        self.assertEqual(len(groups), 1)

    def test_get_groups_many(self):
        """testuser1 is a secondary member of two groups ("systems", "full")."""
        u = self.account.objects.get(uid="testuser1")
        groups = u.secondary_groups.all()
        self.assertEqual(len(groups), 2)
        groups = self.group.objects.filter(secondary_accounts=u)
        self.assertEqual(len(groups), 2)
class GroupAPITest(base.LdapTestCase):
    """Group CRUD and membership tests against the tldap ORM.

    Uses the same fixture as UserAPITest: three accounts and the groups
    "systems" (one secondary member), "empty" (no members, has a
    description) and "full" (all three accounts), with gidNumbers
    10001-10003 allocated.
    """

    def setUp(self):
        # Build the LDAP fixture; the add/remove tests below depend on
        # exactly these initial group memberships.
        super(GroupAPITest, self).setUp()
        # Container OUs must exist before any account/group entries.
        organizationalUnit = tldap.schemas.rfc.organizationalUnit
        organizationalUnit.objects.create(
            dn="ou=People, dc=python-ldap,dc=org", ou="People")
        organizationalUnit = tldap.schemas.rfc.organizationalUnit
        organizationalUnit.objects.create(
            dn="ou=Group, dc=python-ldap,dc=org", ou="Group")
        self.account = test_schemas.account
        self.group = test_schemas.group
        account = self.account
        group = self.group
        u1 = account.objects.create(
            uid="testuser1", uidNumber=1000, gidNumber=10001,
            homeDirectory="/tmp", sn='User',
            mail="t.user1@example.com",
            cn="Test User 1")
        u2 = account.objects.create(
            uid="testuser2", uidNumber=1001, gidNumber=10001,
            homeDirectory="/tmp", sn='User',
            mail="t.user2@example.com",
            cn="Test User 2")
        u3 = account.objects.create(
            uid="testuser3", uidNumber=1002, gidNumber=10001,
            homeDirectory="/tmp", sn='User',
            mail="t.user3@example.com",
            cn="Test User 3")
        g1 = group.objects.create(
            cn="systems", gidNumber=10001,
        )
        g1.secondary_accounts = [u1]
        g2 = group.objects.create(
            cn="empty", gidNumber=10002,
            description="Empty Group",
        )
        g2.secondary_accounts = []
        g3 = group.objects.create(
            cn="full", gidNumber=10003,
        )
        g3.secondary_accounts = [u1, u2, u3]

    def test_get_groups(self):
        """All three fixture groups are returned by all()."""
        self.assertEqual(len(self.group.objects.all()), 3)

    def test_get_group(self):
        """Each fixture group can be fetched by cn."""
        g = self.group.objects.get(cn="systems")
        self.assertEqual(g.cn, 'systems')
        g = self.group.objects.get(cn="empty")
        self.assertEqual(g.cn, 'empty')
        g = self.group.objects.get(cn="full")
        self.assertEqual(g.cn, 'full')

    def test_delete_group(self):
        """Deleting a group removes it from the directory."""
        g = self.group.objects.get(cn="full")
        g.delete()
        self.assertEqual(len(self.group.objects.all()), 2)

    def test_update_group(self):
        """A changed attribute is persisted by save()."""
        g = self.group.objects.get(cn="empty")
        self.assertEqual(g.description, 'Empty Group')
        g.description = "No Members"
        g.save()
        g = self.group.objects.get(cn="empty")
        self.assertEqual(g.description, 'No Members')

    def test_update_group_no_modifications(self):
        """Saving with an unchanged attribute value is a harmless no-op."""
        g = self.group.objects.get(cn="empty")
        self.assertEqual(g.description, 'Empty Group')
        g.description = "Empty Group"
        g.save()
        g = self.group.objects.get(cn="empty")
        self.assertEqual(g.description, 'Empty Group')

    def test_no_group(self):
        """Looking up a nonexistent group raises DoesNotExist."""
        self.assertRaises(
            self.group.DoesNotExist, self.group.objects.get, cn='nosuchgroup')

    def test_get_members_empty(self):
        """"empty" has no members, in either direction of the M2M."""
        g = self.group.objects.get(cn="empty")
        members = g.secondary_accounts.all()
        self.assertEqual(len(members), 0)
        members = self.account.objects.filter(secondary_groups=g)
        self.assertEqual(len(members), 0)

    def test_get_members_one(self):
        """"systems" has exactly one member, in either direction."""
        g = self.group.objects.get(cn="systems")
        members = g.secondary_accounts.all()
        self.assertEqual(len(members), 1)
        members = self.account.objects.filter(secondary_groups=g)
        self.assertEqual(len(members), 1)

    def test_get_members_many(self):
        """"full" has all three members, in either direction."""
        g = self.group.objects.get(cn="full")
        members = g.secondary_accounts.all()
        self.assertEqual(len(members), 3)
        members = self.account.objects.filter(secondary_groups=g)
        self.assertEqual(len(members), 3)

    def test_remove_group_member(self):
        """Removing one of three members leaves two."""
        g = self.group.objects.get(cn="full")
        u = g.secondary_accounts.get(uid="testuser2")
        g.secondary_accounts.remove(u)
        members = g.secondary_accounts.all()
        self.assertEqual(len(members), 2)

    def test_remove_group_member_one(self):
        """Removing the only member leaves an empty group."""
        g = self.group.objects.get(cn="systems")
        u = g.secondary_accounts.get(uid="testuser1")
        g.secondary_accounts.remove(u)
        members = g.secondary_accounts.all()
        self.assertEqual(len(members), 0)

    def test_remove_group_member_empty(self):
        """clear() on an already-empty group is a no-op."""
        g = self.group.objects.get(cn="empty")
        g.secondary_accounts.clear()
        members = g.secondary_accounts.all()
        self.assertEqual(len(members), 0)

    def test_add_member(self):
        """Adding a member to a one-member group yields two members."""
        g = self.group.objects.get(cn="systems")
        u = self.account.objects.get(uid="testuser2")
        g.secondary_accounts.add(u)
        members = g.secondary_accounts.all()
        self.assertEqual(len(members), 2)

    def test_add_member_empty(self):
        """Adding a member to an empty group yields one member."""
        g = self.group.objects.get(cn="empty")
        u = self.account.objects.get(uid="testuser2")
        g.secondary_accounts.add(u)
        members = g.secondary_accounts.all()
        self.assertEqual(len(members), 1)

    def test_add_member_exists(self):
        """Adding an existing member does not create a duplicate."""
        g = self.group.objects.get(cn="full")
        u = self.account.objects.get(uid="testuser2")
        g.secondary_accounts.add(u)
        members = g.secondary_accounts.all()
        self.assertEqual(len(members), 3)

    def test_add_group(self):
        """Creating a group without gidNumber auto-allocates the next one."""
        self.group.objects.create(cn='Admin')
        self.assertEqual(len(self.group.objects.all()), 4)
        g = self.group.objects.get(cn="Admin")
        # 10001-10003 are taken by the fixture, so the next free is 10004.
        self.assertEqual(g.gidNumber, 10004)

    def test_add_group_required_attributes(self):
        """Creating a group without the mandatory cn fails validation."""
        self.assertRaises(
            tldap.exceptions.ValidationError,
            self.group.objects.create, description='Admin Group')

    def test_add_group_override_generated(self):
        """An explicit gidNumber overrides the auto-allocated value."""
        self.group.objects.create(cn='Admin', gidNumber=10008)
        self.assertEqual(len(self.group.objects.all()), 4)
        g = self.group.objects.get(cn="Admin")
        self.assertEqual(g.gidNumber, 10008)

    def test_add_group_optional(self):
        """Optional attributes given at creation are stored."""
        self.group.objects.create(cn='Admin', description='Admin Group')
        self.assertEqual(len(self.group.objects.all()), 4)
        g = self.group.objects.get(cn="Admin")
        self.assertEqual(g.description, 'Admin Group')
| brianmay/python-tldap-debian | tldap/tests/tests.py | Python | gpl-3.0 | 58,797 | [
"Brian"
] | 89692c759fda39a081e2e7d35db2d4fe502fdf647e73d79dc38d106d40130bde |
#! /usr/bin/python
'''
Binary Tree Traversal
'''
from node_struct import TreeNode
class Solution:
    """Iterative and Morris (threaded) binary tree traversals.

    Every traversal returns a list of node values and uses no recursion.
    The Morris variants temporarily thread the tree via spare right
    pointers (O(1) extra space) and restore the original links before
    returning.

    Bug fixed: printReverse previously tried to restore the reversed
    chain with ``reverse(start, end)``; after the first reversal the
    chain runs end -> start, so the undo must be ``reverse(end, start)``
    (as the helper inside postorderMorris already did) -- the old call
    looped forever on any chain of two or more nodes.
    """

    def preOrderTraversalNonRecursive(self, root):
        """Pre-order (node, left, right) traversal using an explicit stack."""
        output = list()
        nodes_in_tree = list()
        current_node = root
        while len(nodes_in_tree) > 0 or current_node:
            # Visit each node before descending its left spine.
            while current_node:
                output.append(current_node.val)  # visit()
                nodes_in_tree.append(current_node)
                current_node = current_node.left
            current_node = nodes_in_tree.pop()
            current_node = current_node.right
        return output

    def inOrderTraversalNonRecursive(self, root):
        """In-order (left, node, right) traversal using an explicit stack."""
        output = list()
        nodes_in_tree = list()
        current_node = root
        while len(nodes_in_tree) > 0 or current_node:
            # Push the whole left spine, then visit on the way back up.
            while current_node:
                nodes_in_tree.append(current_node)
                current_node = current_node.left
            current_node = nodes_in_tree.pop()
            output.append(current_node.val)  # visit()
            current_node = current_node.right
        return output

    def postOrderTraversalNonRecursive(self, root):
        """Post-order (left, right, node) traversal using an explicit stack.

        Tracks the previously emitted node so each parent is visited only
        after its right subtree has been exhausted.
        """
        output = list()
        nodes_in_tree = list()
        current_node = root
        prev_node = None
        while current_node:
            # Descend the left spine, stacking ancestors.
            while current_node.left:
                nodes_in_tree.append(current_node)
                current_node = current_node.left
            # Emit nodes whose right subtree is absent or already done.
            while not current_node.right or current_node.right == prev_node:
                output.append(current_node.val)
                prev_node = current_node
                if len(nodes_in_tree) == 0:
                    return output
                current_node = nodes_in_tree.pop()
            nodes_in_tree.append(current_node)
            current_node = current_node.right
        return output

    def printReverse(self, start, end, output):
        """Append the values from ``end`` back to ``start`` onto ``output``.

        The right-pointer chain start -> ... -> end is reversed in place,
        walked from ``end`` down to ``start``, then reversed back.  Note
        that ``reverse`` leaves ``end.right`` stale; callers that care
        (e.g. postorderMorris) must reset it afterwards.
        """
        self.reverse(start, end)
        curr = end
        while True:
            output.append(curr.val)
            if (curr == start):
                break
            curr = curr.right
        # After the first reversal the chain runs end -> start, so the
        # restoring pass must go the same way: reverse(end, start).
        self.reverse(end, start)

    def levelOrderTraversal(self, root):
        """Breadth-first traversal; returns one list of values per level.

        A ``None`` sentinel in the queue marks the end of each level.
        """
        output = list()
        if not root:
            return output
        nodes_in_tree = list()
        nodes_in_tree.append(root)
        nodes_in_tree.append(None)
        curr_level = list()
        while len(nodes_in_tree) > 0:
            current_node = nodes_in_tree.pop(0)
            if not current_node:
                # Level boundary: flush the collected values.
                output.append(curr_level[:])
                curr_level = list()
                if len(nodes_in_tree) > 0:
                    nodes_in_tree.append(None)
                else:
                    return output
            else:
                curr_level.append(current_node.val)
                if current_node.left:
                    nodes_in_tree.append(current_node.left)
                if current_node.right:
                    nodes_in_tree.append(current_node.right)
        return output

    def preorderMorris(self, root):
        """Pre-order traversal in O(1) extra space (Morris threading)."""
        curr = root
        output = list()
        while curr:
            if not curr.left:
                output.append(curr.val)
                curr = curr.right
            else:
                # Find the in-order predecessor (rightmost of left subtree).
                cursor = curr.left
                while cursor.right and cursor.right != curr:
                    cursor = cursor.right
                if cursor.right == curr:
                    # Second arrival: remove the thread and go right.
                    cursor.right = None
                    curr = curr.right
                else:
                    # First arrival: visit, thread back to curr, go left.
                    output.append(curr.val)
                    cursor.right = curr
                    curr = curr.left
        return output

    def inorderMorris(self, root):
        """In-order traversal in O(1) extra space (Morris threading)."""
        curr = root
        output = list()
        while curr:
            if not curr.left:
                output.append(curr.val)
                curr = curr.right
            else:
                cursor = curr.left
                while cursor.right and cursor.right != curr:
                    cursor = cursor.right
                if not cursor.right:
                    # First arrival: thread predecessor back to curr.
                    cursor.right = curr
                    curr = curr.left
                else:
                    # Second arrival: unthread, visit, go right.
                    cursor.right = None
                    output.append(curr.val)
                    curr = curr.right
        return output

    def reverse(self, start, end):
        """Reverse the right-pointer chain start -> ... -> end in place.

        ``start.right`` and ``end.right`` are left stale; callers undo
        the reversal with ``reverse(end, start)`` and/or reset
        ``end.right`` explicitly.
        """
        if start == end:
            return
        x, y = start, start.right
        while x != end:
            z = y.right
            y.right = x
            x = y
            y = z

    def postorderMorris(self, root):
        """Post-order traversal in O(1) extra space (Morris threading).

        A dummy pivot node whose left child is the root makes the root's
        right spine behave like any other left-subtree spine.
        """
        pivot = TreeNode(0)
        pivot.left = root
        curr = pivot
        output = list()
        while curr:
            if not curr.left:
                curr = curr.right
                continue
            cursor = curr.left
            while cursor.right and cursor.right != curr:
                cursor = cursor.right
            if not cursor.right:
                # First arrival: thread predecessor back to curr.
                cursor.right = curr
                curr = curr.left
            else:
                # Second arrival: emit the left spine in reverse order,
                # then unthread.  (Uses the shared printReverse helper,
                # which restores the chain before returning.)
                self.printReverse(curr.left, cursor, output)
                cursor.right = None
                curr = curr.right
        return output
if __name__ == '__main__':
    # Smoke test (Python 2): build the tree
    #       1
    #      / \
    #     0   2
    #          \
    #           3
    # and print every traversal order.
    solution = Solution()
    root = TreeNode(1)
    root.left = TreeNode(0)
    root.right = TreeNode(2)
    root.right.right = TreeNode(3)
    print 'pre order: %s' % solution.preOrderTraversalNonRecursive(root)
    print 'pre order: %s' % solution.preorderMorris(root)
    print 'in order: %s' % solution.inOrderTraversalNonRecursive(root)
    print 'in order: %s' % solution.inorderMorris(root)
    print 'post order: %s' % solution.postOrderTraversalNonRecursive(root)
    print 'post order: %s' % solution.postorderMorris(root)
    print 'level order: %s'% solution.levelOrderTraversal(root)
| shub0/algorithm-data-structure | python/tree_traversal.py | Python | bsd-3-clause | 6,044 | [
"VisIt"
] | a36dc18f3c65a70054794610084c2d1293abcc96328b4e7deee02fd99eeaddb0 |
"""DBF header definition.
TODO:
- handle encoding of the character fields
(encoding information stored in the DBF header)
"""
"""History (most recent first):
16-sep-2010 [als] fromStream: fix century of the last update field
11-feb-2007 [als] added .ignoreErrors
10-feb-2007 [als] added __getitem__: return field definitions
by field name or field number (zero-based)
04-jul-2006 [als] added export declaration
15-dec-2005 [yc] created
"""
__version__ = "$Revision: 1.6 $"[11:-2]
__date__ = "$Date: 2010/09/16 05:06:39 $"[7:-2]
__all__ = ["DbfHeader"]
import datetime
import io
import struct
import sys
from . import fields
from .utils import getDate
class DbfHeader:
    """Dbf header definition.

    For more information about dbf header format visit
    `http://www.clicketyclick.dk/databases/xbase/format/dbf.html#DBF_STRUCT`

    Examples:
        Create an empty dbf header and add some field definitions:
            dbfh = DbfHeader()
            dbfh.addField(("name", "C", 10))
            dbfh.addField(("date", "D"))
            dbfh.addField(DbfNumericFieldDef("price", 5, 2))
        Create a dbf header with field definitions:
            dbfh = DbfHeader([
                ("name", "C", 10),
                ("date", "D"),
                DbfNumericFieldDef("price", 5, 2),
            ])
    """

    __slots__ = ("signature", "fields", "lastUpdate", "recordLength",
        "recordCount", "headerLength", "changed", "_ignore_errors")

    ## instance construction and initialization methods

    def __init__(self, fields=None, headerLength=0, recordLength=0,
        recordCount=0, signature=0x03, lastUpdate=None, ignoreErrors=False):
        """Initialize instance.

        Arguments:
            fields:
                a list of field definitions;
            recordLength:
                size of the records;
            headerLength:
                size of the header;
            recordCount:
                number of records stored in DBF;
            signature:
                version number (aka signature). using 0x03 as a default meaning
                "File without DBT". for more information about this field visit
                ``http://www.clicketyclick.dk/databases/xbase/format/dbf.html#DBF_NOTE_1_TARGET``
            lastUpdate:
                date of the DBF's update. this could be a string ('yymmdd' or
                'yyyymmdd'), timestamp (int or float), datetime/date value,
                a sequence (assuming (yyyy, mm, dd, ...)) or an object having
                callable ``ticks`` field.
            ignoreErrors:
                error processing mode for DBF fields (boolean)
        """
        self.signature = signature
        if fields is None:
            self.fields = []
        else:
            self.fields = list(fields)
        self.lastUpdate = getDate(lastUpdate)
        self.recordLength = recordLength
        self.headerLength = headerLength
        self.recordCount = recordCount
        self.ignoreErrors = ignoreErrors
        # XXX: I'm not sure this is safe to
        # initialize `self.changed` in this way
        self.changed = bool(self.fields)

    @classmethod
    def fromString(cls, string):
        """Return header instance from a bytes (or str) object.

        DBF headers are binary, so the data is wrapped in a BytesIO;
        every ``stream.read()`` in ``fromStream`` then yields bytes,
        exactly as it would for a file opened in binary mode.  (The
        previous ``io.StringIO(str(string))`` wrapper produced str and
        broke the ``b'\\r'`` comparison in ``fromStream``.)
        """
        if not isinstance(string, bytes):
            string = string.encode(sys.getfilesystemencoding())
        return cls.fromStream(io.BytesIO(string))

    @classmethod
    def fromStream(cls, stream):
        """Return header object from the stream."""
        stream.seek(0)
        first_32 = stream.read(32)
        if type(first_32) != bytes:
            # Text-mode stream: encode before unpacking.  (Previously
            # this converted value was immediately clobbered by an
            # unconditional `_data = first_32`, so the conversion never
            # took effect.)
            _data = bytes(first_32, sys.getfilesystemencoding())
        else:
            _data = first_32
        (_cnt, _hdrLen, _recLen) = struct.unpack("<I2H", _data[4:12])
        # reserved = _data[12:32]
        _year = _data[1]
        if _year < 80:
            # dBase II started at 1980. It is quite unlikely
            # that actual last update date is before that year.
            _year += 2000
        else:
            _year += 1900
        # create header object
        _obj = cls(None, _hdrLen, _recLen, _cnt, _data[0],
            (_year, _data[2], _data[3]))
        # append field definitions
        # position 0 is for the deletion flag
        _pos = 1
        _data = stream.read(1)
        while _data != b'\r':
            # each field descriptor is 32 bytes; byte 11 is the type code
            _data += stream.read(31)
            _fld = fields.lookupFor(_data[11]).fromString(_data, _pos)
            _obj._addField(_fld)
            _pos = _fld.end
            _data = stream.read(1)
        return _obj

    ## properties

    year = property(lambda self: self.lastUpdate.year)
    month = property(lambda self: self.lastUpdate.month)
    day = property(lambda self: self.lastUpdate.day)

    def ignoreErrors(self, value):
        """Update `ignoreErrors` flag on self and all fields"""
        self._ignore_errors = value = bool(value)
        for _field in self.fields:
            _field.ignoreErrors = value
    ignoreErrors = property(
        lambda self: self._ignore_errors,
        ignoreErrors,
        doc="""Error processing mode for DBF field value conversion

        if set, failing field value conversion will return
        ``INVALID_VALUE`` instead of raising conversion error.

        """)

    ## object representation

    def __repr__(self):
        """Return a human-readable summary of the header and its fields."""
        _rv = """\
Version (signature): 0x%02x
Last update: %s
Header length: %d
Record length: %d
Record count: %d
FieldName Type Len Dec
""" % (self.signature, self.lastUpdate, self.headerLength,
            self.recordLength, self.recordCount)
        _rv += "\n".join(
            ["%10s %4s %3s %3s" % _fld.fieldInfo() for _fld in self.fields]
        )
        return _rv

    ## internal methods

    def _addField(self, *defs):
        """Internal variant of the `addField` method.

        This method doesn't set `self.changed` field to True.

        Return value is a length of the appended records.
        Note: this method doesn't modify ``recordLength`` and
        ``headerLength`` fields. Use `addField` instead of this
        method if you don't exactly know what you're doing.

        """
        # insure we have dbf.DbfFieldDef instances first (instantiation
        # from the tuple could raise an error, in such a case I don't
        # wanna add any of the definitions -- all will be ignored)
        _defs = []
        _recordLength = 0
        for _def in defs:
            if isinstance(_def, fields.DbfFieldDef):
                _obj = _def
            else:
                (_name, _type, _len, _dec) = (tuple(_def) + (None,) * 4)[:4]
                _cls = fields.lookupFor(_type)
                _obj = _cls(_name, _len, _dec, ignoreErrors=self._ignore_errors)
            _recordLength += _obj.length
            _defs.append(_obj)
        # and now extend field definitions and
        # update record length
        self.fields += _defs
        return _recordLength

    ## interface methods

    def addField(self, *defs):
        """Add field definition to the header.

        Examples:
            dbfh.addField(
                ("name", "C", 20),
                dbf.DbfCharacterFieldDef("surname", 20),
                dbf.DbfDateFieldDef("birthdate"),
                ("member", "L"),
            )
            dbfh.addField(("price", "N", 5, 2))
            dbfh.addField(dbf.DbfNumericFieldDef("origprice", 5, 2))

        """
        _oldLen = self.recordLength
        self.recordLength += self._addField(*defs)
        if not _oldLen:
            # first field added: account for the deletion-flag byte
            self.recordLength += 1
        # XXX: may be just use:
        # self.recordeLength += self._addField(*defs) + bool(not _oldLen)
        # recalculate headerLength
        self.headerLength = 32 + (32 * len(self.fields)) + 1
        self.changed = True

    def write(self, stream):
        """Encode and write header to the stream."""
        stream.seek(0)
        stream.write(self.toString())
        # NOTE: local renamed from `fields` to avoid shadowing the
        # module-level `fields` import used elsewhere in this class.
        _fieldDefs = [_fld.toString() for _fld in self.fields]
        stream.write(''.join(_fieldDefs).encode(sys.getfilesystemencoding()))
        stream.write(b'\x0D')   # cr at end of all header data
        self.changed = False

    def toString(self):
        """Returned 32 chars length string with encoded header."""
        return struct.pack("<4BI2H",
            self.signature,
            self.year - 1900,
            self.month,
            self.day,
            self.recordCount,
            self.headerLength,
            self.recordLength) + (b'\x00' * 20)
        # TODO: figure out if bytes(utf-8) is correct here.

    def setCurrentDate(self):
        """Update ``self.lastUpdate`` field with current date value."""
        self.lastUpdate = datetime.date.today()

    def __getitem__(self, item):
        """Return a field definition by numeric index or name string"""
        if isinstance(item, str):
            _name = item.upper()
            for _field in self.fields:
                if _field.name == _name:
                    return _field
            else:
                raise KeyError(item)
        else:
            # item must be field index
            return self.fields[item]
# vim: et sts=4 sw=4 :
| kennethreitz/tablib | src/tablib/packages/dbfpy/header.py | Python | mit | 9,221 | [
"VisIt"
] | b64b8a30dd867890294001d09b4bf446e388a29171e275309b799325615cac9e |
import logging
import subprocess
from Queue import Queue
import threading
from galaxy import model
from galaxy.datatypes.data import nice_size
import os, errno
from time import sleep
log = logging.getLogger( __name__ )
class LocalJobRunner( object ):
"""
Job runner backed by a finite pool of worker threads. FIFO scheduling
"""
STOP_SIGNAL = object()
def __init__( self, app ):
"""Start the job runner with 'nworkers' worker threads"""
self.app = app
self.queue = Queue()
self.threads = []
nworkers = app.config.local_job_queue_workers
log.info( "starting workers" )
for i in range( nworkers ):
worker = threading.Thread( target=self.run_next )
worker.start()
self.threads.append( worker )
log.debug( "%d workers ready", nworkers )
def run_next( self ):
"""Run the next job, waiting until one is available if neccesary"""
while 1:
job_wrapper = self.queue.get()
if job_wrapper is self.STOP_SIGNAL:
return
try:
self.run_job( job_wrapper )
except:
log.exception( "Uncaught exception running job" )
def run_job( self, job_wrapper ):
job_wrapper.set_runner( 'local:///', None )
stderr = stdout = command_line = ''
# Prepare the job to run
try:
job_wrapper.prepare()
command_line = job_wrapper.get_command_line()
except:
job_wrapper.fail( "failure preparing job", exception=True )
log.exception("failure running job %d" % job_wrapper.job_id)
return
# If we were able to get a command line, run the job
if command_line:
env = os.environ
if job_wrapper.galaxy_lib_dir is not None:
if 'PYTHONPATH' in os.environ:
env['PYTHONPATH'] = "%s:%s" % ( os.environ['PYTHONPATH'], job_wrapper.galaxy_lib_dir )
else:
env['PYTHONPATH'] = job_wrapper.galaxy_lib_dir
try:
log.debug( 'executing: %s' % command_line )
proc = subprocess.Popen( args = command_line,
shell = True,
cwd = job_wrapper.working_directory,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE,
env = env,
preexec_fn = os.setpgrp )
job_wrapper.set_runner( 'local:///', proc.pid )
job_wrapper.change_state( model.Job.states.RUNNING )
if self.app.config.output_size_limit > 0:
sleep_time = 1
while proc.poll() is None:
for outfile, size in job_wrapper.check_output_sizes():
if size > self.app.config.output_size_limit:
# Error the job immediately
job_wrapper.fail( 'Job output grew too large (greater than %s), please try different job parameters or' \
% nice_size( self.app.config.output_size_limit ) )
log.warning( 'Terminating job %s due to output %s growing larger than %s limit' \
% ( job_wrapper.job_id, os.path.basename( outfile ), nice_size( self.app.config.output_size_limit ) ) )
# Then kill it
os.killpg( proc.pid, 15 )
sleep( 1 )
if proc.poll() is None:
os.killpg( proc.pid, 9 )
proc.wait() # reap
log.debug( 'Job %s (pid %s) terminated' % ( job_wrapper.job_id, proc.pid ) )
return
sleep( sleep_time )
if sleep_time < 8:
# So we don't stat every second
sleep_time *= 2
stdout = proc.stdout.read()
stderr = proc.stderr.read()
proc.wait() # reap
log.debug('execution finished: %s' % command_line)
except Exception, exc:
job_wrapper.fail( "failure running job", exception=True )
log.exception("failure running job %d" % job_wrapper.job_id)
return
#run the metadata setting script here
#this is terminatable when output dataset/job is deleted
#so that long running set_meta()s can be cancelled without having to reboot the server
if job_wrapper.get_state() not in [ model.Job.states.ERROR, model.Job.states.DELETED ] and self.app.config.set_metadata_externally:
external_metadata_script = job_wrapper.setup_external_metadata( output_fnames = job_wrapper.get_output_fnames(), kwds = { 'overwrite' : False } ) #we don't want to overwrite metadata that was copied over in init_meta(), as per established behavior
log.debug( 'executing external set_meta script for job %d: %s' % ( job_wrapper.job_id, external_metadata_script ) )
external_metadata_proc = subprocess.Popen( args = external_metadata_script,
shell = True,
env = env,
preexec_fn = os.setpgrp )
job_wrapper.external_output_metadata.set_job_runner_external_pid( external_metadata_proc.pid )
external_metadata_proc.wait()
log.debug( 'execution of external set_meta finished for job %d' % job_wrapper.job_id )
# Finish the job
try:
job_wrapper.finish( stdout, stderr )
except:
log.exception("Job wrapper finish method failed")
job_wrapper.fail("Unable to finish job", exception=True)
def put( self, job_wrapper ):
"""Add a job to the queue (by job identifier)"""
# Change to queued state before handing to worker thread so the runner won't pick it up again
job_wrapper.change_state( model.Job.states.QUEUED )
self.queue.put( job_wrapper )
def shutdown( self ):
"""Attempts to gracefully shut down the worker threads"""
log.info( "sending stop signal to worker threads" )
for i in range( len( self.threads ) ):
self.queue.put( self.STOP_SIGNAL )
log.info( "local job runner stopped" )
def check_pid( self, pid ):
try:
os.kill( pid, 0 )
return True
except OSError, e:
if e.errno == errno.ESRCH:
log.debug( "check_pid(): PID %d is dead" % pid )
else:
log.warning( "check_pid(): Got errno %s when attempting to check PID %d: %s" %( errno.errorcode[e.errno], pid, e.strerror ) )
return False
def stop_job( self, job ):
#if our local job has JobExternalOutputMetadata associated, then our primary job has to have already finished
if job.external_output_metadata:
pid = job.external_output_metadata[0].job_runner_external_pid #every JobExternalOutputMetadata has a pid set, we just need to take from one of them
else:
pid = job.job_runner_external_id
if pid in [ None, '' ]:
log.warning( "stop_job(): %s: no PID in database for job, unable to stop" % job.id )
return
pid = int( pid )
if not self.check_pid( pid ):
log.warning( "stop_job(): %s: PID %d was already dead or can't be signaled" % ( job.id, pid ) )
return
for sig in [ 15, 9 ]:
try:
os.killpg( pid, sig )
except OSError, e:
log.warning( "stop_job(): %s: Got errno %s when attempting to signal %d to PID %d: %s" % ( job.id, errno.errorcode[e.errno], sig, pid, e.strerror ) )
return # give up
sleep( 2 )
if not self.check_pid( pid ):
log.debug( "stop_job(): %s: PID %d successfully killed with signal %d" %( job.id, pid, sig ) )
return
else:
log.warning( "stop_job(): %s: PID %d refuses to die after signaling TERM/KILL" %( job.id, pid ) )
def recover( self, job, job_wrapper ):
# local jobs can't be recovered
job_wrapper.change_state( model.Job.states.ERROR, info = "This job was killed when Galaxy was restarted. Please retry the job." )
| dbcls/dbcls-galaxy | lib/galaxy/jobs/runners/local.py | Python | mit | 8,816 | [
"Galaxy"
] | 20955c8f7b5708f44336734bb0a3684cd715bebeb8d1e786695ff99123f6f92a |
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
from __future__ import print_function
from six.moves import map
import MDAnalysis
import MDAnalysis.analysis.hbonds
from MDAnalysis import SelectionError, SelectionWarning
from numpy.testing import (assert_, assert_equal, assert_array_equal,
assert_raises)
import numpy as np
import itertools
import warnings
from MDAnalysisTests.datafiles import PDB_helix, GRO, XTC
# For type guessing:
from MDAnalysis.topology.core import guess_atom_type
from MDAnalysis.core.topologyattrs import Atomtypes
def guess_types(names):
    """GRO doesn't supply types, this returns an Attr"""
    guessed = [guess_atom_type(name) for name in names]
    return Atomtypes(np.array(guessed, dtype=object))
class TestHydrogenBondAnalysis(object):
    """HydrogenBondAnalysis checks on an ideal helix (PDB_helix, one PRO).

    Fix: the second ``test_count_by_type`` method duplicated the name of
    the first and silently shadowed it, so the ``count_by_type`` check
    never ran; it actually exercises ``timesteps_by_type`` and has been
    renamed ``test_timesteps_by_type`` so both tests execute.
    """

    def setUp(self):
        self.universe = u = MDAnalysis.Universe(PDB_helix)
        # default analysis parameters; individual tests override via _run()
        self.kwargs = {
            'selection1': 'protein',
            'selection2': 'protein',
            'detect_hydrogens': "distance",
            'distance': 3.0,
            'angle': 150.0,
        }
        # ideal helix with 1 proline:
        self.values = {
            'num_bb_hbonds': u.atoms.n_residues - u.select_atoms('resname PRO').n_residues - 4,
            'donor_resid': np.array([5, 6, 8, 9, 10, 11, 12, 13]),
            'acceptor_resnm': np.array(['ALA', 'ALA', 'ALA', 'ALA', 'ALA', 'PRO', 'ALA', 'ALA'], dtype='U4'),
        }

    def _run(self, **kwargs):
        """Run HydrogenBondAnalysis with self.kwargs overridden by kwargs."""
        kw = self.kwargs.copy()
        kw.update(kwargs)
        h = MDAnalysis.analysis.hbonds.HydrogenBondAnalysis(self.universe, **kw)
        h.run(verbose=False)
        return h

    def test_helix_backbone(self):
        """Expected number of backbone H-bonds found in a single frame."""
        h = self._run()
        assert_equal(len(h.timeseries[0]),
                     self.values['num_bb_hbonds'], "wrong number of backbone hydrogen bonds")
        assert_equal(h.timesteps, [0.0])

    def test_zero_vs_1based(self):
        """Atom indices (0-based) are exactly one less than atom ids (1-based)."""
        h = self._run()
        if h.timeseries[0]:
            assert_equal((int(h.timeseries[0][0][0])-int(h.timeseries[0][0][2])), 1)
            assert_equal((int(h.timeseries[0][0][1])-int(h.timeseries[0][0][3])), 1)

    def test_generate_table(self):
        """generate_table() builds a recarray with the expected rows."""
        h = self._run()
        h.generate_table()
        assert_equal(len(h.table),
                     self.values['num_bb_hbonds'], "wrong number of backbone hydrogen bonds in table")
        assert_(isinstance(h.table, np.core.records.recarray))
        assert_array_equal(h.table.donor_resid, self.values['donor_resid'])
        assert_array_equal(h.table.acceptor_resnm, self.values['acceptor_resnm'])

    @staticmethod
    def test_true_traj():
        """Analysis over a real 10-frame trajectory yields 10 result frames."""
        u = MDAnalysis.Universe(GRO, XTC)
        u.add_TopologyAttr(guess_types(u.atoms.names))
        h = MDAnalysis.analysis.hbonds.HydrogenBondAnalysis(u, 'protein', 'resname ASP', distance=3.0, angle=120.0)
        h.run()
        assert_equal(len(h.timeseries), 10)

    def test_count_by_time(self):
        """count_by_time() reports the per-frame H-bond count."""
        h = self._run()
        c = h.count_by_time()
        assert_equal(c.tolist(), [(0.0, self.values['num_bb_hbonds'])])

    def test_count_by_type(self):
        """count_by_type() reports frequency 1.0 for every bond (single frame)."""
        h = self._run()
        c = h.count_by_type()
        assert_equal(c.frequency, self.values['num_bb_hbonds'] * [1.0])

    def test_timesteps_by_type(self):
        # Renamed from a duplicate "test_count_by_type" definition that
        # shadowed the method above; this one checks timesteps_by_type().
        h = self._run()
        t = h.timesteps_by_type()
        assert_equal(t.time, self.values['num_bb_hbonds'] * [0.0])

    def tearDown(self):
        del self.universe
class TestHydrogenBondAnalysisHeuristic(TestHydrogenBondAnalysis):
    """Base-class checks repeated with heuristic hydrogen detection."""

    def setUp(self):
        super(TestHydrogenBondAnalysisHeuristic, self).setUp()
        self.kwargs.update(detect_hydrogens="heuristic")
class TestHydrogenBondAnalysisHeavy(TestHydrogenBondAnalysis):
    """Re-run the suite using the 'heavy' (donor-acceptor) distance type with
    a correspondingly larger 3.5 A cutoff."""
    def setUp(self):
        super(TestHydrogenBondAnalysisHeavy, self).setUp()
        self.kwargs['distance_type'] = "heavy"
        self.kwargs["distance"] = 3.5
class TestHydrogenBondAnalysisHeavyFail(TestHydrogenBondAnalysisHeavy):
    """Heavy-atom distance criterion with a cutoff that is too short: no
    hydrogen bonds may be found, so all expected values become empty."""
    def setUp(self):
        super(TestHydrogenBondAnalysisHeavyFail, self).setUp()
        self.kwargs["distance"] = 3.0
        self.values['num_bb_hbonds'] = 0  # no H-bonds with a D-A distance < 3.0 A (they start at 3.05 A)
        self.values['donor_resid'] = np.array([])
        self.values['acceptor_resnm'] = np.array([], dtype="<U3")
class TestHydrogenBondAnalysisChecking(object):
    """Generator-style tests for selection checking in HydrogenBondAnalysis.

    Static empty selections must raise SelectionError; dynamically updated
    (``update_selection*=True``) selections must always run cleanly.
    """
    def _setUp(self):
        # Not named setUp(): the yielded tests manage their fixture
        # explicitly via _setUp()/_tearDown() inside try/finally.
        self.universe = u = MDAnalysis.Universe(PDB_helix)
        self.kwargs = {
            'selection1': 'protein',
            'selection2': 'protein',
            'detect_hydrogens': "distance",
            'distance': 3.0,
            'angle': 150.0,
        }
    def _tearDown(self):
        del self.universe
    def _run(self, **kwargs):
        """Run HydrogenBondAnalysis with self.kwargs overridden by kwargs,
        suppressing SelectionWarning."""
        kw = self.kwargs.copy()
        kw.update(kwargs)
        with warnings.catch_warnings():
            # ignore SelectionWarning
            warnings.simplefilter("ignore")
            h = MDAnalysis.analysis.hbonds.HydrogenBondAnalysis(self.universe, **kw)
            h.run(verbose=False)
        return h
    def test_check_static_selections(self):
        self._setUp()
        try:
            def run_HBA(s1, s2, s1type):
                """Run the analysis with static (non-updating) selections."""
                # no donors/acceptors; only raises error if no updates
                return self._run(selection1=s1, selection2=s2,
                                 update_selection1=False, update_selection2=False,
                                 selection1_type=s1type,
                                 )
            protein = "protein"
            nothing = "resname ALA and not backbone"
            for s1, s2, s1type in itertools.product((protein, nothing),
                                                    (protein, nothing),
                                                    ("donor", "acceptor", "both")):
                if s1 == s2 == protein:
                    # BUGFIX: bind the current loop values as default
                    # arguments.  A plain closure over s1/s2/s1type is
                    # late-binding: any collector that exhausts this
                    # generator before running the yielded callables (e.g.
                    # pytest's yield-test support) would make every runOK
                    # see the *final* loop values.  Harmless under nose's
                    # lazy execution, correct everywhere.
                    def runOK(s1=s1, s2=s2, s1type=s1type):
                        """test that HydrogenBondAnalysis() works for protein/protein"""
                        try:
                            h = run_HBA(s1, s2, s1type)
                        except:
                            raise AssertionError("HydrogenBondAnalysis protein/protein failed")
                        else:
                            return True
                    yield runOK
                else:
                    yield assert_raises, SelectionError, run_HBA, s1, s2, s1type
        finally:
            self._tearDown()
    def test_run_empty_selections(self):
        self._setUp()
        try:
            def run_HBA(s1, s2, s1type):
                # no donors/acceptors; should not raise error because updates=True
                return self._run(selection1=s1, selection2=s2,
                                 update_selection1=True, update_selection2=True,
                                 selection1_type=s1type,
                                 )
            protein = "protein"
            nothing = "resname ALA and not backbone"
            for s1, s2, s1type in itertools.product((protein, nothing),
                                                    (protein, nothing),
                                                    ("donor", "acceptor", "both")):
                def run_HBA_dynamic_selections(*args):
                    try:
                        h = run_HBA(*args)
                    except:
                        raise AssertionError("HydrogenBondAnalysis with update=True failed")
                    else:
                        return True
                # values are passed explicitly with the yield, so this
                # closure has no late-binding hazard
                yield run_HBA_dynamic_selections, s1, s2, s1type
        finally:
            self._tearDown()
| alejob/mdanalysis | testsuite/MDAnalysisTests/analysis/test_hbonds.py | Python | gpl-2.0 | 8,612 | [
"MDAnalysis"
] | caeb5a5a48401508c506b0e0ab50c8c8bbb44f457f827b0d1ee38bf3fa182c83 |
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import sys
import os
import unittest
import shutil
import time
import glob
from PyQt5 import QtCore, QtWidgets
from peacock.PostprocessorViewer.plugins.PostprocessorSelectPlugin import main
from peacock.utils import Testing
import mooseutils
class TestVectorPostprocessorSelectPlugin(Testing.PeacockImageTestCase):
    """
    Test class for the ArtistToggleWidget which toggles postprocessor lines.
    """
    #: QApplication: The main App for QT, this must be static to work correctly.
    qapp = QtWidgets.QApplication(sys.argv)
    def setUp(self):
        """
        Creates the GUI containing the ArtistGroupWidget and the matplotlib figure axes.
        """
        # Filenames to load (glob patterns keyed on the test-class name so
        # parallel test classes do not clobber each other's files)
        self._filename = '{}_test_*.csv'.format(self.__class__.__name__)
        self._filename2 = '{}_test2_*.csv'.format(self.__class__.__name__)
        # Read the data
        filenames = [self._filename, self._filename2]
        self._control, self._widget, self._window = main(filenames, mooseutils.VectorPostprocessorReader)
    def copyfiles(self, partial=False):
        """
        Move files into the temporary location.

        partial[bool]: copy only the first two timesteps of the first series,
                       so a later full copy can simulate a running simulation.
        """
        if partial:
            shutil.copyfile('../input/vpp_000.csv', '{}_test_000.csv'.format(self.__class__.__name__))
            shutil.copyfile('../input/vpp_001.csv', '{}_test_001.csv'.format(self.__class__.__name__))
        else:
            for i in [0,1,2,4]:
                shutil.copyfile('../input/vpp_00{}.csv'.format(i), '{}_test_00{}.csv'.format(self.__class__.__name__, i))
            for i in [0,1,3,5,7,9]:
                shutil.copyfile('../input/vpp2_000{}.csv'.format(i), '{}_test2_000{}.csv'.format(self.__class__.__name__, i))
        # force the readers to pick up the newly copied files
        for data in self._widget._data:
            data.load()
    def tearDown(self):
        """
        Remove temporary files.
        """
        for filename in glob.glob(self._filename):
            os.remove(filename)
        for filename in glob.glob(self._filename2):
            os.remove(filename)
    def testEmpty(self):
        """
        Test that an empty plot is possible.
        """
        self.assertImage('testEmpty.png')
    def testSelect(self):
        """
        Test that plotting from multiple files works.
        """
        self.copyfiles()
        vars = ['y', 't*x**2']
        for i in range(len(vars)):
            # check the box, then emit clicked to trigger the plot update
            self._control._groups[i]._toggles[vars[i]].CheckBox.setCheckState(QtCore.Qt.Checked)
            self._control._groups[i]._toggles[vars[i]].CheckBox.clicked.emit(True)
        self.assertImage('testSelect.png')
    def testUpdateData(self):
        """
        Test that a postprocessor data updates when file is changed.
        """
        self.copyfiles(partial=True)
        var = 'y'
        self._control._groups[0]._toggles[var].CheckBox.setCheckState(QtCore.Qt.Checked)
        self._control._groups[0]._toggles[var].CheckBox.clicked.emit(True)
        self.assertImage('testUpdateData0.png')
        # Reload the data (this would be done via a Timer)
        time.sleep(1) # need to wait a bit for the modified time to change
        self.copyfiles()
        self.assertImage('testUpdateData1.png')
    def testRepr(self):
        """
        Test python scripting.
        """
        self.copyfiles()
        vars = ['y', 't*x**2']
        for i in range(len(vars)):
            self._control._groups[i]._toggles[vars[i]].CheckBox.setCheckState(QtCore.Qt.Checked)
            self._control._groups[i]._toggles[vars[i]].CheckBox.clicked.emit(True)
        # repr() must emit a stand-alone script reproducing the current plot
        output, imports = self._control.repr()
        self.assertIn("data = mooseutils.VectorPostprocessorReader('TestVectorPostprocessorSelectPlugin_test_*.csv')", output)
        self.assertIn("x = data('index (Peacock)')", output)
        self.assertIn("y = data('y')", output)
        self.assertIn("data = mooseutils.VectorPostprocessorReader('TestVectorPostprocessorSelectPlugin_test2_*.csv')", output)
# Entry point: run this test case directly through unittest's CLI runner.
if __name__ == '__main__':
    unittest.main(module=__name__, verbosity=2)
| nuclear-wizard/moose | python/peacock/tests/postprocessor_tab/test_VectorPostprocessorSelectPlugin.py | Python | lgpl-2.1 | 4,345 | [
"MOOSE"
] | a472eb9a6045d4b3ca7e1dcc23a1ce733500f3221c91d5e6ab34466d6cbddd1f |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.dataflow_v1beta3.services.snapshots_v1_beta3 import (
SnapshotsV1Beta3AsyncClient,
)
from google.cloud.dataflow_v1beta3.services.snapshots_v1_beta3 import (
SnapshotsV1Beta3Client,
)
from google.cloud.dataflow_v1beta3.services.snapshots_v1_beta3 import transports
from google.cloud.dataflow_v1beta3.types import snapshots
from google.oauth2 import service_account
from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
    """Dummy mTLS client certificate source returning static cert/key bytes."""
    cert, key = b"cert bytes", b"key bytes"
    return cert, key
def modify_default_endpoint(client):
    """Return *client*'s default endpoint for endpoint-testing purposes.

    If the default endpoint is localhost, the default mtls endpoint would be
    the same; substituting the placeholder "foo.googleapis.com" lets the
    tests exercise a distinct mtls endpoint.
    """
    endpoint = client.DEFAULT_ENDPOINT
    if "localhost" in endpoint:
        return "foo.googleapis.com"
    return endpoint
def test__get_default_mtls_endpoint():
    """_get_default_mtls_endpoint maps *.googleapis.com hosts to their
    ``.mtls.`` variants, is idempotent, and leaves None and non-Google
    endpoints untouched."""
    convert = SnapshotsV1Beta3Client._get_default_mtls_endpoint
    assert convert(None) is None
    expectations = {
        "example.googleapis.com": "example.mtls.googleapis.com",
        "example.mtls.googleapis.com": "example.mtls.googleapis.com",
        "example.sandbox.googleapis.com": "example.mtls.sandbox.googleapis.com",
        "example.mtls.sandbox.googleapis.com": "example.mtls.sandbox.googleapis.com",
        "api.example.com": "api.example.com",
    }
    for endpoint, expected in expectations.items():
        assert convert(endpoint) == expected
@pytest.mark.parametrize(
    "client_class", [SnapshotsV1Beta3Client, SnapshotsV1Beta3AsyncClient,]
)
def test_snapshots_v1_beta3_client_from_service_account_info(client_class):
    """Clients built via from_service_account_info() must carry the parsed
    credentials and target the default dataflow host."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_info"
    ) as factory:
        factory.return_value = creds
        info = {"valid": True}
        client = client_class.from_service_account_info(info)
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        assert client.transport._host == "dataflow.googleapis.com:443"
@pytest.mark.parametrize(
    "transport_class,transport_name",
    [
        (transports.SnapshotsV1Beta3GrpcTransport, "grpc"),
        (transports.SnapshotsV1Beta3GrpcAsyncIOTransport, "grpc_asyncio"),
    ],
)
def test_snapshots_v1_beta3_client_service_account_always_use_jwt(
    transport_class, transport_name
):
    """with_always_use_jwt_access must be invoked iff always_use_jwt_access=True."""
    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=True)
        use_jwt.assert_called_once_with(True)
    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=False)
        use_jwt.assert_not_called()
@pytest.mark.parametrize(
    "client_class", [SnapshotsV1Beta3Client, SnapshotsV1Beta3AsyncClient,]
)
def test_snapshots_v1_beta3_client_from_service_account_file(client_class):
    """Both from_service_account_file() and its _json alias must produce a
    client using the loaded credentials and the default dataflow host."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_file"
    ) as factory:
        factory.return_value = creds
        client = client_class.from_service_account_file("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        client = client_class.from_service_account_json("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        assert client.transport._host == "dataflow.googleapis.com:443"
def test_snapshots_v1_beta3_client_get_transport_class():
    """get_transport_class() must default to gRPC and resolve "grpc" by name."""
    transport = SnapshotsV1Beta3Client.get_transport_class()
    available_transports = [
        transports.SnapshotsV1Beta3GrpcTransport,
    ]
    assert transport in available_transports
    transport = SnapshotsV1Beta3Client.get_transport_class("grpc")
    assert transport == transports.SnapshotsV1Beta3GrpcTransport
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (SnapshotsV1Beta3Client, transports.SnapshotsV1Beta3GrpcTransport, "grpc"),
        (
            SnapshotsV1Beta3AsyncClient,
            transports.SnapshotsV1Beta3GrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
@mock.patch.object(
    SnapshotsV1Beta3Client,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(SnapshotsV1Beta3Client),
)
@mock.patch.object(
    SnapshotsV1Beta3AsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(SnapshotsV1Beta3AsyncClient),
)
def test_snapshots_v1_beta3_client_client_options(
    client_class, transport_class, transport_name
):
    """client_options handling: transport instance vs name, explicit
    api_endpoint, GOOGLE_API_USE_MTLS_ENDPOINT never/always/unsupported,
    GOOGLE_API_USE_CLIENT_CERTIFICATE validation, quota_project_id."""
    # Check that if channel is provided we won't create a new one.
    with mock.patch.object(SnapshotsV1Beta3Client, "get_transport_class") as gtc:
        transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
        client = client_class(transport=transport)
        gtc.assert_not_called()
    # Check that if channel is provided via str we will create a new one.
    with mock.patch.object(SnapshotsV1Beta3Client, "get_transport_class") as gtc:
        client = client_class(transport=transport_name)
        gtc.assert_called()
    # Check the case api_endpoint is provided.
    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(transport=transport_name, client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_MTLS_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
    # unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
        with pytest.raises(MutualTLSChannelError):
            client = client_class(transport=transport_name)
    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
    ):
        with pytest.raises(ValueError):
            client = client_class(transport=transport_name)
    # Check the case quota_project_id is provided
    options = client_options.ClientOptions(quota_project_id="octopus")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id="octopus",
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,use_client_cert_env",
    [
        (
            SnapshotsV1Beta3Client,
            transports.SnapshotsV1Beta3GrpcTransport,
            "grpc",
            "true",
        ),
        (
            SnapshotsV1Beta3AsyncClient,
            transports.SnapshotsV1Beta3GrpcAsyncIOTransport,
            "grpc_asyncio",
            "true",
        ),
        (
            SnapshotsV1Beta3Client,
            transports.SnapshotsV1Beta3GrpcTransport,
            "grpc",
            "false",
        ),
        (
            SnapshotsV1Beta3AsyncClient,
            transports.SnapshotsV1Beta3GrpcAsyncIOTransport,
            "grpc_asyncio",
            "false",
        ),
    ],
)
@mock.patch.object(
    SnapshotsV1Beta3Client,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(SnapshotsV1Beta3Client),
)
@mock.patch.object(
    SnapshotsV1Beta3AsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(SnapshotsV1Beta3AsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_snapshots_v1_beta3_client_mtls_env_auto(
    client_class, transport_class, transport_name, use_client_cert_env
):
    """With GOOGLE_API_USE_MTLS_ENDPOINT=auto, the mtls endpoint and client
    cert must be used only when GOOGLE_API_USE_CLIENT_CERTIFICATE is "true"
    and a cert source (explicit or ADC) exists."""
    # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
    # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
    # Check the case client_cert_source is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        options = client_options.ClientOptions(
            client_cert_source=client_cert_source_callback
        )
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options, transport=transport_name)
            if use_client_cert_env == "false":
                expected_client_cert_source = None
                expected_host = client.DEFAULT_ENDPOINT
            else:
                expected_client_cert_source = client_cert_source_callback
                expected_host = client.DEFAULT_MTLS_ENDPOINT
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=expected_host,
                scopes=None,
                client_cert_source_for_mtls=expected_client_cert_source,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case ADC client cert is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=True,
            ):
                with mock.patch(
                    "google.auth.transport.mtls.default_client_cert_source",
                    return_value=client_cert_source_callback,
                ):
                    # NOTE(review): `client` here is still the instance from
                    # the previous block; only its DEFAULT_* constants are read.
                    if use_client_cert_env == "false":
                        expected_host = client.DEFAULT_ENDPOINT
                        expected_client_cert_source = None
                    else:
                        expected_host = client.DEFAULT_MTLS_ENDPOINT
                        expected_client_cert_source = client_cert_source_callback
                    patched.return_value = None
                    client = client_class(transport=transport_name)
                    patched.assert_called_once_with(
                        credentials=None,
                        credentials_file=None,
                        host=expected_host,
                        scopes=None,
                        client_cert_source_for_mtls=expected_client_cert_source,
                        quota_project_id=None,
                        client_info=transports.base.DEFAULT_CLIENT_INFO,
                        always_use_jwt_access=True,
                    )
    # Check the case client_cert_source and ADC client cert are not provided.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=False,
            ):
                patched.return_value = None
                client = client_class(transport=transport_name)
                patched.assert_called_once_with(
                    credentials=None,
                    credentials_file=None,
                    host=client.DEFAULT_ENDPOINT,
                    scopes=None,
                    client_cert_source_for_mtls=None,
                    quota_project_id=None,
                    client_info=transports.base.DEFAULT_CLIENT_INFO,
                    always_use_jwt_access=True,
                )
@pytest.mark.parametrize(
    "client_class", [SnapshotsV1Beta3Client, SnapshotsV1Beta3AsyncClient]
)
@mock.patch.object(
    SnapshotsV1Beta3Client,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(SnapshotsV1Beta3Client),
)
@mock.patch.object(
    SnapshotsV1Beta3AsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(SnapshotsV1Beta3AsyncClient),
)
def test_snapshots_v1_beta3_client_get_mtls_endpoint_and_cert_source(client_class):
    """get_mtls_endpoint_and_cert_source() must honour the two env vars and
    fall back to ADC cert discovery in "auto" mode."""
    mock_client_cert_source = mock.Mock()
    # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        mock_api_endpoint = "foo"
        options = client_options.ClientOptions(
            client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
        )
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
            options
        )
        assert api_endpoint == mock_api_endpoint
        assert cert_source == mock_client_cert_source
    # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
        mock_client_cert_source = mock.Mock()
        mock_api_endpoint = "foo"
        options = client_options.ClientOptions(
            client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
        )
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
            options
        )
        assert api_endpoint == mock_api_endpoint
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
        assert api_endpoint == client_class.DEFAULT_ENDPOINT
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
        assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        with mock.patch(
            "google.auth.transport.mtls.has_default_client_cert_source",
            return_value=False,
        ):
            api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
            assert api_endpoint == client_class.DEFAULT_ENDPOINT
            assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        with mock.patch(
            "google.auth.transport.mtls.has_default_client_cert_source",
            return_value=True,
        ):
            with mock.patch(
                "google.auth.transport.mtls.default_client_cert_source",
                return_value=mock_client_cert_source,
            ):
                (
                    api_endpoint,
                    cert_source,
                ) = client_class.get_mtls_endpoint_and_cert_source()
                assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
                assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (SnapshotsV1Beta3Client, transports.SnapshotsV1Beta3GrpcTransport, "grpc"),
        (
            SnapshotsV1Beta3AsyncClient,
            transports.SnapshotsV1Beta3GrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
def test_snapshots_v1_beta3_client_client_options_scopes(
    client_class, transport_class, transport_name
):
    """Scopes given in client_options must be forwarded to the transport."""
    # Check the case scopes are provided.
    options = client_options.ClientOptions(scopes=["1", "2"],)
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=["1", "2"],
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,grpc_helpers",
    [
        (
            SnapshotsV1Beta3Client,
            transports.SnapshotsV1Beta3GrpcTransport,
            "grpc",
            grpc_helpers,
        ),
        (
            SnapshotsV1Beta3AsyncClient,
            transports.SnapshotsV1Beta3GrpcAsyncIOTransport,
            "grpc_asyncio",
            grpc_helpers_async,
        ),
    ],
)
def test_snapshots_v1_beta3_client_client_options_credentials_file(
    client_class, transport_class, transport_name, grpc_helpers
):
    """A credentials_file in client_options must be forwarded to the transport."""
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(credentials_file="credentials.json")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
def test_snapshots_v1_beta3_client_client_options_from_dict():
    """client_options may be a plain dict; api_endpoint must reach the transport."""
    with mock.patch(
        "google.cloud.dataflow_v1beta3.services.snapshots_v1_beta3.transports.SnapshotsV1Beta3GrpcTransport.__init__"
    ) as grpc_transport:
        grpc_transport.return_value = None
        client = SnapshotsV1Beta3Client(
            client_options={"api_endpoint": "squid.clam.whelk"}
        )
        grpc_transport.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,grpc_helpers",
    [
        (
            SnapshotsV1Beta3Client,
            transports.SnapshotsV1Beta3GrpcTransport,
            "grpc",
            grpc_helpers,
        ),
        (
            SnapshotsV1Beta3AsyncClient,
            transports.SnapshotsV1Beta3GrpcAsyncIOTransport,
            "grpc_asyncio",
            grpc_helpers_async,
        ),
    ],
)
def test_snapshots_v1_beta3_client_create_channel_credentials_file(
    client_class, transport_class, transport_name, grpc_helpers
):
    """Credentials loaded from a file must be used (instead of ADC) when the
    gRPC channel is actually created, with the expected scopes and options."""
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(credentials_file="credentials.json")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
    # test that the credentials from file are saved and used as the credentials.
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch.object(
        google.auth, "default", autospec=True
    ) as adc, mock.patch.object(
        grpc_helpers, "create_channel"
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        file_creds = ga_credentials.AnonymousCredentials()
        load_creds.return_value = (file_creds, None)
        adc.return_value = (creds, None)
        client = client_class(client_options=options, transport=transport_name)
        create_channel.assert_called_with(
            "dataflow.googleapis.com:443",
            credentials=file_creds,
            credentials_file=None,
            quota_project_id=None,
            default_scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/compute",
                "https://www.googleapis.com/auth/compute.readonly",
                "https://www.googleapis.com/auth/userinfo.email",
            ),
            scopes=None,
            default_host="dataflow.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize("request_type", [snapshots.GetSnapshotRequest, dict,])
def test_get_snapshot(request_type, transport: str = "grpc"):
    """get_snapshot() must issue one GetSnapshotRequest and surface the
    mocked Snapshot fields unchanged."""
    client = SnapshotsV1Beta3Client(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = snapshots.Snapshot(
            id="id_value",
            project_id="project_id_value",
            source_job_id="source_job_id_value",
            state=snapshots.SnapshotState.PENDING,
            description="description_value",
            disk_size_bytes=1611,
            region="region_value",
        )
        response = client.get_snapshot(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == snapshots.GetSnapshotRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, snapshots.Snapshot)
    assert response.id == "id_value"
    assert response.project_id == "project_id_value"
    assert response.source_job_id == "source_job_id_value"
    assert response.state == snapshots.SnapshotState.PENDING
    assert response.description == "description_value"
    assert response.disk_size_bytes == 1611
    assert response.region == "region_value"
def test_get_snapshot_empty_call():
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = SnapshotsV1Beta3Client(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call:
        client.get_snapshot()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        # a default GetSnapshotRequest must be synthesized for the stub
        assert args[0] == snapshots.GetSnapshotRequest()
@pytest.mark.asyncio
async def test_get_snapshot_async(
    transport: str = "grpc_asyncio", request_type=snapshots.GetSnapshotRequest
):
    """Async variant of test_get_snapshot using the asyncio client/transport."""
    client = SnapshotsV1Beta3AsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            snapshots.Snapshot(
                id="id_value",
                project_id="project_id_value",
                source_job_id="source_job_id_value",
                state=snapshots.SnapshotState.PENDING,
                description="description_value",
                disk_size_bytes=1611,
                region="region_value",
            )
        )
        response = await client.get_snapshot(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == snapshots.GetSnapshotRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, snapshots.Snapshot)
    assert response.id == "id_value"
    assert response.project_id == "project_id_value"
    assert response.source_job_id == "source_job_id_value"
    assert response.state == snapshots.SnapshotState.PENDING
    assert response.description == "description_value"
    assert response.disk_size_bytes == 1611
    assert response.region == "region_value"
@pytest.mark.asyncio
async def test_get_snapshot_async_from_dict():
    # re-run the async test with a plain dict standing in for the request proto
    await test_get_snapshot_async(request_type=dict)
@pytest.mark.parametrize("request_type", [snapshots.DeleteSnapshotRequest, dict,])
def test_delete_snapshot(request_type, transport: str = "grpc"):
    """delete_snapshot() must issue one DeleteSnapshotRequest and return the
    mocked DeleteSnapshotResponse."""
    client = SnapshotsV1Beta3Client(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = snapshots.DeleteSnapshotResponse()
        response = client.delete_snapshot(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == snapshots.DeleteSnapshotRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, snapshots.DeleteSnapshotResponse)
def test_delete_snapshot_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SnapshotsV1Beta3Client(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call:
client.delete_snapshot()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == snapshots.DeleteSnapshotRequest()
@pytest.mark.asyncio
async def test_delete_snapshot_async(
    transport: str = "grpc_asyncio", request_type=snapshots.DeleteSnapshotRequest
):
    """Async delete_snapshot: request and response types round-trip via the stub."""
    client = SnapshotsV1Beta3AsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            snapshots.DeleteSnapshotResponse()
        )
        response = await client.delete_snapshot(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == snapshots.DeleteSnapshotRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, snapshots.DeleteSnapshotResponse)


@pytest.mark.asyncio
async def test_delete_snapshot_async_from_dict():
    """delete_snapshot (async) must also accept a plain dict as the request."""
    await test_delete_snapshot_async(request_type=dict)
@pytest.mark.parametrize("request_type", [snapshots.ListSnapshotsRequest, dict,])
def test_list_snapshots(request_type, transport: str = "grpc"):
    """list_snapshots sends a ListSnapshotsRequest and returns the stub's response."""
    client = SnapshotsV1Beta3Client(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = snapshots.ListSnapshotsResponse()
        response = client.list_snapshots(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == snapshots.ListSnapshotsRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, snapshots.ListSnapshotsResponse)


def test_list_snapshots_empty_call():
    """Calling list_snapshots() with no arguments still sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = SnapshotsV1Beta3Client(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call:
        client.list_snapshots()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == snapshots.ListSnapshotsRequest()
@pytest.mark.asyncio
async def test_list_snapshots_async(
    transport: str = "grpc_asyncio", request_type=snapshots.ListSnapshotsRequest
):
    """Async list_snapshots: request and response types round-trip via the stub."""
    client = SnapshotsV1Beta3AsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            snapshots.ListSnapshotsResponse()
        )
        response = await client.list_snapshots(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == snapshots.ListSnapshotsRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, snapshots.ListSnapshotsResponse)


@pytest.mark.asyncio
async def test_list_snapshots_async_from_dict():
    """list_snapshots (async) must also accept a plain dict as the request."""
    await test_list_snapshots_async(request_type=dict)
def test_credentials_transport_error():
    """Mutually exclusive client arguments raise ValueError.

    A pre-built transport instance cannot be combined with credentials, a
    credentials_file, an api_key, or scopes; api_key and credentials are
    also mutually exclusive with each other.
    """
    # It is an error to provide credentials and a transport instance.
    transport = transports.SnapshotsV1Beta3GrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = SnapshotsV1Beta3Client(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport,
        )
    # It is an error to provide a credentials file and a transport instance.
    transport = transports.SnapshotsV1Beta3GrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = SnapshotsV1Beta3Client(
            client_options={"credentials_file": "credentials.json"},
            transport=transport,
        )
    # It is an error to provide an api_key and a transport instance.
    transport = transports.SnapshotsV1Beta3GrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    options = client_options.ClientOptions()
    options.api_key = "api_key"
    with pytest.raises(ValueError):
        client = SnapshotsV1Beta3Client(client_options=options, transport=transport,)
    # It is an error to provide an api_key and a credential.
    options = mock.Mock()
    options.api_key = "api_key"
    with pytest.raises(ValueError):
        client = SnapshotsV1Beta3Client(
            client_options=options, credentials=ga_credentials.AnonymousCredentials()
        )
    # It is an error to provide scopes and a transport instance.
    transport = transports.SnapshotsV1Beta3GrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = SnapshotsV1Beta3Client(
            client_options={"scopes": ["1", "2"]}, transport=transport,
        )
def test_transport_instance():
    """A custom transport instance is adopted verbatim by the client."""
    # A client may be instantiated with a custom transport instance.
    transport = transports.SnapshotsV1Beta3GrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    client = SnapshotsV1Beta3Client(transport=transport)
    assert client.transport is transport


def test_transport_get_channel():
    """Both sync and async gRPC transports expose a truthy grpc_channel."""
    # A client may be instantiated with a custom transport instance.
    transport = transports.SnapshotsV1Beta3GrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    channel = transport.grpc_channel
    assert channel
    transport = transports.SnapshotsV1Beta3GrpcAsyncIOTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    channel = transport.grpc_channel
    assert channel
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.SnapshotsV1Beta3GrpcTransport,
        transports.SnapshotsV1Beta3GrpcAsyncIOTransport,
    ],
)
def test_transport_adc(transport_class):
    """Transports fall back to Application Default Credentials when none are given."""
    # Test default credentials are used if not provided.
    with mock.patch.object(google.auth, "default") as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class()
        adc.assert_called_once()


def test_transport_grpc_default():
    """The synchronous client uses the gRPC transport by default."""
    # A client should use the gRPC transport by default.
    client = SnapshotsV1Beta3Client(credentials=ga_credentials.AnonymousCredentials(),)
    assert isinstance(client.transport, transports.SnapshotsV1Beta3GrpcTransport,)
def test_snapshots_v1_beta3_base_transport_error():
    """The base transport rejects credentials plus credentials_file together."""
    # Passing both a credentials object and credentials_file should raise an error
    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
        transport = transports.SnapshotsV1Beta3Transport(
            credentials=ga_credentials.AnonymousCredentials(),
            credentials_file="credentials.json",
        )


def test_snapshots_v1_beta3_base_transport():
    """Every RPC method and close() on the abstract base transport raises
    NotImplementedError; concrete transports must override them."""
    # Instantiate the base transport.
    with mock.patch(
        "google.cloud.dataflow_v1beta3.services.snapshots_v1_beta3.transports.SnapshotsV1Beta3Transport.__init__"
    ) as Transport:
        Transport.return_value = None
        transport = transports.SnapshotsV1Beta3Transport(
            credentials=ga_credentials.AnonymousCredentials(),
        )
    # Every method on the transport should just blindly
    # raise NotImplementedError.
    methods = (
        "get_snapshot",
        "delete_snapshot",
        "list_snapshots",
    )
    for method in methods:
        with pytest.raises(NotImplementedError):
            getattr(transport, method)(request=object())
    with pytest.raises(NotImplementedError):
        transport.close()
def test_snapshots_v1_beta3_base_transport_with_credentials_file():
    """A credentials_file is loaded with the service's default OAuth scopes."""
    # Instantiate the base transport with a credentials file
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch(
        "google.cloud.dataflow_v1beta3.services.snapshots_v1_beta3.transports.SnapshotsV1Beta3Transport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.SnapshotsV1Beta3Transport(
            credentials_file="credentials.json", quota_project_id="octopus",
        )
        load_creds.assert_called_once_with(
            "credentials.json",
            scopes=None,
            default_scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/compute",
                "https://www.googleapis.com/auth/compute.readonly",
                "https://www.googleapis.com/auth/userinfo.email",
            ),
            quota_project_id="octopus",
        )


def test_snapshots_v1_beta3_base_transport_with_adc():
    """With neither credentials nor credentials_file, ADC is consulted once."""
    # Test the default credentials are used if credentials and credentials_file are None.
    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
        "google.cloud.dataflow_v1beta3.services.snapshots_v1_beta3.transports.SnapshotsV1Beta3Transport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.SnapshotsV1Beta3Transport()
        adc.assert_called_once()
def test_snapshots_v1_beta3_auth_adc():
    """Client construction with no credentials queries ADC with default scopes."""
    # If no credentials are provided, we should use ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        SnapshotsV1Beta3Client()
        adc.assert_called_once_with(
            scopes=None,
            default_scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/compute",
                "https://www.googleapis.com/auth/compute.readonly",
                "https://www.googleapis.com/auth/userinfo.email",
            ),
            quota_project_id=None,
        )


@pytest.mark.parametrize(
    "transport_class",
    [
        transports.SnapshotsV1Beta3GrpcTransport,
        transports.SnapshotsV1Beta3GrpcAsyncIOTransport,
    ],
)
def test_snapshots_v1_beta3_transport_auth_adc(transport_class):
    """Transports forward caller-supplied scopes and quota project to ADC."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        adc.assert_called_once_with(
            scopes=["1", "2"],
            default_scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/compute",
                "https://www.googleapis.com/auth/compute.readonly",
                "https://www.googleapis.com/auth/userinfo.email",
            ),
            quota_project_id="octopus",
        )
@pytest.mark.parametrize(
    "transport_class,grpc_helpers",
    [
        (transports.SnapshotsV1Beta3GrpcTransport, grpc_helpers),
        (transports.SnapshotsV1Beta3GrpcAsyncIOTransport, grpc_helpers_async),
    ],
)
def test_snapshots_v1_beta3_transport_create_channel(transport_class, grpc_helpers):
    """Channel creation receives host, ADC credentials, scopes and gRPC options."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(
        google.auth, "default", autospec=True
    ) as adc, mock.patch.object(
        grpc_helpers, "create_channel", autospec=True
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        adc.return_value = (creds, None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        create_channel.assert_called_with(
            "dataflow.googleapis.com:443",
            credentials=creds,
            credentials_file=None,
            quota_project_id="octopus",
            default_scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/compute",
                "https://www.googleapis.com/auth/compute.readonly",
                "https://www.googleapis.com/auth/userinfo.email",
            ),
            scopes=["1", "2"],
            default_host="dataflow.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.SnapshotsV1Beta3GrpcTransport,
        transports.SnapshotsV1Beta3GrpcAsyncIOTransport,
    ],
)
def test_snapshots_v1_beta3_grpc_transport_client_cert_source_for_mtls(transport_class):
    """mTLS channel credentials: explicit ssl_channel_credentials win; otherwise
    the client_cert_source_for_mtls callback supplies the cert/key pair."""
    cred = ga_credentials.AnonymousCredentials()
    # Check ssl_channel_credentials is used if provided.
    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
        mock_ssl_channel_creds = mock.Mock()
        transport_class(
            host="squid.clam.whelk",
            credentials=cred,
            ssl_channel_credentials=mock_ssl_channel_creds,
        )
        mock_create_channel.assert_called_once_with(
            "squid.clam.whelk:443",
            credentials=cred,
            credentials_file=None,
            scopes=None,
            ssl_credentials=mock_ssl_channel_creds,
            quota_project_id=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
    # is used.
    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
            transport_class(
                credentials=cred,
                client_cert_source_for_mtls=client_cert_source_callback,
            )
            expected_cert, expected_key = client_cert_source_callback()
            mock_ssl_cred.assert_called_once_with(
                certificate_chain=expected_cert, private_key=expected_key
            )
def test_snapshots_v1_beta3_host_no_port():
    """An api_endpoint given without a port gets the default :443 appended."""
    options = client_options.ClientOptions(api_endpoint="dataflow.googleapis.com")
    client = SnapshotsV1Beta3Client(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=options,
    )
    expected_host = "dataflow.googleapis.com:443"
    assert client.transport._host == expected_host
def test_snapshots_v1_beta3_host_with_port():
    """An api_endpoint that already carries a port is used unchanged."""
    options = client_options.ClientOptions(api_endpoint="dataflow.googleapis.com:8000")
    client = SnapshotsV1Beta3Client(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=options,
    )
    expected_host = "dataflow.googleapis.com:8000"
    assert client.transport._host == expected_host
def test_snapshots_v1_beta3_grpc_transport_channel():
    """A pre-built channel passed to the gRPC transport is used verbatim.

    Also verifies the default port (:443) is appended to the host and that no
    ssl_channel_credentials are recorded when a channel is supplied directly.
    """
    channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.SnapshotsV1Beta3GrpcTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # Identity check (PEP 8 E711): "== None" can be fooled by objects that
    # override __eq__ (e.g. mock.Mock), silently passing the test.
    assert transport._ssl_channel_credentials is None
def test_snapshots_v1_beta3_grpc_asyncio_transport_channel():
    """A pre-built asyncio channel passed to the transport is used verbatim."""
    channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.SnapshotsV1Beta3GrpcAsyncIOTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # Identity check (PEP 8 E711): "== None" can be fooled by objects that
    # override __eq__ (e.g. mock.Mock), silently passing the test.
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.SnapshotsV1Beta3GrpcTransport,
        transports.SnapshotsV1Beta3GrpcAsyncIOTransport,
    ],
)
def test_snapshots_v1_beta3_transport_channel_mtls_with_client_cert_source(
    transport_class,
):
    """Deprecated mTLS path: api_mtls_endpoint + client_cert_source warn and
    feed the callback's cert/key into grpc.ssl_channel_credentials."""
    with mock.patch(
        "grpc.ssl_channel_credentials", autospec=True
    ) as grpc_ssl_channel_cred:
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_ssl_cred = mock.Mock()
            grpc_ssl_channel_cred.return_value = mock_ssl_cred
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            cred = ga_credentials.AnonymousCredentials()
            with pytest.warns(DeprecationWarning):
                with mock.patch.object(google.auth, "default") as adc:
                    adc.return_value = (cred, None)
                    transport = transport_class(
                        host="squid.clam.whelk",
                        api_mtls_endpoint="mtls.squid.clam.whelk",
                        client_cert_source=client_cert_source_callback,
                    )
                    adc.assert_called_once()
            # The cert bytes below match what client_cert_source_callback returns.
            grpc_ssl_channel_cred.assert_called_once_with(
                certificate_chain=b"cert bytes", private_key=b"key bytes"
            )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
            assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.SnapshotsV1Beta3GrpcTransport,
        transports.SnapshotsV1Beta3GrpcAsyncIOTransport,
    ],
)
def test_snapshots_v1_beta3_transport_channel_mtls_with_adc(transport_class):
    """Deprecated mTLS path: with api_mtls_endpoint but no client_cert_source,
    SSL credentials come from google.auth's ADC SslCredentials."""
    mock_ssl_cred = mock.Mock()
    with mock.patch.multiple(
        "google.auth.transport.grpc.SslCredentials",
        __init__=mock.Mock(return_value=None),
        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
    ):
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            mock_cred = mock.Mock()
            with pytest.warns(DeprecationWarning):
                transport = transport_class(
                    host="squid.clam.whelk",
                    credentials=mock_cred,
                    api_mtls_endpoint="mtls.squid.clam.whelk",
                    client_cert_source=None,
                )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=mock_cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
def test_common_billing_account_path():
    """common_billing_account_path renders billingAccounts/{billing_account}."""
    billing_account = "squid"
    rendered = SnapshotsV1Beta3Client.common_billing_account_path(billing_account)
    assert rendered == f"billingAccounts/{billing_account}"


def test_parse_common_billing_account_path():
    """parse_common_billing_account_path inverts common_billing_account_path."""
    expected = {"billing_account": "clam"}
    path = SnapshotsV1Beta3Client.common_billing_account_path(**expected)
    # Round-trip: parsing the rendered path recovers the original components.
    assert SnapshotsV1Beta3Client.parse_common_billing_account_path(path) == expected
def test_common_folder_path():
    """common_folder_path renders folders/{folder}."""
    folder = "whelk"
    rendered = SnapshotsV1Beta3Client.common_folder_path(folder)
    assert rendered == f"folders/{folder}"


def test_parse_common_folder_path():
    """parse_common_folder_path inverts common_folder_path."""
    expected = {"folder": "octopus"}
    path = SnapshotsV1Beta3Client.common_folder_path(**expected)
    # Round-trip: parsing the rendered path recovers the original components.
    assert SnapshotsV1Beta3Client.parse_common_folder_path(path) == expected
def test_common_organization_path():
    """common_organization_path renders organizations/{organization}."""
    organization = "oyster"
    rendered = SnapshotsV1Beta3Client.common_organization_path(organization)
    assert rendered == f"organizations/{organization}"


def test_parse_common_organization_path():
    """parse_common_organization_path inverts common_organization_path."""
    expected = {"organization": "nudibranch"}
    path = SnapshotsV1Beta3Client.common_organization_path(**expected)
    # Round-trip: parsing the rendered path recovers the original components.
    assert SnapshotsV1Beta3Client.parse_common_organization_path(path) == expected
def test_common_project_path():
    """common_project_path renders projects/{project}."""
    project = "cuttlefish"
    rendered = SnapshotsV1Beta3Client.common_project_path(project)
    assert rendered == f"projects/{project}"


def test_parse_common_project_path():
    """parse_common_project_path inverts common_project_path."""
    expected = {"project": "mussel"}
    path = SnapshotsV1Beta3Client.common_project_path(**expected)
    # Round-trip: parsing the rendered path recovers the original components.
    assert SnapshotsV1Beta3Client.parse_common_project_path(path) == expected
def test_common_location_path():
    """common_location_path renders projects/{project}/locations/{location}."""
    project, location = "winkle", "nautilus"
    rendered = SnapshotsV1Beta3Client.common_location_path(project, location)
    assert rendered == f"projects/{project}/locations/{location}"


def test_parse_common_location_path():
    """parse_common_location_path inverts common_location_path."""
    expected = {"project": "scallop", "location": "abalone"}
    path = SnapshotsV1Beta3Client.common_location_path(**expected)
    # Round-trip: parsing the rendered path recovers the original components.
    assert SnapshotsV1Beta3Client.parse_common_location_path(path) == expected
def test_client_with_default_client_info():
    """A caller-supplied client_info reaches _prep_wrapped_messages, whether
    the transport is built by the client or constructed directly."""
    client_info = gapic_v1.client_info.ClientInfo()
    with mock.patch.object(
        transports.SnapshotsV1Beta3Transport, "_prep_wrapped_messages"
    ) as prep:
        client = SnapshotsV1Beta3Client(
            credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
    with mock.patch.object(
        transports.SnapshotsV1Beta3Transport, "_prep_wrapped_messages"
    ) as prep:
        transport_class = SnapshotsV1Beta3Client.get_transport_class()
        transport = transport_class(
            credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
    """`async with client` closes the underlying gRPC channel exactly once,
    and only on exit."""
    client = SnapshotsV1Beta3AsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
    )
    with mock.patch.object(
        type(getattr(client.transport, "grpc_channel")), "close"
    ) as close:
        async with client:
            close.assert_not_called()
        close.assert_called_once()
def test_transport_close():
    """`with client` closes the transport channel exactly once per transport kind."""
    # NOTE: this local dict shadows the imported `transports` module within
    # this test; it maps transport name -> channel attribute to patch.
    transports = {
        "grpc": "_grpc_channel",
    }
    for transport, close_name in transports.items():
        client = SnapshotsV1Beta3Client(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport
        )
        with mock.patch.object(
            type(getattr(client.transport, close_name)), "close"
        ) as close:
            with client:
                close.assert_not_called()
            close.assert_called_once()
def test_client_ctx():
    """The client context-manager protocol delegates close() to its transport."""
    transports = [
        "grpc",
    ]
    for transport in transports:
        client = SnapshotsV1Beta3Client(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport
        )
        # Test client calls underlying transport.
        with mock.patch.object(type(client.transport), "close") as close:
            close.assert_not_called()
            with client:
                pass
            close.assert_called()
@pytest.mark.parametrize(
    "client_class,transport_class",
    [
        (SnapshotsV1Beta3Client, transports.SnapshotsV1Beta3GrpcTransport),
        (SnapshotsV1Beta3AsyncClient, transports.SnapshotsV1Beta3GrpcAsyncIOTransport),
    ],
)
def test_api_key_credentials(client_class, transport_class):
    """client_options.api_key is converted to credentials via
    get_api_key_credentials and forwarded to the transport constructor."""
    with mock.patch.object(
        google.auth._default, "get_api_key_credentials", create=True
    ) as get_api_key_credentials:
        mock_cred = mock.Mock()
        get_api_key_credentials.return_value = mock_cred
        options = client_options.ClientOptions()
        options.api_key = "api_key"
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options)
            patched.assert_called_once_with(
                credentials=mock_cred,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
| googleapis/python-dataflow-client | tests/unit/gapic/dataflow_v1beta3/test_snapshots_v1_beta3.py | Python | apache-2.0 | 58,650 | [
"Octopus"
] | a4eb5f10c17e31470eb5861a78bfa1db3880fbb4dfab39362c604ea04d9ec887 |
import numpy as np
# This class generates a 2D dataset with two classes, "positive" and "negative".
# Each class follows a Gaussian distribution.
class SimpleDataSet:
    """A simple two dimensional dataset for visualization purposes.

    The points are drawn from two Gaussians: class 0 ("positive") around
    ``g1[0]`` with spread ``g1[1]`` and class 1 ("negative") around ``g2[0]``
    with spread ``g2[1]``.  The data is shuffled and split into
    train/dev/test partitions on construction.
    """

    def __init__(self, nr_examples=100, g1=None, g2=None, balance=0.5,
                 split=None, seed=1):
        """Sample the dataset.

        Args:
            nr_examples: total number of points to generate.
            g1: ``[[mean_x, mean_y], spread]`` of the positive class
                (defaults to ``[[-5, -5], 1]``).
            g2: ``[[mean_x, mean_y], spread]`` of the negative class
                (defaults to ``[[5, 5], 1]``).
            balance: fraction of examples assigned to the positive class.
            split: ``[train, dev, test]`` fractions (defaults to
                ``[0.8, 0, 0.2]``).
            seed: RNG seed so tests are reproducible.
        """
        # Materialize the defaults here instead of using mutable default
        # arguments, which would be shared across instances.
        if g1 is None:
            g1 = [[-5, -5], 1]
        if g2 is None:
            g2 = [[5, 5], 1]
        if split is None:
            split = [0.8, 0, 0.2]
        # Use a given seed for controlled tests.
        np.random.seed(seed)
        nr_positive = int(nr_examples * balance)  # examples of "positive" class
        nr_negative = int(nr_examples - nr_positive)  # examples of "negative" class
        self.mean1 = g1[0]  # mean of positive class
        self.mean2 = g2[0]  # mean of negative class
        # NOTE(review): g*[1] is passed to np.random.normal as the *standard
        # deviation* below, but treated as the variance in get_bayes_optimal();
        # the two only agree when the spread is 1 -- confirm upstream intent.
        self.variance1 = g1[1]
        self.variance2 = g2[1]
        self.balance = balance
        self.nr_points = nr_examples
        X_pos_1 = np.random.normal(g1[0][0], g1[1], [nr_positive, 1])
        X_pos_2 = np.random.normal(g1[0][1], g1[1], [nr_positive, 1])
        X_pos = np.hstack([X_pos_1, X_pos_2])
        X_neg_1 = np.random.normal(g2[0][0], g2[1], [nr_negative, 1])
        X_neg_2 = np.random.normal(g2[0][1], g2[1], [nr_negative, 1])
        X_neg = np.hstack([X_neg_1, X_neg_2])
        # np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin
        # int is the documented replacement and yields the same dtype.
        y_pos = np.zeros([nr_positive, 1], dtype=int)
        y_neg = np.ones([nr_negative, 1], dtype=int)
        X = np.vstack([X_pos, X_neg])
        y = np.vstack([y_pos, y_neg])
        # Shuffle so the two classes are interleaved before splitting.
        perm = np.random.permutation(nr_examples)
        self.split = split
        self.X = X[perm, :]
        self.y = y[perm]
        train_y, dev_y, test_y, train_X, dev_X, test_X = split_train_dev_test(
            self.X, self.y, split[0], split[1], split[2])
        self.train_X = train_X
        self.train_y = train_y
        self.dev_X = dev_X
        self.dev_y = dev_y
        self.test_X = test_X
        self.test_y = test_y

    def get_name(self):
        """Return a human-readable description of the generation parameters."""
        return "Simple Data Set -- Mean1= (%.2f,%.2f) Var1 = %.2f Mean2= (%.2f,%.2f) Var2= %.2f \nNr. Points=%.2f, " \
               "Balance=%.2f Train-Dev-Test (%.2f,.%.2f,%.2f)" % (
                   self.mean1[0], self.mean1[1], self.variance1, self.mean2[0], self.mean2[1], self.variance2,
                   self.nr_points, self.balance, self.split[0], self.split[1], self.split[2])

    def get_bayes_optimal(self):
        """Return the Bayes-optimal linear decision parameters.

        Returns a 3x2 array with one column per class: row 0 holds the bias
        (including the log class prior), rows 1-2 the weights of the two
        input dimensions.
        """
        params = np.zeros((3, 2))
        p1 = self.balance
        p2 = 1.0 - self.balance
        params[0, 0] = -1.0 / (2.0 * self.variance1) * np.dot(self.mean1, self.mean1) + np.log(p1)
        params[0, 1] = -1.0 / (2.0 * self.variance2) * np.dot(self.mean2, self.mean2) + np.log(p2)
        params[1, 0] = 1.0 / self.variance1 * self.mean1[0]
        params[2, 0] = 1.0 / self.variance1 * self.mean1[1]
        params[1, 1] = 1.0 / self.variance2 * self.mean2[0]
        params[2, 1] = 1.0 / self.variance2 * self.mean2[1]
        # (Removed a leftover debug print of the parameter matrix.)
        return params

    def plot_data(self, params=np.array([]), name="Naive Bayes", print_bayes_opt=True, backend=None):
        """Scatter-plot train (squares) and test (circles) points by class.

        Optionally overlays the Bayes-optimal decision boundary.  Returns the
        matplotlib ``(figure, axis)`` pair without showing it.
        """
        import matplotlib
        import matplotlib.pyplot as plt
        if backend is not None:
            matplotlib.use(backend)
        fig = plt.figure()
        fig.suptitle(self.get_name())
        axis = fig.add_subplot(1, 1, 1)
        idx, _ = np.nonzero(self.train_y == 0)
        idx2, _ = np.nonzero(self.train_y == 1)
        idx3, _ = np.nonzero(self.test_y == 0)
        idx4, _ = np.nonzero(self.test_y == 1)
        axis.scatter(self.train_X[idx, 0], self.train_X[idx, 1], s=30, c="red", marker='s')
        axis.scatter(self.train_X[idx2, 0], self.train_X[idx2, 1], s=30, c="blue", marker='s')
        if idx3.shape[0] > 0:
            axis.scatter(self.test_X[idx3, 0], self.test_X[idx3, 1], s=30, c="red", marker='o')
        if idx4.shape[0] > 0:
            axis.scatter(self.test_X[idx4, 0], self.test_X[idx4, 1], s=30, c="blue", marker='o')
        # Plot Bayes optimal
        if print_bayes_opt:
            bayes_opt_params = self.get_bayes_optimal()
            self.add_line(fig, axis, bayes_opt_params, "Bayes Optimal", "black")
        axis.legend()
        # fig.show()
        return fig, axis

    def add_line(self, fig, axis, params, name, colour):
        """Draw the linear decision boundary implied by ``params``.

        ``params`` has the layout produced by :meth:`get_bayes_optimal`;
        the boundary is where both class scores are equal.
        """
        x_max = np.max(self.train_X)
        x_min = np.min(self.train_X)
        x = np.arange(x_min, x_max, 0.1, dtype="float")
        y_star = (
            (params[1, 1] - params[1, 0]) * x + (params[0, 1] - params[0, 0])
        ) / (params[2, 0] - params[2, 1])
        axis.plot(x, y_star, 'g--', c=colour, label=name, linewidth=2)
        axis.legend()
        return fig, axis
def split_train_dev_test(X, y, train_per, dev_per, test_per):
    """Split (X, y) row-wise into train/dev/test partitions.

    Args:
        X: 2-d array of inputs, one example per row.
        y: column vector of labels aligned with X.
        train_per, dev_per, test_per: fractions that should sum to (at most) 1.

    Returns:
        ``(train_y, dev_y, test_y, train_X, dev_X, test_X)``; the dev parts
        are empty arrays when ``dev_per == 0``.

    Raises:
        ValueError: if the fractions sum to more than 1.
    """
    # Tolerate IEEE-754 rounding noise: e.g. 0.1 + 0.2 + 0.7 evaluates to
    # slightly more than 1.0 and must not be rejected.
    if train_per + dev_per + test_per > 1 + 1e-9:
        # Previously this printed a message and fell through returning None,
        # which surfaced as an opaque TypeError at the caller's unpacking.
        raise ValueError("Train Dev Test split should sum to one")
    dim = y.shape[0]
    split1 = int(dim * train_per)
    if dev_per == 0:
        train_y, test_y = np.vsplit(y, [split1])
        dev_y = np.array([])
        train_X = X[0:split1, :]
        dev_X = np.array([])
        test_X = X[split1:, :]
    else:
        split2 = int(dim * (train_per + dev_per))
        train_y, dev_y, test_y = np.vsplit(y, (split1, split2))
        train_X = X[0:split1, :]
        dev_X = X[split1:split2, :]
        test_X = X[split2:, :]
    return train_y, dev_y, test_y, train_X, dev_X, test_X
| LxMLS/lxmls-toolkit | lxmls/readers/simple_data_set.py | Python | mit | 5,344 | [
"Gaussian"
] | 2e678c74fd940e9cdd2b288d6a7f93b0a1d01cac24eebd210e6ef4c4869f791d |
#!/usr/bin/env python3
#
# Issue an ORCA "performSliceStitch" XMLRPC call, stitching a reservation in
# one slice to a reservation in another slice.
#
# NOTE: the original shebang claimed python2.6, but the code uses the
# Python 3 stdlib layout (xmlrpc.client), so it could never run under 2.x.
import ssl
import sys
import xmlrpc.client

from optparse import OptionParser

parser = OptionParser()
parser.add_option("-f", "--from-slice-id", dest="fromSliceID",
                  help="From Slice ID, unique string")
parser.add_option("-t", "--to-slice-id", dest="toSliceID",
                  help="To Slice ID, unique string")
parser.add_option("-o", "--from-reservation", dest="fromReservation",
                  help="From Reservation GUID")
parser.add_option("-x", "--to-reservation", dest="toReservation",
                  help="To Reservation GUID")
parser.add_option("-s", "--server", dest="server",
                  help="XMLRPC server URL", metavar="URL",
                  default="https://geni.renci.org:11443/orca/xmlrpc")
parser.add_option("-c", "--cert", dest="cert",
                  help="PEM file with cert")
parser.add_option("-p", "--private-key", dest="privateKey",
                  help="Private key file (or a PEM file if contains both private key and cert)")
parser.add_option("-e", "--secret", dest="secret",
                  help="Secret password")

(options, args) = parser.parse_args()

# All of these options must be present; bail out with usage info otherwise.
mandatories = ['fromSliceID', 'toSliceID', 'fromReservation', 'toReservation', 'secret']
for m in mandatories:
    if not options.__dict__[m]:
        print("Mandatory option is missing\n")
        parser.print_help()
        sys.exit(1)

# Create an object to represent our server.
server_url = options.server
credentials = []
props = {'ip': '172.16.100.100'}

if server_url.startswith('https://'):
    if options.cert is None or options.privateKey is None:
        print("For using secure (https) transport, you must specify the path to your certificate and private key")
        parser.print_help()
        sys.exit(1)
    # Create secure transport with the client certificate.
    # NOTE(review): ssl.SSLContext() performs no server-certificate
    # verification; presumably intentional for self-signed ORCA endpoints --
    # confirm before hardening.
    context = ssl.SSLContext()
    context.load_cert_chain(options.cert, options.privateKey)
    server = xmlrpc.client.ServerProxy(server_url, context=context)
else:
    server = xmlrpc.client.ServerProxy(server_url)

# Call the server and get our result.
print("Issuing perform slice stitch command for reservation ... \n")
result = server.orca.performSliceStitch(options.fromSliceID, options.fromReservation,
                                        options.toSliceID, options.toReservation,
                                        options.secret, props, credentials)
print(result)
| RENCI-NRIG/ahab | ahab_py/bin/performSliceStitch.py | Python | epl-1.0 | 2,317 | [
"ORCA"
] | 686129081b6d0698fe4d240693286159d1779512a4b3882a8a3cabbca48ca80f |
# -*- coding: utf-8 -*-
# run_cell.py ---
#
# Filename: run_cell.py
# Description:
# Author:
# Maintainer: P Gleeson
# Version:
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
#
#
#
# Change log:
# Sunday 16 September 2018 10:04:24 AM IST
# - Tweaked file to to make it compatible with moose.
#
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
#
# Code:
import moose
import moose.utils as mu
import sys
import numpy as np
# NOTE: This script does not work with python3
# See https://github.com/NeuroML/NeuroML2/issues/116 . If this bug is fixed then
# remove this code block.
import neuroml as nml
# Probe whether libNeuroML objects are hashable: on affected libNeuroML
# versions, using an IonChannel as a dict key raises TypeError (issue above).
a = nml.nml.nml.IonChannel()
try:
    # Dict insertion hashes the key; this is the actual hashability probe.
    b = {a : 1 }
except TypeError as e:
    # Not a failure of this model: skip the whole demo cleanly (exit code 0).
    print( 'Failed due to https://github.com/NeuroML/NeuroML2/issues/116' )
    quit( 0 )
def run(nogui):
    """Load passiveCell.nml into MOOSE, run a 150 ms simulation, and
    (unless ``nogui`` is true) plot the somatic membrane potential and
    the injected current with matplotlib.
    """
    nml_file = 'passiveCell.nml'
    print('Loading: %s' % nml_file)
    reader = moose.mooseReadNML2(nml_file)
    assert reader
    reader.read(nml_file)
    # Soma: compartment 0 of cell 0 in the first population of the first network.
    soma = reader.getComp(reader.doc.networks[0].populations[0].id, 0, 0)
    print(soma)
    # Recording tables for the stimulus and the membrane potential.
    data = moose.Neutral('/data')
    stim = reader.getInput('pulseGen1')
    inj = moose.Table('%s/pulse' % (data.path))
    moose.connect(inj, 'requestOut', stim, 'getOutputValue')
    vm = moose.Table('%s/Vm' % (data.path))
    moose.connect(vm, 'requestOut', soma, 'getVm')
    # Integration step, sampling step, and total simulated time (seconds).
    simdt = 1e-6
    plotdt = 1e-4
    simtime = 150e-3
    for tick in range(8):
        moose.setClock(tick, simdt)
    moose.setClock(8, plotdt)
    moose.reinit()
    moose.start(simtime)
    print("Finished simulation!")
    t = np.linspace(0, simtime, len(vm.vector))
    if nogui:
        return
    import matplotlib.pyplot as plt
    plt.subplot(211)
    plt.plot(t, vm.vector * 1e3, label='Vm (mV)')
    plt.legend()
    plt.title('Vm')
    plt.subplot(212)
    plt.title('Input')
    plt.plot(t, inj.vector * 1e9, label='injected (nA)')
    plt.legend()
    plt.show()
    plt.close()
if __name__ == '__main__':
    # Pass -nogui on the command line to run the simulation without plots.
    nogui = '-nogui' in sys.argv
    run(nogui)
| BhallaLab/moose-examples | neuroml2/run_cell.py | Python | gpl-2.0 | 2,868 | [
"MOOSE"
] | 8db7611103d21f50f11aad8adbf84d958ff22ffb1db031ee31c804c782834027 |
# -*- coding: utf-8 -*-
import moose
import numpy as np
from collections import Counter
def xyPosition(objInfo, xory):
    """Return the 'x' or 'y' layout field of the given moose info element
    as a float, or 0.0 when the stored value is not numeric.
    """
    try:
        value = moose.element(objInfo).getField(xory)
        return float(value)
    except ValueError:
        return 0.0
def setupMeshObj(modelRoot):
    '''Setup compartment and its members pool, reaction, enz cplx under
    self.meshEntry dictionaries.

    meshEntry maps each compartment (ChemCompt) to a dict keyed by moose
    object type, e.g.
    meshEntry[meshEnt] = {'reaction': ..., 'enzyme': ..., 'pool': ...,
                          'cplx': ..., 'table': ..., 'function': ...}

    Returns (meshEntry, xmin, xmax, ymin, ymax, positionInfoExist,
    listOfitems) where the bounds span the annotated layout coordinates
    and positionInfoExist is False when every coordinate is zero.
    '''
    xmin = 0.0
    xmax = 1.0
    ymin = 0.0
    ymax = 1.0
    listOfitems = {}
    positionInfoExist = True
    # FIX: the original also had `if meshEntry: meshEntry.clear() else: ...`
    # right after this assignment -- dead code, since the dict was always
    # freshly created and empty.
    meshEntry = {}
    xcord = []
    ycord = []
    meshEntryWildcard = '/##[ISA=ChemCompt]'
    if modelRoot != '/':
        meshEntryWildcard = modelRoot + meshEntryWildcard
    for meshEnt in moose.wildcardFind(meshEntryWildcard):
        mollist = []
        cplxlist = []
        # Members of this compartment, grouped by moose base class.
        mol_cpl = moose.wildcardFind(meshEnt.path + '/##[ISA=PoolBase]')
        funclist = moose.wildcardFind(meshEnt.path + '/##[ISA=Function]')
        enzlist = moose.wildcardFind(meshEnt.path + '/##[ISA=EnzBase]')
        realist = moose.wildcardFind(meshEnt.path + '/##[ISA=ReacBase]')
        tablist = moose.wildcardFind(meshEnt.path + '/##[ISA=StimulusTable]')
        if mol_cpl or funclist or enzlist or realist or tablist:
            for m in mol_cpl:
                if isinstance(moose.element(m.parent), moose.CplxEnzBase):
                    # A pool parented by an enzyme is an enzyme-substrate
                    # complex; its layout info lives on the parent enzyme.
                    cplxlist.append(m)
                    objInfo = m.parent.path + '/info'
                elif isinstance(moose.element(m), moose.PoolBase):
                    mollist.append(m)
                    objInfo = m.path + '/info'
                if moose.exists(objInfo):
                    listOfitems[moose.element(moose.element(objInfo).parent)] = {
                        'x': xyPosition(objInfo, 'x'),
                        'y': xyPosition(objInfo, 'y')}
                xcord.append(xyPosition(objInfo, 'x'))
                ycord.append(xyPosition(objInfo, 'y'))
            getxyCord(xcord, ycord, funclist, listOfitems)
            getxyCord(xcord, ycord, enzlist, listOfitems)
            getxyCord(xcord, ycord, realist, listOfitems)
            getxyCord(xcord, ycord, tablist, listOfitems)
            meshEntry[meshEnt] = {'enzyme': enzlist,
                                  'reaction': realist,
                                  'pool': mollist,
                                  'cplx': cplxlist,
                                  'table': tablist,
                                  'function': funclist
                                  }
            # Layout info "exists" only if some coordinate is non-zero.
            positionInfoExist = not (len(np.nonzero(xcord)[0]) == 0
                                     and len(np.nonzero(ycord)[0]) == 0)
    # FIX: guard against empty coordinate lists (no compartments found);
    # min()/max() on an empty sequence would raise ValueError.
    if positionInfoExist and xcord and ycord:
        xmin = min(xcord)
        xmax = max(xcord)
        ymin = min(ycord)
        ymax = max(ycord)
    return meshEntry, xmin, xmax, ymin, ymax, positionInfoExist, listOfitems
def sizeHint(self):
    # NOTE(review): QtCore is not imported anywhere in this module, so calling
    # this stray Qt widget hook would raise NameError. It looks like a
    # leftover from GUI code -- presumably unused here; confirm before removal.
    return QtCore.QSize(800,400)
def getxyCord(xcord, ycord, list1, listOfitems):
    """Append the annotated x/y layout coordinates of every non-Function
    item in ``list1`` to ``xcord``/``ycord`` and record them in
    ``listOfitems`` (keyed by the annotated moose element).

    moose.Function elements are skipped: they carry no '/info' child of
    their own (commented-out code in the original suggested using the
    parent's info instead).
    """
    for item in list1:
        if isinstance(item, moose.Function):
            # BUG FIX: the original only guarded the *assignment* of objInfo,
            # so a Function as the first item raised NameError, and later
            # Functions silently reused the previous item's stale objInfo.
            # Skip Functions entirely instead.
            continue
        objInfo = item.path + '/info'
        xcord.append(xyPosition(objInfo, 'x'))
        ycord.append(xyPosition(objInfo, 'y'))
        if moose.exists(objInfo):
            listOfitems[moose.element(moose.element(objInfo).parent)] = {
                'x': xyPosition(objInfo, 'x'),
                'y': xyPosition(objInfo, 'y')}
def setupItem(modelPath,cntDict):
    '''Collect connectivity information: what is connected to what.

    For every reaction/enzyme under modelPath, the substrates and products
    are gathered; for every Function, its inputs and outputs; for every
    StimulusTable, its output targets. Results are stored in cntDict as
    cntDict[element] = (sublist, prdlist) -- lists of
    (element, role-tag, multiplicity) tuples, where the role tag is one of
    's' (substrate), 'p' (product), 't' (enzyme target), 'cplx' (complex),
    'sts'/'stp' (function input/output) or 'tab' (table target).
    '''
    #print " setupItem"
    sublist = []
    prdlist = []
    # moose base classes whose instances we inspect for connectivity.
    zombieType = ['ReacBase','EnzBase','Function','StimulusTable']
    for baseObj in zombieType:
        path = '/##[ISA='+baseObj+']'
        if modelPath != '/':
            path = modelPath+path
        if ( (baseObj == 'ReacBase') or (baseObj == 'EnzBase')):
            for items in moose.wildcardFind(path):
                sublist = []
                prdlist = []
                # Substrates ('subOut' messages) with their multiplicities.
                uniqItem,countuniqItem = countitems(items,'subOut')
                subNo = uniqItem
                for sub in uniqItem:
                    sublist.append((moose.element(sub),'s',countuniqItem[sub]))
                # Products ('prd' messages).
                uniqItem,countuniqItem = countitems(items,'prd')
                prdNo = uniqItem
                if (len(subNo) == 0 or len(prdNo) == 0):
                    print ("Substrate Product is empty ",path, " ",items)
                for prd in uniqItem:
                    prdlist.append((moose.element(prd),'p',countuniqItem[prd]))
                # NOTE(review): this branch is unreachable -- 'CplxEnzBase' is
                # not in zombieType, so baseObj can never equal it here.
                # Presumably leftover from an earlier loop structure.
                if (baseObj == 'CplxEnzBase') :
                    uniqItem,countuniqItem = countitems(items,'toEnz')
                    for enzpar in uniqItem:
                        sublist.append((moose.element(enzpar),'t',countuniqItem[enzpar]))
                    uniqItem,countuniqItem = countitems(items,'cplxDest')
                    for cplx in uniqItem:
                        prdlist.append((moose.element(cplx),'cplx',countuniqItem[cplx]))
                if (baseObj == 'EnzBase'):
                    # The enzyme's parent pool ('enzDest' messages).
                    uniqItem,countuniqItem = countitems(items,'enzDest')
                    for enzpar in uniqItem:
                        sublist.append((moose.element(enzpar),'t',countuniqItem[enzpar]))
                cntDict[items] = sublist,prdlist
        elif baseObj == 'Function':
            for items in moose.wildcardFind(path):
                sublist = []
                prdlist = []
                # Function inputs arrive on the x[0] child element.
                item = items.path+'/x[0]'
                uniqItem,countuniqItem = countitems(item,'input')
                for funcpar in uniqItem:
                    sublist.append((moose.element(funcpar),'sts',countuniqItem[funcpar]))
                uniqItem,countuniqItem = countitems(items,'valueOut')
                for funcpar in uniqItem:
                    prdlist.append((moose.element(funcpar),'stp',countuniqItem[funcpar]))
                cntDict[items] = sublist,prdlist
        # elif baseObj == 'Function':
        #     #ZombieSumFunc adding inputs
        #     inputlist = []
        #     outputlist = []
        #     funplist = []
        #     nfunplist = []
        #     for items in moose.wildcardFind(path):
        #         for funplist in moose.element(items).neighbors['valueOut']:
        #             for func in funplist:
        #                 funcx = moose.element(items.path+'/x[0]')
        #                 uniqItem,countuniqItem = countitems(funcx,'input')
        #                 for inPut in uniqItem:
        #                     inputlist.append((inPut,'st',countuniqItem[inPut]))
        #             cntDict[func] = inputlist
        else:
            # StimulusTable: record its output targets only.
            for tab in moose.wildcardFind(path):
                tablist = []
                uniqItem,countuniqItem = countitems(tab,'output')
                for tabconnect in uniqItem:
                    tablist.append((moose.element(tabconnect),'tab',countuniqItem[tabconnect]))
                cntDict[tab] = tablist
def countitems(mitems, objtype):
    """Return (unique neighbours, multiplicity Counter) for the given
    message field (e.g. 'subOut', 'prd') of a moose element."""
    neighbours = moose.element(mitems).neighbors[objtype]
    return set(neighbours), Counter(neighbours)
| dharmasam9/moose-core | python/moose/chemUtil/chemConnectUtil.py | Python | gpl-3.0 | 7,694 | [
"MOOSE"
] | 33032dd6ecc01743ca0c3f9791a68f198cf212a4a154eba5ff9327224dc5553b |
# Copyright 2014 Sandra Giuliani
# config.py
# Configuration file for drug_repo.py
# All names below are user-editable settings read by drug_repo.py at import
# time; edit the values, not the names.
############################################################################
### PERSONAL INFO
############################################################################
# what is your name?
your_name = "Sandra"
# what is your email? (for NCBI Expasy and T-coffee)
your_email = "sandraxgiuliani@gmail.com"
############################################################################
############################################################################
### PIPELINE STEPS
############################################################################
# define which steps of the pipeline you wish to run
# integer between 0 and 10
# eg steps = 6 will run all steps up to (and including) 6
steps = 8
# step of the pipeline that requires modeller
# only change this if you have shuffled the main function!
modeller_step = 10
############################################################################
############################################################################
### TAXONOMY
############################################################################
# define list of taxa ids you are interested in
# eg ['SCHMA', 'SCHHA', 'SCHJA']
taxa = ['SCHMA']
# to identify a specific species, look up species name in speclist.txt
# to find the mnemonic code
# e.g. Schistosoma
# SCHMA (S. Mansoni), SCHHA (S. haematobium), SCHJA (S. japonicum)
# e.g Trypanosoma
# TRYB2 = Trypanosoma brucei brucei (strain 927/4 GUTat10.1)
# TRYB9 = Trypanosoma brucei gambiense (strain MHOM/CI/86/DAL972)
# TRYBB = Trypanosoma brucei brucei
# TRYBG = Trypanosoma brucei gambiense
# TRYBR = Trypanosoma brucei rhodesiense
# TRYCC = Trypanosoma cruzi (strain CL Brener)
# TRYCI = Trypanosoma congolense (strain IL3000)
# TRYCO = Trypanosoma congolense
# TRYCR = Trypanosoma cruzi
# TRYEQ = Trypanosoma equiperdum
# TRYEV = Trypanosoma evansi
# e.g. plasmodium (there are many others!)
# PLAF1 E 57265: N=Plasmodium falciparum (isolate 311)
############################################################################
############################################################################
### PATHS
############################################################################
# path to archindex binary
# old path "./../archSchema/bin/archindex" still valid on mac
# new path on linux machine "./../Arch/archindex"
archindex_path = "./../archSchema/bin/archindex"
# absolute path to SMSD directory (where SMSD.sh is)
# 1.5.1 - first version I have used (from sourceforge)
# 1.6 - version sent by Asad that should handle multiple sdf and keep ids
# "/home/sandra/SMSD1.6" on linux
# /Users/sandragiuliani/SMSD1.6 on mac
smsd_path = "/Users/sandragiuliani/SMSD1.6"
############################################################################
############################################################################
### SETS AND FILTERING SETTINGS
############################################################################
# which sets to analyse
# e.g. ['A'] -> just ChEMBL
# e.g. ['A', 'B'] -> both ChEMBL and DrugBank
sets = ['A']
# mapping from set letter to human-readable dataset name
dataset_dic = {'A': 'ChEMBL', 'B': 'DrugBank'}
# chembl filter settings
# define list of clinical phases you are interested in
# (only applies to ChEMBL set)
# eg. '4', '3', '' (empty string for the unknown phase)
chembl_phases = ['4']
# define molecule types you are interested in
chembl_mol_type = ['Synthetic Small Molecule']
############################################################################
############################################################################
### CLUSTERING SETTINGS
############################################################################
# define similarity threshold for clustering
# e.g. 0.9
sim_threshold = 0.9
############################################################################
############################################################################
### REPOSITIONING CANDIDATE
############################################################################
# repositioning candidate to be examined
# put CHEMBL or DB ID eg 'CHEMBL98', 'DB03292'
repo_candidate = 'CHEMBL973'
# target number, for selecting which drug target to align to the potential
# parasite targets.
# 0 is the first one (could be the only one), 1 the second one...
repo_target_no = 0
############################################################################
############################################################################
### HOMOLOGY MODEL
############################################################################
# number of homology models to make
model_no = 10
# alignment file - has to be in PIR format
model_align = '1d3h_schma.ali'
# template name - PDB ID of the crystal structure
model_xray = '1d3h'
# sequence to model name - arbitrary name, but has to match in the .ali file
model_seq = 'schma'
############################################################################
############################################################################
### INPUT_FILES
############################################################################
# input files (refer to README for source)
# drug file from ChEMBL ('Browse drugs') 'chembl_drugs.txt'
# number of drugs should be 10406
# FOR TESTING, use 'chembl_drugs_test.txt'
chembl_input = 'chembl_drugs.txt'
# define CHEMBL_TARGETS as the target file from ChEMBL ('Browse drug targets')
# number of drugs associated with targets should be 2007
chembl_targets = 'chembl_drugtargets.txt'
# define CHEMBL_UNIPROT as the chemblID/uniprot mapping file
chembl_uniprot = 'chembl_uniprot_mapping.txt'
# define DRUGBANK_INPUT as the DrugBank Drug Target Identifiers
# either: all_target_ids_all.csv (all drugs, 4,026 entries),
# or: small_molecule_target_ids_all.csv (small molecule drugs, 3,899 entries)
# FOR TESTING, use 'small_molecule_target_ids_all_test.csv'
drugbank_input = 'small_molecule_target_ids_all.csv'
# define sdf file with drugbank drugs (contains smiles)
drugbank_sdf = 'all.sdf'
# uniprot to pdb csv mapping file
# if necessary, uniprot_pdb.tsv (tsv version) can be retrieved
uniprot_pdb = "uniprot_pdb.csv"
# pdb to lig mapping file
pdb_lig = "lig_pairs.lst"
# pointless het groups
pointless_het = "pointless_het.csv"
# chemical component smiles dictionary
cc_smi = "Components-smiles-oe.smi"
# location of the species codes to species names mapping file
spec_list = 'speclist.txt'
# pdb to pfam residue mapping
pdb_to_pfam = 'pdb_pfam_mapping.txt'
# uniprot to cath residue mapping
uniprot_cath = 'arch_schema_cath.tsv'
############################################################################
############################################################################
### OUTPUT_FILES
############################################################################
# define names of output files, they will be overwritten every time
# if you do not want that to happen, add a timestamp to the file names
# 'dr' stands for drug repositioning
# other temporary files will also be named dr_*
# log
log_name = 'dr_log.log'
#tcoffee log
t_coffee = 'dr_tcoffee.log'
# chembl similarity scores written to file
chembl_clust_sim_scores = 'dr_chembl_clust_sim_scores.txt'
# chembl cluster to be imported in excel
# clustered drugs with info from chembl! (no mapping info)
chembl_cluster = 'dr_chembl_clust_excel.txt'
############################################################################
"CRYSTAL"
] | d621cfb6eb948afa7f5fca944340e646f74708b39750f3ad8898fb824a4a5a79 |
import pytest
import os, shutil, glob
import numpy as np
from flare.struc import Structure, get_unique_species
from flare.dft_interface.qe_util import (
parse_dft_input,
parse_dft_forces,
run_dft_par,
edit_dft_input_positions,
dft_input_to_structure,
)
def cleanup_espresso_run(basename: str):
    """Delete every file and directory in the current working directory
    whose name starts with ``basename`` (e.g. pwscf.in, pwscf.out and the
    pwscf.save output directory left behind by Quantum ESPRESSO)."""
    for entry in glob.glob(basename + "*"):
        if os.path.isfile(entry):
            os.remove(entry)
        else:
            shutil.rmtree(entry)
# ------------------------------------------------------
# test otf helper functions
# ------------------------------------------------------
@pytest.mark.parametrize(
    "qe_input,exp_spec", [("./test_files/qe_input_1.in", ["H", "H"])]
)
def test_species_parsing(qe_input, exp_spec):
    """parse_dft_input must recover the expected species list."""
    _, species, _, _ = parse_dft_input(qe_input)
    assert len(species) == len(exp_spec)
    for parsed, expected in zip(species, exp_spec):
        assert parsed == expected
@pytest.mark.parametrize(
    "qe_input,exp_cell", [("./test_files/qe_input_1.in", 5.0 * np.eye(3))]
)
def test_cell_parsing(qe_input, exp_cell):
    """The lattice parsed from the QE input must match the reference cell."""
    _, _, cell, _ = parse_dft_input(qe_input)
    assert (exp_cell == cell).all()
# @pytest.mark.parametrize("qe_input,mass_dict",
# [
# ('./test_files/qe_input_1.in',
# {'H': 0.00010364269933008285}),
# ('./test_files/qe_input_2.in',
# {'Al': 0.00010364269933008285 * 26.9815385})
# ]
# )
# def test_cell_parsing(qe_input, mass_dict):
# positions, species, cell, masses = parse_dft_input(qe_input)
# assert masses == mass_dict
@pytest.mark.parametrize(
    "qe_input",
    [
        "./test_files/qe_input_1.in",
        "./test_files/qe_input_2.in",
        "./test_files/qe_input_3.in",
    ],
)
def test_input_to_structure(qe_input):
    """Every sample QE input file should convert to a flare Structure."""
    structure = dft_input_to_structure(qe_input)
    assert isinstance(structure, Structure)
@pytest.mark.parametrize(
    "qe_input,qe_output",
    [("./test_files/qe_input_1.in", "./test_files/qe_output_1.out")],
)
@pytest.mark.skipif(
    not os.environ.get("PWSCF_COMMAND", False),
    reason=(
        "PWSCF_COMMAND not found "
        "in environment: Please install Quantum "
        "ESPRESSO and set the PWSCF_COMMAND env. "
        "variable to point to pw.x."
    ),
)
def test_espresso_calling(qe_input, qe_output):
    # Run pw.x on a copy of the input, then check that the forces returned
    # by run_dft_par match those parsed back from the output file.
    # NOTE(review): the qe_output parameter is only used by the parametrize
    # decorator; the test parses the freshly generated pwscf.out instead.
    dft_loc = os.environ.get("PWSCF_COMMAND")
    shutil.copyfile(qe_input, "pwscf.in")
    positions, species, cell, masses = parse_dft_input(qe_input)
    structure = Structure(
        cell=cell,
        species=species,
        positions=positions,
        mass_dict=masses,
        species_labels=species,
    )
    forces = run_dft_par("pwscf.in", structure, dft_loc, dft_out="pwscf.out")
    # Cross-check the in-memory forces against the parsed output file.
    ref_forces = parse_dft_forces("pwscf.out")
    assert len(forces) == len(ref_forces)
    for i in range(structure.nat):
        assert np.allclose(forces[i], ref_forces[i])
    # Remove pwscf.* files/directories created by the run.
    cleanup_espresso_run("pwscf")
def test_espresso_input_edit():
    """
    Load a structure in from qe_input_1, change the position and cell,
    then edit and re-parse, checking that the perturbed position and the
    cell round-trip through edit_dft_input_positions.
    """
    # FIX: use shutil.copyfile instead of os.system("cp ...") -- portable
    # (works on Windows), raises on failure instead of silently ignoring it,
    # and consistent with test_espresso_calling above.
    shutil.copyfile("test_files/qe_input_1.in", "qe_input_1.in")
    positions, species, cell, masses = parse_dft_input("./qe_input_1.in")
    _, coded_species = get_unique_species(species)
    structure = Structure(
        cell, coded_species, positions, masses, species_labels=species
    )
    # Perturb the first atom, write an edited input, and re-parse it.
    structure.positions[0] += np.random.randn(3)
    new_file = edit_dft_input_positions("./qe_input_1.in", structure=structure)
    positions, species, cell, masses = parse_dft_input(new_file)
    assert np.equal(positions[0], structure.positions[0]).all()
    assert np.equal(structure.vec1, cell[0, :]).all()
    os.remove("qe_input_1.in")
| mir-group/flare | tests/test_qe_util.py | Python | mit | 3,856 | [
"ESPResSo"
] | 7326f145d9c53726e331e0d4873c8e057afafed1a9cc5a5291b246757a7c178e |
"""
This module implements an interface to the critic2 Bader analysis code.
For most Bader analysis purposes, users are referred to
pymatgen.command_line.bader_caller instead, this module is for advanced
usage requiring identification of critical points in the charge density.
This module depends on a compiled critic2 executable available in the path.
Please follow the instructions at https://github.com/aoterodelaroza/critic2
to compile.
New users are *strongly* encouraged to read the critic2 manual first.
In brief,
* critic2 searches for critical points in charge density
* a critical point can be one of four types: nucleus, bond, ring
or cage
* it does this by seeding locations for likely critical points
and then searching in these regions
* there are two lists of critical points in the output, a list
of non-equivalent points (with in-depth information about the
field at those points), and a full list of points generated
by the appropriate symmetry operations
* connectivity between these points is also provided when
appropriate (e.g. the two nucleus critical points linked to
a bond critical point)
* critic2 can do many other things besides
If you use this module, please cite the following:
A. Otero-de-la-Roza, E. R. Johnson and V. Luaña,
Comput. Phys. Commun. 185, 1007-1018 (2014)
(http://dx.doi.org/10.1016/j.cpc.2013.10.026)
A. Otero-de-la-Roza, M. A. Blanco, A. Martín Pendás and
V. Luaña, Comput. Phys. Commun. 180, 157–166 (2009)
(http://dx.doi.org/10.1016/j.cpc.2008.07.018)
"""
import logging
import os
import subprocess
import warnings
from enum import Enum
import numpy as np
from monty.dev import requires
from monty.json import MSONable
from monty.os.path import which
from monty.serialization import loadfn
from monty.tempfile import ScratchDir
from scipy.spatial import KDTree
from pymatgen.analysis.graphs import StructureGraph
from pymatgen.command_line.bader_caller import get_filepath
from pymatgen.core.periodic_table import DummySpecies
from pymatgen.io.vasp.inputs import Potcar
from pymatgen.io.vasp.outputs import Chgcar, VolumetricData
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class Critic2Caller:
    """
    Class to call critic2 and store standard output for further processing.
    """

    @requires(
        which("critic2"),
        "Critic2Caller requires the executable critic to be in the path. "
        "Please follow the instructions at https://github.com/aoterodelaroza/critic2.",
    )
    def __init__(self, input_script):
        """
        Run Critic2 on a given input script.

        Writes the script to input_script.cri in the current directory,
        invokes the critic2 executable on it, and stores stdout/stderr plus
        any cpreport.json / yt.json files the script produced.

        :param input_script: string defining the critic2 input
        :raises RuntimeError: if critic2 exits with a non-zero return code
        """
        # store if examining the input script is useful,
        # not otherwise used
        self._input_script = input_script
        with open("input_script.cri", "w") as f:
            f.write(input_script)
        args = ["critic2", "input_script.cri"]
        # BUG FIX: stderr must be piped for the decode/warn branch below to
        # ever run. Previously only stdout/stdin were captured, so
        # communicate() always returned stderr=None and critic2's error
        # output was silently dropped.
        with subprocess.Popen(
            args,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            stdin=subprocess.PIPE,
            close_fds=True,
        ) as rs:
            stdout, stderr = rs.communicate()
        stdout = stdout.decode()
        if stderr:
            stderr = stderr.decode()
            warnings.warn(stderr)
        if rs.returncode != 0:
            raise RuntimeError("critic2 exited with return code {}: {}".format(rs.returncode, stdout))
        self._stdout = stdout
        self._stderr = stderr
        # Optional JSON outputs are only present when the input script
        # requested them (cpreport / yt commands).
        if os.path.exists("cpreport.json"):
            cpreport = loadfn("cpreport.json")
        else:
            cpreport = None
        self._cpreport = cpreport
        if os.path.exists("yt.json"):
            yt = loadfn("yt.json")
        else:
            yt = None
        self._yt = yt

    @classmethod
    def from_chgcar(
        cls,
        structure,
        chgcar=None,
        chgcar_ref=None,
        user_input_settings=None,
        write_cml=False,
        write_json=True,
        zpsp=None,
    ):
        """
        Run Critic2 in automatic mode on a supplied structure, charge
        density (chgcar) and reference charge density (chgcar_ref).

        The reason for a separate reference field is that in
        VASP, the CHGCAR charge density only contains valence
        electrons and may be missing substantial charge at
        nuclei leading to misleading results. Thus, a reference
        field is commonly constructed from the sum of AECCAR0
        and AECCAR2 which is the total charge density, but then
        the valence charge density is used for the final analysis.

        If chgcar_ref is not supplied, chgcar will be used as the
        reference field. If chgcar is not supplied, the promolecular
        charge density will be used as the reference field -- this can
        often still give useful results if only topological information
        is wanted.

        User settings is a dictionary that can contain:
        * GRADEPS, float (field units), gradient norm threshold
        * CPEPS, float (Bohr units in crystals), minimum distance between
          critical points for them to be equivalent
        * NUCEPS, same as CPEPS but specifically for nucleus critical
          points (critic2 default is depedent on grid dimensions)
        * NUCEPSH, same as NUCEPS but specifically for hydrogen nuclei
          since associated charge density can be significantly displaced
          from hydrogen nucleus
        * EPSDEGEN, float (field units), discard critical point if any
          element of the diagonal of the Hessian is below this value,
          useful for discarding points in vacuum regions
        * DISCARD, float (field units), discard critical points with field
          value below this value, useful for discarding points in vacuum
          regions
        * SEED, list of strings, strategies for seeding points, default
          is ['WS 1', 'PAIR 10'] which seeds critical points by
          sub-dividing the Wigner-Seitz cell and between every atom pair
          closer than 10 Bohr, see critic2 manual for more options

        :param structure: Structure to analyze
        :param chgcar: Charge density to use for analysis. If None, will
            use promolecular density. Should be a Chgcar object or path (string).
        :param chgcar_ref: Reference charge density. If None, will use
            chgcar as reference. Should be a Chgcar object or path (string).
        :param user_input_settings (dict): as explained above
        :param write_cml (bool): Useful for debug, if True will write all
            critical points to a file 'table.cml' in the working directory
            useful for visualization
        :param write_json (bool): Whether to write out critical points
            and YT json. YT integration will be performed with this setting.
        :param zpsp (dict): Dict of element/symbol name to number of electrons
            (ZVAL in VASP pseudopotential), with which to properly augment core regions
            and calculate charge transfer. Optional.
        """
        settings = {"CPEPS": 0.1, "SEED": ["WS", "PAIR DIST 10"]}
        if user_input_settings:
            settings.update(user_input_settings)
        # Load crystal structure
        input_script = ["crystal POSCAR"]
        # Load data to use as reference field
        if chgcar_ref:
            input_script += ["load ref.CHGCAR id chg_ref", "reference chg_ref"]
        # Load data to use for analysis
        if chgcar:
            input_script += ["load int.CHGCAR id chg_int", "integrable chg_int"]
            if zpsp:
                # Append the zpsp specification to the "load int.CHGCAR" line
                # (index -2; the last line is the "integrable" command).
                zpsp_str = " zpsp " + " ".join(["{} {}".format(symbol, int(zval)) for symbol, zval in zpsp.items()])
                input_script[-2] += zpsp_str
        # Command to run automatic analysis
        auto = "auto "
        for k, v in settings.items():
            if isinstance(v, list):
                for item in v:
                    auto += "{} {} ".format(k, item)
            else:
                auto += "{} {} ".format(k, v)
        input_script += [auto]
        if write_cml:
            input_script += ["cpreport ../table.cml cell border graph"]
        if write_json:
            input_script += ["cpreport cpreport.json"]
        if write_json and chgcar:
            # YT integration requires gridded data to work
            input_script += ["yt"]
            input_script += ["yt JSON yt.json"]
        input_script = "\n".join(input_script)
        # Run critic2 inside a scratch directory; input files are written
        # there and the parsed results captured before it is cleaned up.
        with ScratchDir(".") as temp_dir:
            os.chdir(temp_dir)
            structure.to(filename="POSCAR")
            if chgcar and isinstance(chgcar, VolumetricData):
                chgcar.write_file("int.CHGCAR")
            elif chgcar:
                os.symlink(chgcar, "int.CHGCAR")
            if chgcar_ref and isinstance(chgcar_ref, VolumetricData):
                chgcar_ref.write_file("ref.CHGCAR")
            elif chgcar_ref:
                os.symlink(chgcar_ref, "ref.CHGCAR")
            caller = cls(input_script)
            caller.output = Critic2Analysis(
                structure,
                stdout=caller._stdout,
                stderr=caller._stderr,
                cpreport=caller._cpreport,
                yt=caller._yt,
                zpsp=zpsp,
            )
            return caller

    @classmethod
    def from_path(cls, path, suffix="", zpsp=None):
        """
        Convenience method to run critic2 analysis on a folder containing
        typical VASP output files.

        This method will:

        1. Look for files CHGCAR, AECAR0, AECAR2, POTCAR or their gzipped
        counterparts.

        2. If AECCAR* files are present, constructs a temporary reference
        file as AECCAR0 + AECCAR2.

        3. Runs critic2 analysis twice: once for charge, and a second time
        for the charge difference (magnetization density).

        :param path: path to folder to search in
        :param suffix: specific suffix to look for (e.g. '.relax1' for
            'CHGCAR.relax1.gz')
        :param zpsp: manually specify ZPSP if POTCAR not present
        :return:
        """
        chgcar_path = get_filepath("CHGCAR", "Could not find CHGCAR!", path, suffix)
        chgcar = Chgcar.from_file(chgcar_path)
        chgcar_ref = None
        # Prefer deriving zpsp from the POTCAR when it is available.
        if not zpsp:
            potcar_path = get_filepath(
                "POTCAR",
                "Could not find POTCAR, will not be able to calculate charge transfer.",
                path,
                suffix,
            )
            if potcar_path:
                potcar = Potcar.from_file(potcar_path)
                zpsp = {p.element: p.zval for p in potcar}
        if not zpsp:
            # try and get reference "all-electron-like" charge density if zpsp not present
            aeccar0_path = get_filepath(
                "AECCAR0",
                "Could not find AECCAR0, interpret Bader results with caution.",
                path,
                suffix,
            )
            aeccar0 = Chgcar.from_file(aeccar0_path) if aeccar0_path else None
            aeccar2_path = get_filepath(
                "AECCAR2",
                "Could not find AECCAR2, interpret Bader results with caution.",
                path,
                suffix,
            )
            aeccar2 = Chgcar.from_file(aeccar2_path) if aeccar2_path else None
            chgcar_ref = aeccar0.linear_add(aeccar2) if (aeccar0 and aeccar2) else None
        return cls.from_chgcar(chgcar.structure, chgcar, chgcar_ref, zpsp=zpsp)
class CriticalPointType(Enum):
    """
    Enum type for the different varieties of critical point.

    The trailing comments give the conventional (rank, signature) of the
    field Hessian associated with each type of point.
    """

    nucleus = "nucleus"  # (3, -3)
    bond = "bond"  # (3, -1)
    ring = "ring"  # (3, 1)
    cage = "cage"  # (3, 3)
    nnattr = "nnattr"  # (3, -3), non-nuclear attractor
class CriticalPoint(MSONable):
    """
    Access information about a critical point and the field values at that point.
    """

    def __init__(
        self,
        index,
        type,
        frac_coords,
        point_group,
        multiplicity,
        field,
        field_gradient,
        coords=None,
        field_hessian=None,
    ):
        """
        Class to characterise a critical point from a topological
        analysis of electron charge density.

        Note this class is usually associated with a Structure, so
        has information on multiplicity/point group symmetry.

        :param index: index of point
        :param type: type of point, given as a string
            (note: this parameter name shadows the builtin ``type``;
            kept for backward compatibility with existing callers)
        :param coords: Cartesian co-ordinates in Angstroms
        :param frac_coords: fractional co-ordinates
        :param point_group: point group associated with critical point
        :param multiplicity: number of equivalent critical points
        :param field: value of field at point (f)
        :param field_gradient: gradient of field at point (grad f)
        :param field_hessian: hessian of field at point (del^2 f)
        """
        self.index = index
        # Stored privately; exposed as a CriticalPointType via the
        # `type` property below.
        self._type = type
        self.coords = coords
        self.frac_coords = frac_coords
        self.point_group = point_group
        self.multiplicity = multiplicity
        self.field = field
        self.field_gradient = field_gradient
        self.field_hessian = field_hessian

    @property
    def type(self):
        """
        Returns: Instance of CriticalPointType
        """
        return CriticalPointType(self._type)

    def __str__(self):
        return "Critical Point: {} ({})".format(self.type.name, self.frac_coords)

    @property
    def laplacian(self):
        """
        Returns: The Laplacian of the field at the critical point
        (trace of the Hessian matrix)
        """
        return np.trace(self.field_hessian)

    @property
    def ellipticity(self):
        """
        Most meaningful for bond critical points,
        can be physically interpreted as e.g. degree
        of pi-bonding in organic molecules. Consult
        literature for more information.

        Computed from the two smallest eigenvalues of the Hessian
        (sorted ascending) as eig[0]/eig[1] - 1.

        Returns: The ellpiticity of the field at the critical point
        """
        eig, _ = np.linalg.eig(self.field_hessian)
        eig.sort()
        return eig[0] / eig[1] - 1
class Critic2Analysis(MSONable):
"""
Class to process the standard output from critic2 into pymatgen-compatible objects.
"""
    def __init__(self, structure, stdout=None, stderr=None, cpreport=None, yt=None, zpsp=None):
        """
        This class is used to store results from the Critic2Caller.

        To explore the bond graph, use the "structure_graph"
        method, which returns a user-friendly StructureGraph
        class with bonding information. By default, this returns
        a StructureGraph with edge weights as bond lengths, but
        can optionally return a graph with edge weights as any
        property supported by the `CriticalPoint` class, such as
        bond ellipticity.

        This class also provides an interface to explore just the
        non-symmetrically-equivalent critical points via the
        `critical_points` attribute, and also all critical
        points (via nodes dict) and connections between them
        (via edges dict). The user should be familiar with critic2
        before trying to understand these.

        Indexes of nucleus critical points in the nodes dict are the
        same as the corresponding sites in structure, with indices of
        other critical points arbitrarily assigned.

        Only one of (stdout, cpreport) required, with cpreport preferred
        since this is a new, native JSON output from critic2.

        :param structure: associated Structure
        :param stdout: stdout from running critic2 in automatic
            mode
        :param stderr: stderr from running critic2 in automatic
            mode
        :param cpreport: json output from CPREPORT command
        :param yt: json output from YT command
        :param zpsp (dict): Dict of element/symbol name to number of electrons
            (ZVAL in VASP pseudopotential), with which to calculate charge transfer.
            Optional.
        :raises ValueError: if neither cpreport nor stdout is given
        """
        self.structure = structure
        self._stdout = stdout
        self._stderr = stderr
        self._cpreport = cpreport
        self._yt = yt
        self._zpsp = zpsp
        # nodes/edges are populated by the parser helpers below.
        self.nodes = {}
        self.edges = {}
        # If YT integration output is available, annotate sites with volumes
        # and charges (and charge transfer when zpsp is given).
        if yt:
            self.structure = self._annotate_structure_with_yt(yt, structure, zpsp)
        # Prefer the native JSON cpreport over scraping stdout.
        if cpreport:
            self._parse_cpreport(cpreport)
        elif stdout:
            self._parse_stdout(stdout)
        else:
            raise ValueError("One of cpreport or stdout required.")
        self._remap_indices()
def structure_graph(self, include_critical_points=("bond", "ring", "cage")):
    """
    A StructureGraph object describing bonding information in the crystal.

    Args:
        include_critical_points: add DummySpecies for the critical points
            themselves, a list of "nucleus", "bond", "ring", "cage";
            set to None to disable

    Returns: a StructureGraph
    """
    structure = self.structure.copy()
    # maps a critical-point node index to the index of the dummy site
    # appended to the structure for it (only populated when critical
    # points are included as sites)
    point_idx_to_struct_idx = {}
    if include_critical_points:
        # atoms themselves don't have field information
        # so set to 0
        for prop in ("ellipticity", "laplacian", "field"):
            structure.add_site_property(prop, [0] * len(structure))
        for idx, node in self.nodes.items():
            cp = self.critical_points[node["unique_idx"]]
            if cp.type.value in include_critical_points:
                specie = DummySpecies("X{}cp".format(cp.type.value[0]), oxidation_state=None)
                structure.append(
                    specie,
                    node["frac_coords"],
                    properties={
                        "ellipticity": cp.ellipticity,
                        "laplacian": cp.laplacian,
                        "field": cp.field,
                    },
                )
                point_idx_to_struct_idx[idx] = len(structure) - 1
    edge_weight = "bond_length"
    edge_weight_units = "Å"
    sg = StructureGraph.with_empty_graph(
        structure,
        name="bonds",
        edge_weight_name=edge_weight,
        edge_weight_units=edge_weight_units,
    )
    edges = self.edges.copy()
    idx_to_delete = []
    # check for duplicate bonds
    for idx, edge in edges.items():
        unique_idx = self.nodes[idx]["unique_idx"]
        # only check edges representing bonds, not rings
        if self.critical_points[unique_idx].type == CriticalPointType.bond:
            if idx not in idx_to_delete:
                for idx2, edge2 in edges.items():
                    if idx != idx2 and edge == edge2:
                        idx_to_delete.append(idx2)
                        warnings.warn(
                            "Duplicate edge detected, try re-running "
                            "critic2 with custom parameters to fix this. "
                            "Mostly harmless unless user is also "
                            "interested in rings/cages."
                        )
                        logger.debug(
                            "Duplicate edge between points {} (unique point {})"
                            "and {} ({}).".format(
                                idx,
                                self.nodes[idx]["unique_idx"],
                                idx2,
                                self.nodes[idx2]["unique_idx"],
                            )
                        )
    # and remove any duplicate bonds present
    for idx in idx_to_delete:
        del edges[idx]
    for idx, edge in edges.items():
        unique_idx = self.nodes[idx]["unique_idx"]
        # only add edges representing bonds, not rings
        if self.critical_points[unique_idx].type == CriticalPointType.bond:
            from_idx = edge["from_idx"]
            to_idx = edge["to_idx"]
            # have to also check bond is between nuclei if non-nuclear
            # attractors not in structure
            skip_bond = False
            if include_critical_points and "nnattr" not in include_critical_points:
                from_type = self.critical_points[self.nodes[from_idx]["unique_idx"]].type
                # BUGFIX: previously looked up from_idx here too, so the
                # "to" endpoint was never actually checked
                to_type = self.critical_points[self.nodes[to_idx]["unique_idx"]].type
                skip_bond = (from_type != CriticalPointType.nucleus) or (to_type != CriticalPointType.nucleus)
            if not skip_bond:
                from_lvec = edge["from_lvec"]
                to_lvec = edge["to_lvec"]
                relative_lvec = np.subtract(to_lvec, from_lvec)
                # for edge case of including nnattrs in bonding graph when other critical
                # points also included, indices may get mixed
                struct_from_idx = point_idx_to_struct_idx.get(from_idx, from_idx)
                struct_to_idx = point_idx_to_struct_idx.get(to_idx, to_idx)
                weight = self.structure.get_distance(struct_from_idx, struct_to_idx, jimage=relative_lvec)
                crit_point = self.critical_points[unique_idx]
                edge_properties = {
                    "field": crit_point.field,
                    "laplacian": crit_point.laplacian,
                    "ellipticity": crit_point.ellipticity,
                    "frac_coords": self.nodes[idx]["frac_coords"],
                }
                sg.add_edge(
                    struct_from_idx,
                    struct_to_idx,
                    from_jimage=from_lvec,
                    to_jimage=to_lvec,
                    weight=weight,
                    edge_properties=edge_properties,
                )
    return sg
def get_critical_point_for_site(self, n):
    """
    Look up the unique CriticalPoint associated with a site.

    Args:
        n: Site index n

    Returns: A CriticalPoint instance
    """
    node = self.nodes[n]
    return self.critical_points[node["unique_idx"]]
def get_volume_and_charge_for_site(self, n):
    """
    Look up YT integration results for a site.

    Args:
        n: Site index n

    Returns: A dict containing "volume" and "charge" keys,
        or None if YT integration not performed
    """
    # pylint: disable=E1101
    # _node_values is only populated when a YT integration has been
    # parsed; use getattr with a default so the documented "None if not
    # performed" contract holds even when the attribute was never set
    # (previously this raised AttributeError in that case).
    node_values = getattr(self, "_node_values", None)
    if not node_values:
        return None
    return node_values[n]
def _parse_cpreport(self, cpreport):
    """
    Parse the JSON output of critic2's CPREPORT command into
    self.critical_points, self.nodes and self.edges.

    All critic2 ids are 1-indexed and are converted to 0-indexed here.
    """
    # signature -> critical point type name; -3 is resolved separately
    # since it depends on whether the point sits on a nucleus
    signature_names = {3: "cage", 1: "ring", -1: "bond"}

    def classify(signature, is_nucleus):
        if signature == -3:
            return "nucleus" if is_nucleus else "nnattr"
        return signature_names.get(signature)

    bohr_to_angstrom = 0.529177
    in_bohr = cpreport["units"] == "bohr"
    self.critical_points = []
    for p in cpreport["critical_points"]["nonequivalent_cps"]:
        # Cartesian coordinates are only kept when they can be converted
        # to angstrom
        cart = [x * bohr_to_angstrom for x in p["cartesian_coordinates"]] if in_bohr else None
        self.critical_points.append(
            CriticalPoint(
                p["id"] - 1,
                classify(p["signature"], p["is_nucleus"]),
                p["fractional_coordinates"],
                p["point_group"],
                p["multiplicity"],
                p["field"],
                p["gradient"],
                coords=cart,
                field_hessian=p["hessian"],
            )
        )
    for p in cpreport["critical_points"]["cell_cps"]:
        self._add_node(
            idx=p["id"] - 1,
            unique_idx=p["nonequivalent_id"] - 1,
            frac_coords=p["fractional_coordinates"],
        )
        if "attractors" in p:
            attractors = p["attractors"]
            self._add_edge(
                idx=p["id"] - 1,
                from_idx=int(attractors[0]["cell_id"]) - 1,
                from_lvec=attractors[0]["lvec"],
                to_idx=int(attractors[1]["cell_id"]) - 1,
                to_lvec=attractors[1]["lvec"],
            )
def _remap_indices(self):
    """
    Re-maps indices on self.nodes and self.edges such that node indices match
    that of structure, and then sorts self.nodes by index.
    """
    # The order of nuclei reported by critic2 doesn't necessarily match
    # the order of sites in the Structure, because critic2 performs a
    # symmetrization step. Match each nucleus critical point to its
    # nearest structure site and re-index everything accordingly.
    # ensure frac coords are in [0,1] range
    frac_coords = np.array(self.structure.frac_coords) % 1
    kd = KDTree(frac_coords)
    node_mapping = {}  # critic2_index:structure_index
    for idx, node in self.nodes.items():
        cp = self.critical_points[node["unique_idx"]]
        if cp.type == CriticalPointType.nucleus:
            node_mapping[idx] = kd.query(node["frac_coords"])[1]
    if len(node_mapping) != len(self.structure):
        warnings.warn(
            "Check that all sites in input structure ({}) have "
            "been detected by critic2 ({}).".format(len(self.structure), len(node_mapping))
        )
    self.nodes = {node_mapping.get(idx, idx): node for idx, node in self.nodes.items()}
    for edge in self.edges.values():
        for key in ("from_idx", "to_idx"):
            edge[key] = node_mapping.get(edge[key], edge[key])
@staticmethod
def _annotate_structure_with_yt(yt, structure, zpsp):
    """
    Attach YT (Yu-Trinkle) integration results to a copy of structure as
    the site properties "bader_volume", "bader_charge" and, when zpsp is
    supplied, "bader_charge_transfer".

    :param yt: json output from YT command
    :param structure: associated Structure
    :param zpsp: dict of element/symbol name to number of electrons,
        used to compute charge transfer; may be None/empty
    :return: annotated copy of structure
    """
    volume_idx = charge_idx = None
    for prop in yt["integration"]["properties"]:
        label = prop["label"]
        if label == "Volume":
            volume_idx = prop["id"] - 1  # 1-indexed, change to 0
        elif label == "$chg_int":
            charge_idx = prop["id"] - 1

    def lookup(nonequiv_idx):
        # attractor list is expected to be ordered by id
        attractor = yt["integration"]["attractors"][nonequiv_idx - 1]
        if attractor["id"] != nonequiv_idx:
            raise ValueError(
                "List of attractors may be un-ordered (wanted id={}): {}".format(nonequiv_idx, attractor)
            )
        integrals = attractor["integrals"]
        return integrals[volume_idx], integrals[charge_idx]

    volumes, charges, charge_transfer = [], [], []
    for idx, site in enumerate(yt["structure"]["cell_atoms"]):
        # sanity check: YT site ordering must match the Structure's
        if not np.allclose(structure[idx].frac_coords, site["fractional_coordinates"]):
            raise IndexError(
                "Site in structure doesn't seem to match site in YT integration:\n{}\n{}".format(
                    structure[idx], site
                )
            )
        volume, charge = lookup(site["nonequivalent_id"])
        volumes.append(volume)
        charges.append(charge)
        if zpsp:
            species = structure[idx].species_string
            if species not in zpsp:
                raise ValueError(
                    "ZPSP argument does not seem compatible with species in structure ({}): {}".format(
                        species, zpsp
                    )
                )
            charge_transfer.append(charge - zpsp[species])
    annotated = structure.copy()
    annotated.add_site_property("bader_volume", volumes)
    annotated.add_site_property("bader_charge", charges)
    if zpsp:
        if len(charge_transfer) == len(charges):
            annotated.add_site_property("bader_charge_transfer", charge_transfer)
        else:
            warnings.warn("Something went wrong calculating charge transfer: {}".format(charge_transfer))
    return annotated
def _parse_stdout(self, stdout):
    """
    Parse critic2's plain-text stdout (automatic mode) into
    self.critical_points, self.nodes and self.edges.

    Deprecated: parsing is fragile since it keys on exact header strings
    in critic2's output; prefer the native JSON (CPREPORT/YT) parsers.

    :param stdout: full stdout text from a critic2 run
    """
    warnings.warn(
        "Parsing critic2 standard output is deprecated and will not be maintained, "
        "please use the native JSON output in future."
    )
    stdout = stdout.split("\n")
    # NOTE WE ARE USING 0-BASED INDEXING:
    # This is different from critic2 which
    # uses 1-based indexing, so all parsed
    # indices have 1 subtracted.
    # Parsing happens in two stages:
    # 1. We construct a list of unique critical points
    # (i.e. non-equivalent by the symmetry of the crystal)
    # and the properties of the field at those points
    # 2. We construct a list of nodes and edges describing
    # all critical points in the crystal
    # Steps 1. and 2. are essentially independent, except
    # that the critical points in 2. have a pointer to their
    # associated unique critical point in 1. so that more
    # information on that point can be retrieved if necessary.
    unique_critical_points = []
    # parse unique critical points
    # locate the table of unique critical points by its header and the
    # section that follows it
    for i, line in enumerate(stdout):
        if "mult name f |grad| lap" in line:
            start_i = i + 1
        elif "* Analysis of system bonds" in line:
            end_i = i - 2
    # if start_i and end_i haven't been found, we
    # need to re-evaluate assumptions in this parser!
    # NOTE(review): start_i/end_i are unbound (NameError) when those
    # headers are absent from the output.
    for i, line in enumerate(stdout):
        if start_i <= i <= end_i:
            # strip parentheses so whitespace-splitting yields clean tokens
            l = line.replace("(", "").replace(")", "").split()
            unique_idx = int(l[0]) - 1
            point_group = l[1]
            # type = l[2] # type from definition of critical point e.g. (3, -3)
            critical_point_type = l[3]  # type from name, e.g. nucleus
            frac_coords = [float(l[4]), float(l[5]), float(l[6])]
            multiplicity = float(l[7])
            # name = float(l[8])
            field = float(l[9])
            field_gradient = float(l[10])
            # laplacian = float(l[11])
            point = CriticalPoint(
                unique_idx,
                critical_point_type,
                frac_coords,
                point_group,
                multiplicity,
                field,
                field_gradient,
            )
            unique_critical_points.append(point)
    # second pass: attach the 3x3 field Hessian printed under each
    # "+ Critical point no. N" section to the matching unique point
    for i, line in enumerate(stdout):
        if "+ Critical point no." in line:
            unique_idx = int(line.split()[4]) - 1
        elif "Hessian:" in line:
            l1 = list(map(float, stdout[i + 1].split()))
            l2 = list(map(float, stdout[i + 2].split()))
            l3 = list(map(float, stdout[i + 3].split()))
            hessian = [
                [l1[0], l1[1], l1[2]],
                [l2[0], l2[1], l2[2]],
                [l3[0], l3[1], l3[2]],
            ]
            unique_critical_points[unique_idx].field_hessian = hessian
    self.critical_points = unique_critical_points
    # parse graph connecting critical points
    for i, line in enumerate(stdout):
        if "#cp ncp typ position " in line:
            start_i = i + 1
        elif "* Attractor connectivity matrix" in line:
            end_i = i - 2
    # if start_i and end_i haven't been found, we
    # need to re-evaluate assumptions in this parser!
    for i, line in enumerate(stdout):
        if start_i <= i <= end_i:
            l = line.replace("(", "").replace(")", "").split()
            idx = int(l[0]) - 1
            unique_idx = int(l[1]) - 1
            frac_coords = [float(l[3]), float(l[4]), float(l[5])]
            self._add_node(idx, unique_idx, frac_coords)
            # longer rows additionally describe the two attractors this
            # point connects, with their lattice-image vectors
            if len(l) > 6:
                from_idx = int(l[6]) - 1
                to_idx = int(l[10]) - 1
                self._add_edge(
                    idx,
                    from_idx=from_idx,
                    from_lvec=(int(l[7]), int(l[8]), int(l[9])),
                    to_idx=to_idx,
                    to_lvec=(int(l[11]), int(l[12]), int(l[13])),
                )
def _add_node(self, idx, unique_idx, frac_coords):
"""
Add information about a node describing a critical point.
:param idx: index
:param unique_idx: index of unique CriticalPoint,
used to look up more information of point (field etc.)
:param frac_coord: fractional co-ordinates of point
:return:
"""
self.nodes[idx] = {"unique_idx": unique_idx, "frac_coords": frac_coords}
def _add_edge(self, idx, from_idx, from_lvec, to_idx, to_lvec):
"""
Add information about an edge linking two critical points.
This actually describes two edges:
from_idx ------ idx ------ to_idx
However, in practice, from_idx and to_idx will typically be
atom nuclei, with the center node (idx) referring to a bond
critical point. Thus, it will be more convenient to model
this as a single edge linking nuclei with the properties
of the bond critical point stored as an edge attribute.
:param idx: index of node
:param from_idx: from index of node
:param from_lvec: vector of lattice image the from node is in
as tuple of ints
:param to_idx: to index of node
:param to_lvec: vector of lattice image the to node is in as
tuple of ints
:return:
"""
self.edges[idx] = {
"from_idx": from_idx,
"from_lvec": from_lvec,
"to_idx": to_idx,
"to_lvec": to_lvec,
}
| gmatteo/pymatgen | pymatgen/command_line/critic2_caller.py | Python | mit | 33,962 | [
"CRYSTAL",
"VASP",
"pymatgen"
] | f2ef746d1ef9024c022e47408d994aee2230f5b6d13b70a2e47fa6e4f7ba2ece |
# Copyright (C) 2009 Sun Microsystems, Inc.
#
# This file is part of VirtualBox Open Source Edition (OSE), as
# available from http://www.virtualbox.org. This file is free software;
# you can redistribute it and/or modify it under the terms of the GNU
# General Public License (GPL) as published by the Free Software
# Foundation, in version 2 as it comes in the "COPYING" file of the
# VirtualBox OSE distribution. VirtualBox OSE is distributed in the
# hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
#
# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
# Clara, CA 95054 USA or visit http://www.sun.com if you need
# additional information or have any questions.
#
# This file is autogenerated from VirtualBox.xidl, DO NOT EDIT!
#
class VirtualBoxReflectionInfo:
def __init__(self, isSym):
    # isSym selects symbolic lookup mode: when True, attribute access in
    # __getattr__ resolves constant names to their string names
    # (_ValuesFlatSym); when False, to their numeric values (_ValuesFlat).
    self.isSym = isSym
_Values = {
'SettingsVersion':{
'Null':0,'v1_0':1,'v1_1':2,'v1_2':3,'v1_3pre':4,'v1_3':5,'v1_4':6,'v1_5':7,'v1_6':8,'v1_7':9,'v1_8':10,'v1_9':11,'Future':12},
'AccessMode':{
'ReadOnly':1,'ReadWrite':2},
'MachineState':{
'Null':0,'PoweredOff':1,'Saved':2,'Teleported':3,'Aborted':4,'Running':5,'Paused':6,'Stuck':7,'Teleporting':8,'LiveSnapshotting':9,'Starting':10,'Stopping':11,'Saving':12,'Restoring':13,'TeleportingPausedVM':14,'TeleportingIn':15,'RestoringSnapshot':16,'DeletingSnapshot':17,'SettingUp':18,'FirstOnline':5,'LastOnline':13,'FirstTransient':8,'LastTransient':18},
'SessionState':{
'Null':0,'Closed':1,'Open':2,'Spawning':3,'Closing':4},
'CpuPropertyType':{
'Null':0,'PAE':1,'Synthetic':2},
'HWVirtExPropertyType':{
'Null':0,'Enabled':1,'Exclusive':2,'VPID':3,'NestedPaging':4},
'SessionType':{
'Null':0,'Direct':1,'Remote':2,'Existing':3},
'DeviceType':{
'Null':0,'Floppy':1,'DVD':2,'HardDisk':3,'Network':4,'USB':5,'SharedFolder':6},
'DeviceActivity':{
'Null':0,'Idle':1,'Reading':2,'Writing':3},
'ClipboardMode':{
'Disabled':0,'HostToGuest':1,'GuestToHost':2,'Bidirectional':3},
'Scope':{
'Global':0,'Machine':1,'Session':2},
'GuestStatisticType':{
'CPULoad_Idle':0,'CPULoad_Kernel':1,'CPULoad_User':2,'Threads':3,'Processes':4,'Handles':5,'MemoryLoad':6,'PhysMemTotal':7,'PhysMemAvailable':8,'PhysMemBalloon':9,'MemCommitTotal':10,'MemKernelTotal':11,'MemKernelPaged':12,'MemKernelNonpaged':13,'MemSystemCache':14,'PageFileSize':15,'SampleNumber':16,'MaxVal':17},
'BIOSBootMenuMode':{
'Disabled':0,'MenuOnly':1,'MessageAndMenu':2},
'ProcessorFeature':{
'HWVirtEx':0,'PAE':1,'LongMode':2,'NestedPaging':3},
'FirmwareType':{
'BIOS':1,'EFI':2,'EFI32':3,'EFI64':4,'EFIDUAL':5},
'VFSType':{
'File':1,'Cloud':2,'S3':3,'WebDav':4},
'VFSFileType':{
'Unknown':1,'Fifo':2,'DevChar':3,'Directory':4,'DevBlock':5,'File':6,'SymLink':7,'Socket':8,'WhiteOut':9},
'VirtualSystemDescriptionType':{
'Ignore':1,'OS':2,'Name':3,'Product':4,'Vendor':5,'Version':6,'ProductUrl':7,'VendorUrl':8,'Description':9,'License':10,'Miscellaneous':11,'CPU':12,'Memory':13,'HardDiskControllerIDE':14,'HardDiskControllerSATA':15,'HardDiskControllerSCSI':16,'HardDiskImage':17,'Floppy':18,'CDROM':19,'NetworkAdapter':20,'USBController':21,'SoundCard':22},
'VirtualSystemDescriptionValueType':{
'Reference':1,'Original':2,'Auto':3,'ExtraConfig':4},
'HostNetworkInterfaceMediumType':{
'Unknown':0,'Ethernet':1,'PPP':2,'SLIP':3},
'HostNetworkInterfaceStatus':{
'Unknown':0,'Up':1,'Down':2},
'HostNetworkInterfaceType':{
'Bridged':1,'HostOnly':2},
'MediumState':{
'NotCreated':0,'Created':1,'LockedRead':2,'LockedWrite':3,'Inaccessible':4,'Creating':5,'Deleting':6},
'MediumType':{
'Normal':0,'Immutable':1,'Writethrough':2},
'MediumVariant':{
'Standard':0,'VmdkSplit2G':0x01,'VmdkStreamOptimized':0x04,'VmdkESX':0x08,'Fixed':0x10000,'Diff':0x20000},
'DataType':{
'Int32':0,'Int8':1,'String':2},
'DataFlags':{
'None':0x00,'Mandatory':0x01,'Expert':0x02,'Array':0x04,'FlagMask':0x07},
'MediumFormatCapabilities':{
'Uuid':0x01,'CreateFixed':0x02,'CreateDynamic':0x04,'CreateSplit2G':0x08,'Differencing':0x10,'Asynchronous':0x20,'File':0x40,'Properties':0x80,'CapabilityMask':0xFF},
'MouseButtonState':{
'LeftButton':0x01,'RightButton':0x02,'MiddleButton':0x04,'WheelUp':0x08,'WheelDown':0x10,'XButton1':0x20,'XButton2':0x40,'MouseStateMask':0x7F},
'FramebufferPixelFormat':{
'Opaque':0,'FOURCC_RGB':0x32424752},
'NetworkAttachmentType':{
'Null':0,'NAT':1,'Bridged':2,'Internal':3,'HostOnly':4},
'NetworkAdapterType':{
'Null':0,'Am79C970A':1,'Am79C973':2,'I82540EM':3,'I82543GC':4,'I82545EM':5,'Virtio':6},
'PortMode':{
'Disconnected':0,'HostPipe':1,'HostDevice':2,'RawFile':3},
'USBDeviceState':{
'NotSupported':0,'Unavailable':1,'Busy':2,'Available':3,'Held':4,'Captured':5},
'USBDeviceFilterAction':{
'Null':0,'Ignore':1,'Hold':2},
'AudioDriverType':{
'Null':0,'WinMM':1,'OSS':2,'ALSA':3,'DirectSound':4,'CoreAudio':5,'MMPM':6,'Pulse':7,'SolAudio':8},
'AudioControllerType':{
'AC97':0,'SB16':1},
'VRDPAuthType':{
'Null':0,'External':1,'Guest':2},
'StorageBus':{
'Null':0,'IDE':1,'SATA':2,'SCSI':3,'Floppy':4},
'StorageControllerType':{
'Null':0,'LsiLogic':1,'BusLogic':2,'IntelAhci':3,'PIIX3':4,'PIIX4':5,'ICH6':6,'I82078':7}}
_ValuesFlat = {
'SettingsVersion_Null':0,
'SettingsVersion_v1_0':1,
'SettingsVersion_v1_1':2,
'SettingsVersion_v1_2':3,
'SettingsVersion_v1_3pre':4,
'SettingsVersion_v1_3':5,
'SettingsVersion_v1_4':6,
'SettingsVersion_v1_5':7,
'SettingsVersion_v1_6':8,
'SettingsVersion_v1_7':9,
'SettingsVersion_v1_8':10,
'SettingsVersion_v1_9':11,
'SettingsVersion_Future':12,
'AccessMode_ReadOnly':1,
'AccessMode_ReadWrite':2,
'MachineState_Null':0,
'MachineState_PoweredOff':1,
'MachineState_Saved':2,
'MachineState_Teleported':3,
'MachineState_Aborted':4,
'MachineState_Running':5,
'MachineState_Paused':6,
'MachineState_Stuck':7,
'MachineState_Teleporting':8,
'MachineState_LiveSnapshotting':9,
'MachineState_Starting':10,
'MachineState_Stopping':11,
'MachineState_Saving':12,
'MachineState_Restoring':13,
'MachineState_TeleportingPausedVM':14,
'MachineState_TeleportingIn':15,
'MachineState_RestoringSnapshot':16,
'MachineState_DeletingSnapshot':17,
'MachineState_SettingUp':18,
'MachineState_FirstOnline':5,
'MachineState_LastOnline':13,
'MachineState_FirstTransient':8,
'MachineState_LastTransient':18,
'SessionState_Null':0,
'SessionState_Closed':1,
'SessionState_Open':2,
'SessionState_Spawning':3,
'SessionState_Closing':4,
'CpuPropertyType_Null':0,
'CpuPropertyType_PAE':1,
'CpuPropertyType_Synthetic':2,
'HWVirtExPropertyType_Null':0,
'HWVirtExPropertyType_Enabled':1,
'HWVirtExPropertyType_Exclusive':2,
'HWVirtExPropertyType_VPID':3,
'HWVirtExPropertyType_NestedPaging':4,
'SessionType_Null':0,
'SessionType_Direct':1,
'SessionType_Remote':2,
'SessionType_Existing':3,
'DeviceType_Null':0,
'DeviceType_Floppy':1,
'DeviceType_DVD':2,
'DeviceType_HardDisk':3,
'DeviceType_Network':4,
'DeviceType_USB':5,
'DeviceType_SharedFolder':6,
'DeviceActivity_Null':0,
'DeviceActivity_Idle':1,
'DeviceActivity_Reading':2,
'DeviceActivity_Writing':3,
'ClipboardMode_Disabled':0,
'ClipboardMode_HostToGuest':1,
'ClipboardMode_GuestToHost':2,
'ClipboardMode_Bidirectional':3,
'Scope_Global':0,
'Scope_Machine':1,
'Scope_Session':2,
'GuestStatisticType_CPULoad_Idle':0,
'GuestStatisticType_CPULoad_Kernel':1,
'GuestStatisticType_CPULoad_User':2,
'GuestStatisticType_Threads':3,
'GuestStatisticType_Processes':4,
'GuestStatisticType_Handles':5,
'GuestStatisticType_MemoryLoad':6,
'GuestStatisticType_PhysMemTotal':7,
'GuestStatisticType_PhysMemAvailable':8,
'GuestStatisticType_PhysMemBalloon':9,
'GuestStatisticType_MemCommitTotal':10,
'GuestStatisticType_MemKernelTotal':11,
'GuestStatisticType_MemKernelPaged':12,
'GuestStatisticType_MemKernelNonpaged':13,
'GuestStatisticType_MemSystemCache':14,
'GuestStatisticType_PageFileSize':15,
'GuestStatisticType_SampleNumber':16,
'GuestStatisticType_MaxVal':17,
'BIOSBootMenuMode_Disabled':0,
'BIOSBootMenuMode_MenuOnly':1,
'BIOSBootMenuMode_MessageAndMenu':2,
'ProcessorFeature_HWVirtEx':0,
'ProcessorFeature_PAE':1,
'ProcessorFeature_LongMode':2,
'ProcessorFeature_NestedPaging':3,
'FirmwareType_BIOS':1,
'FirmwareType_EFI':2,
'FirmwareType_EFI32':3,
'FirmwareType_EFI64':4,
'FirmwareType_EFIDUAL':5,
'VFSType_File':1,
'VFSType_Cloud':2,
'VFSType_S3':3,
'VFSType_WebDav':4,
'VFSFileType_Unknown':1,
'VFSFileType_Fifo':2,
'VFSFileType_DevChar':3,
'VFSFileType_Directory':4,
'VFSFileType_DevBlock':5,
'VFSFileType_File':6,
'VFSFileType_SymLink':7,
'VFSFileType_Socket':8,
'VFSFileType_WhiteOut':9,
'VirtualSystemDescriptionType_Ignore':1,
'VirtualSystemDescriptionType_OS':2,
'VirtualSystemDescriptionType_Name':3,
'VirtualSystemDescriptionType_Product':4,
'VirtualSystemDescriptionType_Vendor':5,
'VirtualSystemDescriptionType_Version':6,
'VirtualSystemDescriptionType_ProductUrl':7,
'VirtualSystemDescriptionType_VendorUrl':8,
'VirtualSystemDescriptionType_Description':9,
'VirtualSystemDescriptionType_License':10,
'VirtualSystemDescriptionType_Miscellaneous':11,
'VirtualSystemDescriptionType_CPU':12,
'VirtualSystemDescriptionType_Memory':13,
'VirtualSystemDescriptionType_HardDiskControllerIDE':14,
'VirtualSystemDescriptionType_HardDiskControllerSATA':15,
'VirtualSystemDescriptionType_HardDiskControllerSCSI':16,
'VirtualSystemDescriptionType_HardDiskImage':17,
'VirtualSystemDescriptionType_Floppy':18,
'VirtualSystemDescriptionType_CDROM':19,
'VirtualSystemDescriptionType_NetworkAdapter':20,
'VirtualSystemDescriptionType_USBController':21,
'VirtualSystemDescriptionType_SoundCard':22,
'VirtualSystemDescriptionValueType_Reference':1,
'VirtualSystemDescriptionValueType_Original':2,
'VirtualSystemDescriptionValueType_Auto':3,
'VirtualSystemDescriptionValueType_ExtraConfig':4,
'HostNetworkInterfaceMediumType_Unknown':0,
'HostNetworkInterfaceMediumType_Ethernet':1,
'HostNetworkInterfaceMediumType_PPP':2,
'HostNetworkInterfaceMediumType_SLIP':3,
'HostNetworkInterfaceStatus_Unknown':0,
'HostNetworkInterfaceStatus_Up':1,
'HostNetworkInterfaceStatus_Down':2,
'HostNetworkInterfaceType_Bridged':1,
'HostNetworkInterfaceType_HostOnly':2,
'MediumState_NotCreated':0,
'MediumState_Created':1,
'MediumState_LockedRead':2,
'MediumState_LockedWrite':3,
'MediumState_Inaccessible':4,
'MediumState_Creating':5,
'MediumState_Deleting':6,
'MediumType_Normal':0,
'MediumType_Immutable':1,
'MediumType_Writethrough':2,
'MediumVariant_Standard':0,
'MediumVariant_VmdkSplit2G':0x01,
'MediumVariant_VmdkStreamOptimized':0x04,
'MediumVariant_VmdkESX':0x08,
'MediumVariant_Fixed':0x10000,
'MediumVariant_Diff':0x20000,
'DataType_Int32':0,
'DataType_Int8':1,
'DataType_String':2,
'DataFlags_None':0x00,
'DataFlags_Mandatory':0x01,
'DataFlags_Expert':0x02,
'DataFlags_Array':0x04,
'DataFlags_FlagMask':0x07,
'MediumFormatCapabilities_Uuid':0x01,
'MediumFormatCapabilities_CreateFixed':0x02,
'MediumFormatCapabilities_CreateDynamic':0x04,
'MediumFormatCapabilities_CreateSplit2G':0x08,
'MediumFormatCapabilities_Differencing':0x10,
'MediumFormatCapabilities_Asynchronous':0x20,
'MediumFormatCapabilities_File':0x40,
'MediumFormatCapabilities_Properties':0x80,
'MediumFormatCapabilities_CapabilityMask':0xFF,
'MouseButtonState_LeftButton':0x01,
'MouseButtonState_RightButton':0x02,
'MouseButtonState_MiddleButton':0x04,
'MouseButtonState_WheelUp':0x08,
'MouseButtonState_WheelDown':0x10,
'MouseButtonState_XButton1':0x20,
'MouseButtonState_XButton2':0x40,
'MouseButtonState_MouseStateMask':0x7F,
'FramebufferPixelFormat_Opaque':0,
'FramebufferPixelFormat_FOURCC_RGB':0x32424752,
'NetworkAttachmentType_Null':0,
'NetworkAttachmentType_NAT':1,
'NetworkAttachmentType_Bridged':2,
'NetworkAttachmentType_Internal':3,
'NetworkAttachmentType_HostOnly':4,
'NetworkAdapterType_Null':0,
'NetworkAdapterType_Am79C970A':1,
'NetworkAdapterType_Am79C973':2,
'NetworkAdapterType_I82540EM':3,
'NetworkAdapterType_I82543GC':4,
'NetworkAdapterType_I82545EM':5,
'NetworkAdapterType_Virtio':6,
'PortMode_Disconnected':0,
'PortMode_HostPipe':1,
'PortMode_HostDevice':2,
'PortMode_RawFile':3,
'USBDeviceState_NotSupported':0,
'USBDeviceState_Unavailable':1,
'USBDeviceState_Busy':2,
'USBDeviceState_Available':3,
'USBDeviceState_Held':4,
'USBDeviceState_Captured':5,
'USBDeviceFilterAction_Null':0,
'USBDeviceFilterAction_Ignore':1,
'USBDeviceFilterAction_Hold':2,
'AudioDriverType_Null':0,
'AudioDriverType_WinMM':1,
'AudioDriverType_OSS':2,
'AudioDriverType_ALSA':3,
'AudioDriverType_DirectSound':4,
'AudioDriverType_CoreAudio':5,
'AudioDriverType_MMPM':6,
'AudioDriverType_Pulse':7,
'AudioDriverType_SolAudio':8,
'AudioControllerType_AC97':0,
'AudioControllerType_SB16':1,
'VRDPAuthType_Null':0,
'VRDPAuthType_External':1,
'VRDPAuthType_Guest':2,
'StorageBus_Null':0,
'StorageBus_IDE':1,
'StorageBus_SATA':2,
'StorageBus_SCSI':3,
'StorageBus_Floppy':4,
'StorageControllerType_Null':0,
'StorageControllerType_LsiLogic':1,
'StorageControllerType_BusLogic':2,
'StorageControllerType_IntelAhci':3,
'StorageControllerType_PIIX3':4,
'StorageControllerType_PIIX4':5,
'StorageControllerType_ICH6':6,
'StorageControllerType_I82078':7}
_ValuesFlatSym = {
'SettingsVersion_Null': 'Null',
'SettingsVersion_v1_0': 'v1_0',
'SettingsVersion_v1_1': 'v1_1',
'SettingsVersion_v1_2': 'v1_2',
'SettingsVersion_v1_3pre': 'v1_3pre',
'SettingsVersion_v1_3': 'v1_3',
'SettingsVersion_v1_4': 'v1_4',
'SettingsVersion_v1_5': 'v1_5',
'SettingsVersion_v1_6': 'v1_6',
'SettingsVersion_v1_7': 'v1_7',
'SettingsVersion_v1_8': 'v1_8',
'SettingsVersion_v1_9': 'v1_9',
'SettingsVersion_Future': 'Future',
'AccessMode_ReadOnly': 'ReadOnly',
'AccessMode_ReadWrite': 'ReadWrite',
'MachineState_Null': 'Null',
'MachineState_PoweredOff': 'PoweredOff',
'MachineState_Saved': 'Saved',
'MachineState_Teleported': 'Teleported',
'MachineState_Aborted': 'Aborted',
'MachineState_Running': 'Running',
'MachineState_Paused': 'Paused',
'MachineState_Stuck': 'Stuck',
'MachineState_Teleporting': 'Teleporting',
'MachineState_LiveSnapshotting': 'LiveSnapshotting',
'MachineState_Starting': 'Starting',
'MachineState_Stopping': 'Stopping',
'MachineState_Saving': 'Saving',
'MachineState_Restoring': 'Restoring',
'MachineState_TeleportingPausedVM': 'TeleportingPausedVM',
'MachineState_TeleportingIn': 'TeleportingIn',
'MachineState_RestoringSnapshot': 'RestoringSnapshot',
'MachineState_DeletingSnapshot': 'DeletingSnapshot',
'MachineState_SettingUp': 'SettingUp',
'MachineState_FirstOnline': 'FirstOnline',
'MachineState_LastOnline': 'LastOnline',
'MachineState_FirstTransient': 'FirstTransient',
'MachineState_LastTransient': 'LastTransient',
'SessionState_Null': 'Null',
'SessionState_Closed': 'Closed',
'SessionState_Open': 'Open',
'SessionState_Spawning': 'Spawning',
'SessionState_Closing': 'Closing',
'CpuPropertyType_Null': 'Null',
'CpuPropertyType_PAE': 'PAE',
'CpuPropertyType_Synthetic': 'Synthetic',
'HWVirtExPropertyType_Null': 'Null',
'HWVirtExPropertyType_Enabled': 'Enabled',
'HWVirtExPropertyType_Exclusive': 'Exclusive',
'HWVirtExPropertyType_VPID': 'VPID',
'HWVirtExPropertyType_NestedPaging': 'NestedPaging',
'SessionType_Null': 'Null',
'SessionType_Direct': 'Direct',
'SessionType_Remote': 'Remote',
'SessionType_Existing': 'Existing',
'DeviceType_Null': 'Null',
'DeviceType_Floppy': 'Floppy',
'DeviceType_DVD': 'DVD',
'DeviceType_HardDisk': 'HardDisk',
'DeviceType_Network': 'Network',
'DeviceType_USB': 'USB',
'DeviceType_SharedFolder': 'SharedFolder',
'DeviceActivity_Null': 'Null',
'DeviceActivity_Idle': 'Idle',
'DeviceActivity_Reading': 'Reading',
'DeviceActivity_Writing': 'Writing',
'ClipboardMode_Disabled': 'Disabled',
'ClipboardMode_HostToGuest': 'HostToGuest',
'ClipboardMode_GuestToHost': 'GuestToHost',
'ClipboardMode_Bidirectional': 'Bidirectional',
'Scope_Global': 'Global',
'Scope_Machine': 'Machine',
'Scope_Session': 'Session',
'GuestStatisticType_CPULoad_Idle': 'CPULoad_Idle',
'GuestStatisticType_CPULoad_Kernel': 'CPULoad_Kernel',
'GuestStatisticType_CPULoad_User': 'CPULoad_User',
'GuestStatisticType_Threads': 'Threads',
'GuestStatisticType_Processes': 'Processes',
'GuestStatisticType_Handles': 'Handles',
'GuestStatisticType_MemoryLoad': 'MemoryLoad',
'GuestStatisticType_PhysMemTotal': 'PhysMemTotal',
'GuestStatisticType_PhysMemAvailable': 'PhysMemAvailable',
'GuestStatisticType_PhysMemBalloon': 'PhysMemBalloon',
'GuestStatisticType_MemCommitTotal': 'MemCommitTotal',
'GuestStatisticType_MemKernelTotal': 'MemKernelTotal',
'GuestStatisticType_MemKernelPaged': 'MemKernelPaged',
'GuestStatisticType_MemKernelNonpaged': 'MemKernelNonpaged',
'GuestStatisticType_MemSystemCache': 'MemSystemCache',
'GuestStatisticType_PageFileSize': 'PageFileSize',
'GuestStatisticType_SampleNumber': 'SampleNumber',
'GuestStatisticType_MaxVal': 'MaxVal',
'BIOSBootMenuMode_Disabled': 'Disabled',
'BIOSBootMenuMode_MenuOnly': 'MenuOnly',
'BIOSBootMenuMode_MessageAndMenu': 'MessageAndMenu',
'ProcessorFeature_HWVirtEx': 'HWVirtEx',
'ProcessorFeature_PAE': 'PAE',
'ProcessorFeature_LongMode': 'LongMode',
'ProcessorFeature_NestedPaging': 'NestedPaging',
'FirmwareType_BIOS': 'BIOS',
'FirmwareType_EFI': 'EFI',
'FirmwareType_EFI32': 'EFI32',
'FirmwareType_EFI64': 'EFI64',
'FirmwareType_EFIDUAL': 'EFIDUAL',
'VFSType_File': 'File',
'VFSType_Cloud': 'Cloud',
'VFSType_S3': 'S3',
'VFSType_WebDav': 'WebDav',
'VFSFileType_Unknown': 'Unknown',
'VFSFileType_Fifo': 'Fifo',
'VFSFileType_DevChar': 'DevChar',
'VFSFileType_Directory': 'Directory',
'VFSFileType_DevBlock': 'DevBlock',
'VFSFileType_File': 'File',
'VFSFileType_SymLink': 'SymLink',
'VFSFileType_Socket': 'Socket',
'VFSFileType_WhiteOut': 'WhiteOut',
'VirtualSystemDescriptionType_Ignore': 'Ignore',
'VirtualSystemDescriptionType_OS': 'OS',
'VirtualSystemDescriptionType_Name': 'Name',
'VirtualSystemDescriptionType_Product': 'Product',
'VirtualSystemDescriptionType_Vendor': 'Vendor',
'VirtualSystemDescriptionType_Version': 'Version',
'VirtualSystemDescriptionType_ProductUrl': 'ProductUrl',
'VirtualSystemDescriptionType_VendorUrl': 'VendorUrl',
'VirtualSystemDescriptionType_Description': 'Description',
'VirtualSystemDescriptionType_License': 'License',
'VirtualSystemDescriptionType_Miscellaneous': 'Miscellaneous',
'VirtualSystemDescriptionType_CPU': 'CPU',
'VirtualSystemDescriptionType_Memory': 'Memory',
'VirtualSystemDescriptionType_HardDiskControllerIDE': 'HardDiskControllerIDE',
'VirtualSystemDescriptionType_HardDiskControllerSATA': 'HardDiskControllerSATA',
'VirtualSystemDescriptionType_HardDiskControllerSCSI': 'HardDiskControllerSCSI',
'VirtualSystemDescriptionType_HardDiskImage': 'HardDiskImage',
'VirtualSystemDescriptionType_Floppy': 'Floppy',
'VirtualSystemDescriptionType_CDROM': 'CDROM',
'VirtualSystemDescriptionType_NetworkAdapter': 'NetworkAdapter',
'VirtualSystemDescriptionType_USBController': 'USBController',
'VirtualSystemDescriptionType_SoundCard': 'SoundCard',
'VirtualSystemDescriptionValueType_Reference': 'Reference',
'VirtualSystemDescriptionValueType_Original': 'Original',
'VirtualSystemDescriptionValueType_Auto': 'Auto',
'VirtualSystemDescriptionValueType_ExtraConfig': 'ExtraConfig',
'HostNetworkInterfaceMediumType_Unknown': 'Unknown',
'HostNetworkInterfaceMediumType_Ethernet': 'Ethernet',
'HostNetworkInterfaceMediumType_PPP': 'PPP',
'HostNetworkInterfaceMediumType_SLIP': 'SLIP',
'HostNetworkInterfaceStatus_Unknown': 'Unknown',
'HostNetworkInterfaceStatus_Up': 'Up',
'HostNetworkInterfaceStatus_Down': 'Down',
'HostNetworkInterfaceType_Bridged': 'Bridged',
'HostNetworkInterfaceType_HostOnly': 'HostOnly',
'MediumState_NotCreated': 'NotCreated',
'MediumState_Created': 'Created',
'MediumState_LockedRead': 'LockedRead',
'MediumState_LockedWrite': 'LockedWrite',
'MediumState_Inaccessible': 'Inaccessible',
'MediumState_Creating': 'Creating',
'MediumState_Deleting': 'Deleting',
'MediumType_Normal': 'Normal',
'MediumType_Immutable': 'Immutable',
'MediumType_Writethrough': 'Writethrough',
'MediumVariant_Standard': 'Standard',
'MediumVariant_VmdkSplit2G': 'VmdkSplit2G',
'MediumVariant_VmdkStreamOptimized': 'VmdkStreamOptimized',
'MediumVariant_VmdkESX': 'VmdkESX',
'MediumVariant_Fixed': 'Fixed',
'MediumVariant_Diff': 'Diff',
'DataType_Int32': 'Int32',
'DataType_Int8': 'Int8',
'DataType_String': 'String',
'DataFlags_None': 'None',
'DataFlags_Mandatory': 'Mandatory',
'DataFlags_Expert': 'Expert',
'DataFlags_Array': 'Array',
'DataFlags_FlagMask': 'FlagMask',
'MediumFormatCapabilities_Uuid': 'Uuid',
'MediumFormatCapabilities_CreateFixed': 'CreateFixed',
'MediumFormatCapabilities_CreateDynamic': 'CreateDynamic',
'MediumFormatCapabilities_CreateSplit2G': 'CreateSplit2G',
'MediumFormatCapabilities_Differencing': 'Differencing',
'MediumFormatCapabilities_Asynchronous': 'Asynchronous',
'MediumFormatCapabilities_File': 'File',
'MediumFormatCapabilities_Properties': 'Properties',
'MediumFormatCapabilities_CapabilityMask': 'CapabilityMask',
'MouseButtonState_LeftButton': 'LeftButton',
'MouseButtonState_RightButton': 'RightButton',
'MouseButtonState_MiddleButton': 'MiddleButton',
'MouseButtonState_WheelUp': 'WheelUp',
'MouseButtonState_WheelDown': 'WheelDown',
'MouseButtonState_XButton1': 'XButton1',
'MouseButtonState_XButton2': 'XButton2',
'MouseButtonState_MouseStateMask': 'MouseStateMask',
'FramebufferPixelFormat_Opaque': 'Opaque',
'FramebufferPixelFormat_FOURCC_RGB': 'FOURCC_RGB',
'NetworkAttachmentType_Null': 'Null',
'NetworkAttachmentType_NAT': 'NAT',
'NetworkAttachmentType_Bridged': 'Bridged',
'NetworkAttachmentType_Internal': 'Internal',
'NetworkAttachmentType_HostOnly': 'HostOnly',
'NetworkAdapterType_Null': 'Null',
'NetworkAdapterType_Am79C970A': 'Am79C970A',
'NetworkAdapterType_Am79C973': 'Am79C973',
'NetworkAdapterType_I82540EM': 'I82540EM',
'NetworkAdapterType_I82543GC': 'I82543GC',
'NetworkAdapterType_I82545EM': 'I82545EM',
'NetworkAdapterType_Virtio': 'Virtio',
'PortMode_Disconnected': 'Disconnected',
'PortMode_HostPipe': 'HostPipe',
'PortMode_HostDevice': 'HostDevice',
'PortMode_RawFile': 'RawFile',
'USBDeviceState_NotSupported': 'NotSupported',
'USBDeviceState_Unavailable': 'Unavailable',
'USBDeviceState_Busy': 'Busy',
'USBDeviceState_Available': 'Available',
'USBDeviceState_Held': 'Held',
'USBDeviceState_Captured': 'Captured',
'USBDeviceFilterAction_Null': 'Null',
'USBDeviceFilterAction_Ignore': 'Ignore',
'USBDeviceFilterAction_Hold': 'Hold',
'AudioDriverType_Null': 'Null',
'AudioDriverType_WinMM': 'WinMM',
'AudioDriverType_OSS': 'OSS',
'AudioDriverType_ALSA': 'ALSA',
'AudioDriverType_DirectSound': 'DirectSound',
'AudioDriverType_CoreAudio': 'CoreAudio',
'AudioDriverType_MMPM': 'MMPM',
'AudioDriverType_Pulse': 'Pulse',
'AudioDriverType_SolAudio': 'SolAudio',
'AudioControllerType_AC97': 'AC97',
'AudioControllerType_SB16': 'SB16',
'VRDPAuthType_Null': 'Null',
'VRDPAuthType_External': 'External',
'VRDPAuthType_Guest': 'Guest',
'StorageBus_Null': 'Null',
'StorageBus_IDE': 'IDE',
'StorageBus_SATA': 'SATA',
'StorageBus_SCSI': 'SCSI',
'StorageBus_Floppy': 'Floppy',
'StorageControllerType_Null': 'Null',
'StorageControllerType_LsiLogic': 'LsiLogic',
'StorageControllerType_BusLogic': 'BusLogic',
'StorageControllerType_IntelAhci': 'IntelAhci',
'StorageControllerType_PIIX3': 'PIIX3',
'StorageControllerType_PIIX4': 'PIIX4',
'StorageControllerType_ICH6': 'ICH6',
'StorageControllerType_I82078': 'I82078'}
def __getattr__(self,attr):
if self.isSym:
v = self._ValuesFlatSym.get(attr)
else:
v = self._ValuesFlat.get(attr)
if v is not None:
return v
else:
raise AttributeError
| vienin/vlaunch | vboxapi/VirtualBox_constants.py | Python | gpl-2.0 | 36,420 | [
"VisIt"
] | d1163e0f74057da764c3c7602de33e214b44ddbf6749da24ccc9cc841e21b153 |
"""
# Notes:
- This simulation seeks to emulate the COBAHH benchmark simulations of (Brette
et al. 2007) using the Brian2 simulator for speed benchmark comparison to
DynaSim. However, this simulation includes CLOCK-DRIVEN synapses, for direct
comparison to DynaSim's clock-driven architecture. The synaptic connections
are "high-density", with a 90% probability of connection.
- The time taken to simulate will be indicated in the stdout log file
'~/batchdirs/brian_benchmark_COBAHH_clocksyn_hidens_8000/pbsout/brian_benchmark_COBAHH_clocksyn_hidens_8000.out'
- Note that this code has been slightly modified from the original (Brette et
al. 2007) benchmarking code, available here on ModelDB:
https://senselab.med.yale.edu/modeldb/showModel.cshtml?model=83319 in order
to work with version 2 of the Brian simulator (aka Brian2), and also modified
to change the model being benchmarked, etc.
# References:
- Brette R, Rudolph M, Carnevale T, Hines M, Beeman D, Bower JM, et al.
Simulation of networks of spiking neurons: A review of tools and strategies.
Journal of Computational Neuroscience 2007;23:349–98.
doi:10.1007/s10827-007-0038-6.
- Goodman D, Brette R. Brian: a simulator for spiking neural networks in Python.
Frontiers in Neuroinformatics 2008;2. doi:10.3389/neuro.11.005.2008.
"""
from brian2 import *
# Parameters
cells = 8000
defaultclock.dt = 0.01*ms
area = 20000*umetre**2
Cm = (1*ufarad*cmetre**-2) * area
gl = (5e-5*siemens*cmetre**-2) * area
El = -60*mV
EK = -90*mV
ENa = 50*mV
g_na = (100*msiemens*cmetre**-2) * area
g_kd = (30*msiemens*cmetre**-2) * area
VT = -63*mV
# Synaptic strengths
gAMPA = (0.1*msiemens*cmetre**-2)* area
gGABAA = (0.06*msiemens*cmetre**-2)* area
# Synaptic time constants
tauAMPA = 2
tauGABAA = 5
# Synaptic reversal potentials
EAMPA = 1*mV
EGABAA = -80*mV
# The model
eqs = Equations('''
dv/dt = (gl*(El-v)-
gAMPA/cells*sAMPAtotal*(v-EAMPA)-
gGABAA/cells*sGABAAtotal*(v-EGABAA)-
g_na*(m*m*m)*h*(v-ENa)-
g_kd*(n*n*n*n)*(v-EK))/Cm : volt
dm/dt = alpha_m*(1-m)-beta_m*m : 1
dn/dt = alpha_n*(1-n)-beta_n*n : 1
dh/dt = alpha_h*(1-h)-beta_h*h : 1
alpha_m = 0.32*(mV**-1)*(13*mV-v+VT)/
(exp((13*mV-v+VT)/(4*mV))-1.)/ms : Hz
beta_m = 0.28*(mV**-1)*(v-VT-40*mV)/
(exp((v-VT-40*mV)/(5*mV))-1)/ms : Hz
alpha_h = 0.128*exp((17*mV-v+VT)/(18*mV))/ms : Hz
beta_h = 4./(1+exp((40*mV-v+VT)/(5*mV)))/ms : Hz
alpha_n = 0.032*(mV**-1)*(15*mV-v+VT)/
(exp((15*mV-v+VT)/(5*mV))-1.)/ms : Hz
beta_n = .5*exp((10*mV-v+VT)/(40*mV))/ms : Hz
sAMPAtotal : 1
sGABAAtotal : 1
''')
# Construct intrinsic cells
P = NeuronGroup(cells, model=eqs, method='euler')
proportion=int(0.8*cells)
Pe = P[:proportion]
Pi = P[proportion:]
# Contruct synaptic network
sAMPA=Synapses(Pe,P,
model='''ds/dt=1000.*5.*(1 + tanh(v_pre/(4.*mV)))*(1-s)/ms - (s)/(2*ms) : 1 (clock-driven)
sAMPAtotal_post = s : 1 (summed)
''')
sAMPA.connect(p=0.90)
sGABAA_RETC=Synapses(Pi,P,
model='''ds/dt=1000.*2.*(1 + tanh(v_pre/(4.*mV)))*(1-s)/ms - s/(5*ms) : 1 (clock-driven)
sGABAAtotal_post = s : 1 (summed)
''')
sGABAA_RETC.connect(p=0.90)
# Initialization
P.v = 'El + (randn() * 5 - 5)*mV'
# Record a few traces
trace = StateMonitor(P, 'v', record=[1, 10, 100])
totaldata = StateMonitor(P, 'v', record=True)
run(0.5 * second, report='text')
# # If you want to plot:
# plot(trace.t/ms, trace[1].v/mV)
# plot(trace.t/ms, trace[10].v/mV)
# plot(trace.t/ms, trace[100].v/mV)
# xlabel('t (ms)')
# ylabel('v (mV)')
# show()
# # If you want to save data:
# print("Saving TC cell voltages!")
# numpy.savetxt("foo_totaldata.csv", totaldata.v/mV, delimiter=",")
| asoplata/dynasim-benchmark-brette-2007 | output/Brian2/brian2_benchmark_COBAHH_clocksyn_hidens_8000/brian2_benchmark_COBAHH_clocksyn_hidens_8000.py | Python | gpl-3.0 | 3,778 | [
"Brian"
] | 2486ebbe99456a5dc510f3521aa291cbf7f0607f554ad2b3ea2b3c3c99732b73 |
#!/usr/bin/python
################################################################################
#
# SOAP.py 0.9.7 - Cayce Ullman (cayce@actzero.com)
# Brian Matthews (blm@actzero.com)
#
# INCLUDED:
# - General SOAP Parser based on sax.xml (requires Python 2.0)
# - General SOAP Builder
# - SOAP Proxy for RPC client code
# - SOAP Server framework for RPC server code
#
# FEATURES:
# - Handles all of the types in the BDG
# - Handles faults
# - Allows namespace specification
# - Allows SOAPAction specification
# - Homogeneous typed arrays
# - Supports multiple schemas
# - Header support (mustUnderstand and actor)
# - XML attribute support
# - Multi-referencing support (Parser/Builder)
# - Understands SOAP-ENC:root attribute
# - Good interop, passes all client tests for Frontier, SOAP::LITE, SOAPRMI
# - Encodings
# - SSL clients (with OpenSSL configured in to Python)
# - SSL servers (with OpenSSL configured in to Python and M2Crypto installed)
#
# TODO:
# - Timeout on method calls - MCU
# - Arrays (sparse, multidimensional and partial) - BLM
# - Clean up data types - BLM
# - Type coercion system (Builder) - MCU
# - Early WSDL Support - MCU
# - Attachments - BLM
# - setup.py - MCU
# - mod_python example - MCU
# - medusa example - MCU
# - Documentation - JAG
# - Look at performance
#
################################################################################
#
# Copyright (c) 2001, Cayce Ullman.
# Copyright (c) 2001, Brian Matthews.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of actzero, inc. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
#
# Additional changes:
# 0.9.7.3 - 4/18/2002 - Mark Pilgrim (f8dy@diveintomark.org)
# added dump_dict as alias for dump_dictionary for Python 2.2 compatibility
# 0.9.7.2 - 4/12/2002 - Mark Pilgrim (f8dy@diveintomark.org)
# fixed logic to unmarshal the value of "null" attributes ("true" or "1"
# means true, others false)
# 0.9.7.1 - 4/11/2002 - Mark Pilgrim (f8dy@diveintomark.org)
# added "dump_str" as alias for "dump_string" for Python 2.2 compatibility
# Between 2.1 and 2.2, type("").__name__ changed from "string" to "str"
################################################################################
import xml.sax
import UserList
import base64
import cgi
import urllib
import exceptions
import copy
import re
import socket
import string
import sys
import time
import SocketServer
from types import *
try: from M2Crypto import SSL
except: pass
ident = '$Id: SOAP.py,v 1.1.1.1 2004/01/16 16:15:18 bluecoat93 Exp $'
__version__ = "0.9.7.3"
# Platform hackery
# Probe whether this platform's float() can parse the special IEEE-754
# values (NaN and the infinities).  good_float is 1 when they all parse,
# 0 otherwise; the marshalling code can consult it before emitting them.
try:
    float("NaN")
    float("INF")
    float("-INF")
    good_float = 1
except ValueError:
    # float() reports an unparseable literal with ValueError only; the
    # original bare "except:" would also have hidden unrelated failures.
    good_float = 0
################################################################################
# Exceptions
################################################################################
class Error(exceptions.Exception):
    """Base class for all errors raised by this SOAP module."""
    def __init__(self, msg):
        # msg: human-readable description of the failure
        self.msg = msg
    def __str__(self):
        return "<Error : %s>" % self.msg
    __repr__ = __str__
class RecursionError(Error):
    # Presumably raised when (un)marshalling hits a cyclic data structure
    # — name-based inference; confirm at the raise sites later in the file.
    pass
class UnknownTypeError(Error):
    # Presumably raised when a value cannot be mapped to a known SOAP/XSD
    # type — name-based inference; confirm at the raise sites.
    pass
class HTTPError(Error):
    # indicates an HTTP protocol error
    def __init__(self, code, msg):
        # code: HTTP status code, msg: reason phrase from the response
        self.code = code
        self.msg = msg
    def __str__(self):
        return "<HTTPError %s %s>" % (self.code, self.msg)
    __repr__ = __str__
##############################################################################
# Namespace Class
################################################################################
def invertDict(dict):
    """Return a new dictionary mapping each value of *dict* back to its key.

    If several keys share a value, the surviving key is whichever the
    iteration visits last (same as the original implementation).
    """
    inverted = {}
    for key, value in dict.items():
        inverted[value] = key
    return inverted
class NS:
    """Namespace constants: the SOAP/XSD/XSI URIs, their conventional
    prefixes (*_T names), and lookup tables between the two.  Not meant
    to be instantiated — it is a pure constant holder."""
    XML = "http://www.w3.org/XML/1998/namespace"
    ENV = "http://schemas.xmlsoap.org/soap/envelope/"
    ENC = "http://schemas.xmlsoap.org/soap/encoding/"
    # The three XML Schema revisions (1999/2000-10/2001) are all accepted.
    XSD = "http://www.w3.org/1999/XMLSchema"
    XSD2 = "http://www.w3.org/2000/10/XMLSchema"
    XSD3 = "http://www.w3.org/2001/XMLSchema"
    XSD_L = [XSD, XSD2, XSD3]
    EXSD_L= [ENC, XSD, XSD2, XSD3]
    XSI = "http://www.w3.org/1999/XMLSchema-instance"
    XSI2 = "http://www.w3.org/2000/10/XMLSchema-instance"
    XSI3 = "http://www.w3.org/2001/XMLSchema-instance"
    XSI_L = [XSI, XSI2, XSI3]
    URN = "http://soapinterop.org/xsd"
    # For generated messages
    XML_T = "xml"
    ENV_T = "SOAP-ENV"
    ENC_T = "SOAP-ENC"
    XSD_T = "xsd"
    XSD2_T= "xsd2"
    XSD3_T= "xsd3"
    XSI_T = "xsi"
    XSI2_T= "xsi2"
    XSI3_T= "xsi3"
    URN_T = "urn"
    # prefix -> URI, and the inverse URI -> prefix
    NSMAP = {ENV_T: ENV, ENC_T: ENC, XSD_T: XSD, XSD2_T: XSD2,
        XSD3_T: XSD3, XSI_T: XSI, XSI2_T: XSI2, XSI3_T: XSI3,
        URN_T: URN}
    NSMAP_R = invertDict(NSMAP)
    # schema-year style -> (types prefix, instance prefix), and inverse
    STMAP = {'1999': (XSD_T, XSI_T), '2000': (XSD2_T, XSI2_T),
        '2001': (XSD3_T, XSI3_T)}
    STMAP_R = invertDict(STMAP)
    def __init__(self):
        # Constants only — refuse instantiation.
        raise Error, "Don't instantiate this"
################################################################################
# Configuration class
################################################################################
class SOAPConfig:
    """Configuration knobs for marshalling/transport behavior.

    Attribute writes go through __setattr__, which implements several
    *linked* settings: assigning `namespaceStyle` updates the four
    types/schema namespace attributes, and assigning `debug` fans out to
    all of the dump/return flags.  SSLserver/SSLclient are detected at
    construction time and are read-only thereafter.
    """
    __readonly = ('SSLserver', 'SSLclient')
    def __init__(self, config = None, **kw):
        # Write detected/copied values straight into __dict__ to bypass
        # the __setattr__ machinery where appropriate.
        d = self.__dict__
        if config:
            # Copy-construct from another SOAPConfig (public attrs only).
            if not isinstance(config, SOAPConfig):
                raise AttributeError, \
                    "initializer must be SOAPConfig instance"
            s = config.__dict__
            for k, v in s.items():
                if k[0] != '_':
                    d[k] = v
        else:
            # Setting debug also sets returnFaultInfo, dumpFaultInfo,
            # dumpHeadersIn, dumpHeadersOut, dumpSOAPIn, and dumpSOAPOut
            self.debug = 0
            # Setting namespaceStyle sets typesNamespace, typesNamespaceURI,
            # schemaNamespace, and schemaNamespaceURI
            self.namespaceStyle = '1999'
            self.strictNamespaces = 0
            self.typed = 1
            self.buildWithNamespacePrefix = 1
            self.returnAllAttrs = 0
            # Feature-detect SSL support: M2Crypto's SSL for servers,
            # socket.ssl for clients.  NameError/AttributeError means the
            # feature is unavailable, hence the broad excepts.
            try: SSL; d['SSLserver'] = 1
            except: d['SSLserver'] = 0
            try: socket.ssl; d['SSLclient'] = 1
            except: d['SSLclient'] = 0
        # Keyword overrides (public names only) go through __setattr__ so
        # the linked-setting logic applies.
        for k, v in kw.items():
            if k[0] != '_':
                setattr(self, k, v)
    def __setattr__(self, name, value):
        if name in self.__readonly:
            raise AttributeError, "readonly configuration setting"
        d = self.__dict__
        if name in ('typesNamespace', 'typesNamespaceURI',
            'schemaNamespace', 'schemaNamespaceURI'):
            # Accept either a known prefix, a known URI, or a
            # (prefix, URI) pair; keep prefix and URI attrs in sync.
            if name[-3:] == 'URI':
                base, uri = name[:-3], 1
            else:
                base, uri = name, 0
            if type(value) == StringType:
                if NS.NSMAP.has_key(value):
                    n = (value, NS.NSMAP[value])
                elif NS.NSMAP_R.has_key(value):
                    n = (NS.NSMAP_R[value], value)
                else:
                    raise AttributeError, "unknown namespace"
            elif type(value) in (ListType, TupleType):
                if uri:
                    n = (value[1], value[0])
                else:
                    n = (value[0], value[1])
            else:
                raise AttributeError, "unknown namespace type"
            d[base], d[base + 'URI'] = n
            # Recompute the derived namespaceStyle; '' when the current
            # combination doesn't match a known style.
            try:
                d['namespaceStyle'] = \
                    NS.STMAP_R[(d['typesNamespace'], d['schemaNamespace'])]
            except:
                d['namespaceStyle'] = ''
        elif name == 'namespaceStyle':
            # Style year selects both the types and schema namespaces.
            value = str(value)
            if not NS.STMAP.has_key(value):
                raise AttributeError, "unknown namespace style"
            d[name] = value
            n = d['typesNamespace'] = NS.STMAP[value][0]
            d['typesNamespaceURI'] = NS.NSMAP[n]
            n = d['schemaNamespace'] = NS.STMAP[value][1]
            d['schemaNamespaceURI'] = NS.NSMAP[n]
        elif name == 'debug':
            # One switch fans out to every debug-related flag.
            d[name] = \
                d['returnFaultInfo'] = \
                d['dumpFaultInfo'] = \
                d['dumpHeadersIn'] = \
                d['dumpHeadersOut'] = \
                d['dumpSOAPIn'] = \
                d['dumpSOAPOut'] = value
        else:
            d[name] = value
# Module-level default configuration, used when callers don't pass their own.
Config = SOAPConfig()
################################################################################
# Types and Wrappers
################################################################################
class anyType:
    """Abstract base for all SOAP/XSD typed-value wrappers.

    Subclasses validate their value in _checkValueSpace, serialize it in
    _marshalData, and derive their XSD type name from the class name
    (see _typeName: "fooType" -> "foo").  Instances carry an optional
    element name (possibly (namespace, name)), XML attributes, and a
    cached marshalled string in _cache.
    """
    _validURIs = (NS.XSD, NS.XSD2, NS.XSD3, NS.ENC)
    def __init__(self, data = None, name = None, typed = 1, attrs = None):
        if self.__class__ == anyType:
            raise Error, "anyType can't be instantiated directly"
        # name may be a bare string or a (namespace, name) pair.
        if type(name) in (ListType, TupleType):
            self._ns, self._name = name
        else:
            self._ns, self._name = self._validURIs[0], name
        self._typed = typed
        self._attrs = {}
        self._cache = None          # memoized marshalled form
        self._type = self._typeName()
        self._data = self._checkValueSpace(data)
        if attrs != None:
            self._setAttrs(attrs)
    def __str__(self):
        if self._name:
            return "<%s %s at %d>" % (self.__class__, self._name, id(self))
        return "<%s at %d>" % (self.__class__, id(self))
    __repr__ = __str__
    def _checkValueSpace(self, data):
        # Subclass hook: validate/normalize data, raise on bad values.
        return data
    def _marshalData(self):
        # Subclass hook: string form of the value for the XML output.
        return str(self._data)
    def _marshalAttrs(self, ns_map, builder):
        # Render the stored attributes as ' prefix:name="value"' text,
        # resolving namespace prefixes through the builder.
        a = ''
        for attr, value in self._attrs.items():
            ns, n = builder.genns(ns_map, attr[0])
            a += n + ' %s%s="%s"' % \
                (ns, attr[1], cgi.escape(str(value), 1))
        return a
    def _fixAttr(self, attr):
        # Normalize an attribute key to a (namespace-or-None, name) tuple.
        if type(attr) in (StringType, UnicodeType):
            attr = (None, attr)
        elif type(attr) == ListType:
            attr = tuple(attr)
        elif type(attr) != TupleType:
            raise AttributeError, "invalid attribute type"
        if len(attr) != 2:
            raise AttributeError, "invalid attribute length"
        if type(attr[0]) not in (NoneType, StringType, UnicodeType):
            raise AttributeError, "invalid attribute namespace URI type"
        return attr
    def _getAttr(self, attr):
        # Return the attribute's value, or None when absent.
        attr = self._fixAttr(attr)
        try:
            return self._attrs[attr]
        except:
            return None
    def _setAttr(self, attr, value):
        # Values are always stored stringified.
        attr = self._fixAttr(attr)
        self._attrs[attr] = str(value)
    def _setAttrs(self, attrs):
        # Accepts a flat [key, value, key, value, ...] sequence, a dict,
        # or another anyType (whose attributes are copied).
        if type(attrs) in (ListType, TupleType):
            for i in range(0, len(attrs), 2):
                self._setAttr(attrs[i], attrs[i + 1])
            return
        if type(attrs) == DictType:
            d = attrs
        elif isinstance(attrs, anyType):
            d = attrs._attrs
        else:
            raise AttributeError, "invalid attribute type"
        for attr, value in d.items():
            self._setAttr(attr, value)
    def _setMustUnderstand(self, val):
        # SOAP-ENV:mustUnderstand header attribute accessor.
        self._setAttr((NS.ENV, "mustUnderstand"), val)
    def _getMustUnderstand(self):
        return self._getAttr((NS.ENV, "mustUnderstand"))
    def _setActor(self, val):
        # SOAP-ENV:actor header attribute accessor.
        self._setAttr((NS.ENV, "actor"), val)
    def _getActor(self):
        return self._getAttr((NS.ENV, "actor"))
    def _typeName(self):
        # "somethingType" -> "something": strip the "Type" suffix.
        return self.__class__.__name__[:-4]
    def _validNamespaceURI(self, URI, strict):
        # Return the namespace URI to emit for this value, or None for
        # untyped values; in strict mode unknown URIs are rejected.
        if not self._typed:
            return None
        if URI in self._validURIs:
            return URI
        if not strict:
            return self._ns
        raise AttributeError, \
            "not a valid namespace for type %s" % self._type
class voidType(anyType):
    # Placeholder for an empty/void value; no value-space checking.
    pass
class stringType(anyType):
    # xsd:string — value must be a str or unicode object.
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (StringType, UnicodeType):
            raise AttributeError, "invalid %s type" % self._type
        return data
class untypedType(stringType):
    # A string emitted without any xsi:type information (typed = 0).
    def __init__(self, data = None, name = None, attrs = None):
        stringType.__init__(self, data, name, 0, attrs)
# The remaining XSD string-derived types share stringType's value space;
# they differ only in the type name derived from the class name.
class IDType(stringType): pass
class NCNameType(stringType): pass
class NameType(stringType): pass
class ENTITYType(stringType): pass
class IDREFType(stringType): pass
class languageType(stringType): pass
class NMTOKENType(stringType): pass
class QNameType(stringType): pass
class tokenType(anyType):
    # xsd:token — a string with no tabs/newlines, no leading/trailing
    # spaces, and no internal runs of spaces (per the regex below).
    _validURIs = (NS.XSD2, NS.XSD3)
    __invalidre = '[\n\t]|^ | $| '
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (StringType, UnicodeType):
            raise AttributeError, "invalid %s type" % self._type
        # Lazily compile the class-level pattern on first use.
        if type(self.__invalidre) == StringType:
            self.__invalidre = re.compile(self.__invalidre)
            if self.__invalidre.search(data):
                raise ValueError, "invalid %s value" % self._type
        return data
class normalizedStringType(anyType):
    # xsd:normalizedString — a string containing no \n, \r or \t.
    _validURIs = (NS.XSD3,)
    __invalidre = '[\n\r\t]'
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (StringType, UnicodeType):
            raise AttributeError, "invalid %s type" % self._type
        # Lazily compile the class-level pattern on first use.
        if type(self.__invalidre) == StringType:
            self.__invalidre = re.compile(self.__invalidre)
            if self.__invalidre.search(data):
                raise ValueError, "invalid %s value" % self._type
        return data
class CDATAType(normalizedStringType):
    # 2000-10 schema name for normalizedString.
    _validURIs = (NS.XSD2,)
class booleanType(anyType):
    # xsd:boolean — stored internally as the int 0 or 1.
    def __int__(self):
        return self._data
    __nonzero__ = __int__
    def _marshalData(self):
        # Serialized as the XML literals "false"/"true".
        return ['false', 'true'][self._data]
    def _checkValueSpace(self, data):
        # Accepts the XML lexical forms as well as Python ints; note the
        # empty string is treated as false.
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if data in (0, '0', 'false', ''):
            return 0
        if data in (1, '1', 'true'):
            return 1
        raise ValueError, "invalid %s value" % self._type
class decimalType(anyType):
    # xsd:decimal — accepts any Python int/long/float, no range check.
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (IntType, LongType, FloatType):
            raise Error, "invalid %s value" % self._type
        return data
class floatType(anyType):
    # xsd:float — numeric value range-checked against IEEE-754 single
    # precision bounds.
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (IntType, LongType, FloatType) or \
            data < -3.4028234663852886E+38 or \
            data > 3.4028234663852886E+38:
            raise ValueError, "invalid %s value" % self._type
        return data
    def _marshalData(self):
        return "%.18g" % self._data # More precision
class doubleType(anyType):
    # xsd:double — numeric value range-checked against IEEE-754 double
    # precision bounds.
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (IntType, LongType, FloatType) or \
            data < -1.7976931348623158E+308 or \
            data > 1.7976931348623157E+308:
            raise ValueError, "invalid %s value" % self._type
        return data
    def _marshalData(self):
        return "%.18g" % self._data # More precision
class durationType(anyType):
    # xsd:duration — value is a 6-tuple (years, months, days, hours,
    # minutes, seconds); shorter inputs are right-aligned (padded on the
    # left with zeros).  Marshals to ISO-8601 "PnYnMnDTnHnMnS" form.
    _validURIs = (NS.XSD3,)
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        try:
            # A tuple or a scalar is OK, but make them into a list
            if type(data) == TupleType:
                data = list(data)
            elif type(data) != ListType:
                data = [data]
            if len(data) > 6:
                raise Exception, "too many values"
            # Now check the types of all the components, and find
            # the first nonzero element along the way.
            f = -1
            for i in range(len(data)):
                if data[i] == None:
                    data[i] = 0
                    continue
                if type(data[i]) not in \
                    (IntType, LongType, FloatType):
                    raise Exception, "element %d a bad type" % i
                if data[i] and f == -1:
                    f = i
            # If they're all 0, just use zero seconds.
            if f == -1:
                self._cache = 'PT0S'
                return (0,) * 6
            # Make sure only the last nonzero element has a decimal fraction
            # and only the first element is negative.
            d = -1
            for i in range(f, len(data)):
                if data[i]:
                    if d != -1:
                        raise Exception, \
                            "all except the last nonzero element must be " \
                            "integers"
                    if data[i] < 0 and i > f:
                        raise Exception, \
                            "only the first nonzero element can be negative"
                    elif data[i] != long(data[i]):
                        d = i
            # Pad the list on the left if necessary.
            if len(data) < 6:
                n = 6 - len(data)
                f += n
                d += n
                data = [0] * n + data
            # Save index of the first nonzero element and the decimal
            # element for _marshalData.
            self.__firstnonzero = f
            self.__decimal = d
        except Exception, e:
            raise ValueError, "invalid %s value - %s" % (self._type, e)
        return tuple(data)
    def _marshalData(self):
        if self._cache == None:
            d = self._data
            t = 0
            # A negative leading component makes the whole duration
            # negative ("-P...").
            if d[self.__firstnonzero] < 0:
                s = '-P'
            else:
                s = 'P'
            t = 0
            for i in range(self.__firstnonzero, len(d)):
                if d[i]:
                    # 'T' separates the date part from the time part and
                    # is emitted once, before the first time component.
                    if i > 2 and not t:
                        s += 'T'
                        t = 1
                    if self.__decimal == i:
                        s += "%g" % abs(d[i])
                    else:
                        s += "%d" % long(abs(d[i]))
                    s += ['Y', 'M', 'D', 'H', 'M', 'S'][i]
            self._cache = s
        return self._cache
class timeDurationType(durationType):
    # Pre-2001 schema name for duration.
    _validURIs = (NS.XSD, NS.XSD2, NS.ENC)
class dateTimeType(anyType):
    # xsd:dateTime — value is a (Y, M, D, h, m, s) tuple in UTC.  Accepts
    # an epoch timestamp (int/long/float, converted via time.gmtime), a
    # 6..9-element sequence, or None for "now".
    _validURIs = (NS.XSD3,)
    def _checkValueSpace(self, data):
        try:
            if data == None:
                data = time.time()
            if (type(data) in (IntType, LongType)):
                data = list(time.gmtime(data)[:6])
            elif (type(data) == FloatType):
                # Keep the fractional seconds across the gmtime round-trip.
                f = data - int(data)
                data = list(time.gmtime(int(data))[:6])
                data[5] += f
            elif type(data) in (ListType, TupleType):
                if len(data) < 6:
                    raise Exception, "not enough values"
                if len(data) > 9:
                    raise Exception, "too many values"
                data = list(data[:6])
                # cleanDate is defined elsewhere in this file; presumably
                # it validates/normalizes the component ranges — confirm.
                cleanDate(data)
            else:
                raise Exception, "invalid type"
        except Exception, e:
            raise ValueError, "invalid %s value - %s" % (self._type, e)
        return tuple(data)
    def _marshalData(self):
        if self._cache == None:
            d = self._data
            s = "%04d-%02d-%02dT%02d:%02d:%02d" % ((abs(d[0]),) + d[1:])
            if d[0] < 0:
                s = '-' + s
            # Append fractional seconds (without the leading "0") if any.
            f = d[5] - int(d[5])
            if f != 0:
                s += ("%g" % f)[1:]
            s += 'Z'
            self._cache = s
        return self._cache
class recurringInstantType(anyType):
    # xsd:recurringInstant (1999 schema) — like dateTime but leftmost
    # components may be None/omitted to express recurrence.
    _validURIs = (NS.XSD,)
    def _checkValueSpace(self, data):
        try:
            if data == None:
                data = list(time.gmtime(time.time())[:6])
            if (type(data) in (IntType, LongType)):
                data = list(time.gmtime(data)[:6])
            elif (type(data) == FloatType):
                # Keep the fractional seconds across the gmtime round-trip.
                f = data - int(data)
                data = list(time.gmtime(int(data))[:6])
                data[5] += f
            elif type(data) in (ListType, TupleType):
                if len(data) < 1:
                    raise Exception, "not enough values"
                if len(data) > 9:
                    raise Exception, "too many values"
                data = list(data[:6])
                if len(data) < 6:
                    data += [0] * (6 - len(data))
                # Only a leading run of Nones is allowed; f ends up as the
                # index of the first non-None component.
                f = len(data)
                for i in range(f):
                    if data[i] == None:
                        if f < i:
                            raise Exception, \
                                "only leftmost elements can be none"
                        else:
                            f = i
                            break
                cleanDate(data, f)
            else:
                raise Exception, "invalid type"
        except Exception, e:
            raise ValueError, "invalid %s value - %s" % (self._type, e)
        return tuple(data)
    def _marshalData(self):
        if self._cache == None:
            d = self._data
            e = list(d)
            neg = ''
            # Omitted (None/zero) leading components serialize as '-'.
            if e[0] < 0:
                neg = '-'
                e[0] = abs(e[0])
            if not e[0]:
                e[0] = '--'
            elif e[0] < 100:
                e[0] = '-' + "%02d" % e[0]
            else:
                e[0] = "%04d" % e[0]
            for i in range(1, len(e)):
                if e[i] == None or (i < 3 and e[i] == 0):
                    e[i] = '-'
                else:
                    if e[i] < 0:
                        neg = '-'
                        e[i] = abs(e[i])
                    e[i] = "%02d" % e[i]
            # Append fractional seconds (without the leading "0") if any.
            if d[5]:
                f = abs(d[5] - int(d[5]))
                if f:
                    e[5] += ("%g" % f)[1:]
            s = "%s%s-%s-%sT%s:%s:%sZ" % ((neg,) + tuple(e))
            self._cache = s
        return self._cache
class timeInstantType(dateTimeType):
    # Pre-2001 schema name for dateTime.
    _validURIs = (NS.XSD, NS.XSD2, NS.ENC)
class timePeriodType(dateTimeType):
    # 2000-10 schema timePeriod, same value space as dateTime here.
    _validURIs = (NS.XSD2, NS.ENC)
class timeType(anyType):
    # xsd:time — value is an (hour, minute, second) tuple in UTC.
    def _checkValueSpace(self, data):
        try:
            if data == None:
                data = time.gmtime(time.time())[3:6]
            elif (type(data) == FloatType):
                # Keep the fractional seconds across the gmtime round-trip.
                f = data - int(data)
                data = list(time.gmtime(int(data))[3:6])
                data[2] += f
            elif type(data) in (IntType, LongType):
                data = time.gmtime(data)[3:6]
            elif type(data) in (ListType, TupleType):
                if len(data) == 9:
                    # A full struct_time-like 9-tuple: take H/M/S.
                    data = data[3:6]
                elif len(data) > 3:
                    raise Exception, "too many values"
                data = [None, None, None] + list(data)
                if len(data) < 6:
                    data += [0] * (6 - len(data))
                cleanDate(data, 3)
                data = data[3:]
            else:
                raise Exception, "invalid type"
        except Exception, e:
            raise ValueError, "invalid %s value - %s" % (self._type, e)
        return tuple(data)
    def _marshalData(self):
        if self._cache == None:
            d = self._data
            s = ''
            s = time.strftime("%H:%M:%S", (0, 0, 0) + d + (0, 0, -1))
            # Append fractional seconds (without the leading "0") if any.
            f = d[2] - int(d[2])
            if f != 0:
                s += ("%g" % f)[1:]
            s += 'Z'
            self._cache = s
        return self._cache
class dateType(anyType):
    # xsd:date — value is a (year, month, day) tuple.
    def _checkValueSpace(self, data):
        try:
            if data == None:
                data = time.gmtime(time.time())[0:3]
            elif type(data) in (IntType, LongType, FloatType):
                data = time.gmtime(data)[0:3]
            elif type(data) in (ListType, TupleType):
                if len(data) == 9:
                    # A full struct_time-like 9-tuple: take Y/M/D.
                    data = data[0:3]
                elif len(data) > 3:
                    raise Exception, "too many values"
                data = list(data)
                # Missing month/day default to 1.
                if len(data) < 3:
                    data += [1, 1, 1][len(data):]
                data += [0, 0, 0]
                cleanDate(data)
                data = data[:3]
            else:
                raise Exception, "invalid type"
        except Exception, e:
            raise ValueError, "invalid %s value - %s" % (self._type, e)
        return tuple(data)
    def _marshalData(self):
        if self._cache == None:
            d = self._data
            s = "%04d-%02d-%02dZ" % ((abs(d[0]),) + d[1:])
            if d[0] < 0:
                s = '-' + s
            self._cache = s
        return self._cache
class gYearMonthType(anyType):
    # xsd:gYearMonth — value is a (year, month) tuple.
    _validURIs = (NS.XSD3,)
    def _checkValueSpace(self, data):
        try:
            if data == None:
                data = time.gmtime(time.time())[0:2]
            elif type(data) in (IntType, LongType, FloatType):
                data = time.gmtime(data)[0:2]
            elif type(data) in (ListType, TupleType):
                if len(data) == 9:
                    # A full struct_time-like 9-tuple: take Y/M.
                    data = data[0:2]
                elif len(data) > 2:
                    raise Exception, "too many values"
                data = list(data)
                # A missing month defaults to 1.
                if len(data) < 2:
                    data += [1, 1][len(data):]
                data += [1, 0, 0, 0]
                cleanDate(data)
                data = data[:2]
            else:
                raise Exception, "invalid type"
        except Exception, e:
            raise ValueError, "invalid %s value - %s" % (self._type, e)
        return tuple(data)
    def _marshalData(self):
        if self._cache == None:
            d = self._data
            s = "%04d-%02dZ" % ((abs(d[0]),) + d[1:])
            if d[0] < 0:
                s = '-' + s
            self._cache = s
        return self._cache
class gYearType(anyType):
    # xsd:gYear — value is a single integer year.
    _validURIs = (NS.XSD3,)
    def _checkValueSpace(self, data):
        try:
            if data == None:
                data = time.gmtime(time.time())[0:1]
            elif type(data) in (IntType, LongType, FloatType):
                data = [data]
            if type(data) in (ListType, TupleType):
                if len(data) == 9:
                    # A full struct_time-like 9-tuple: take the year.
                    data = data[0:1]
                elif len(data) < 1:
                    raise Exception, "too few values"
                elif len(data) > 1:
                    raise Exception, "too many values"
                if type(data[0]) == FloatType:
                    # Floats are accepted only when they are whole numbers.
                    try: s = int(data[0])
                    except: s = long(data[0])
                    if s != data[0]:
                        raise Exception, "not integral"
                    data = [s]
                elif type(data[0]) not in (IntType, LongType):
                    raise Exception, "bad type"
            else:
                raise Exception, "invalid type"
        except Exception, e:
            raise ValueError, "invalid %s value - %s" % (self._type, e)
        return data[0]
    def _marshalData(self):
        if self._cache == None:
            d = self._data
            s = "%04dZ" % abs(d)
            if d < 0:
                s = '-' + s
            self._cache = s
        return self._cache
class centuryType(anyType):
    # xsd:century (pre-2001 schemas) — value is a single integer century.
    _validURIs = (NS.XSD2, NS.ENC)
    def _checkValueSpace(self, data):
        try:
            if data == None:
                # NOTE(review): BUG — time.gmtime(...)[0:1] is a 1-tuple,
                # and tuple / 100 raises TypeError; presumably this was
                # meant to be time.gmtime(time.time())[0] / 100.
                data = time.gmtime(time.time())[0:1] / 100
            elif type(data) in (IntType, LongType, FloatType):
                data = [data]
            if type(data) in (ListType, TupleType):
                if len(data) == 9:
                    # NOTE(review): same BUG as above — data[0:1] is a
                    # sequence, dividing it by 100 raises TypeError.
                    data = data[0:1] / 100
                elif len(data) < 1:
                    raise Exception, "too few values"
                elif len(data) > 1:
                    raise Exception, "too many values"
                if type(data[0]) == FloatType:
                    # Floats are accepted only when they are whole numbers.
                    try: s = int(data[0])
                    except: s = long(data[0])
                    if s != data[0]:
                        raise Exception, "not integral"
                    data = [s]
                elif type(data[0]) not in (IntType, LongType):
                    raise Exception, "bad type"
            else:
                raise Exception, "invalid type"
        except Exception, e:
            raise ValueError, "invalid %s value - %s" % (self._type, e)
        return data[0]
    def _marshalData(self):
        if self._cache == None:
            d = self._data
            s = "%02dZ" % abs(d)
            if d < 0:
                s = '-' + s
            self._cache = s
        return self._cache
class yearType(gYearType):
    # Pre-2001 schema name for gYear.
    _validURIs = (NS.XSD2, NS.ENC)
class gMonthDayType(anyType):
    # xsd:gMonthDay — value is a (month, day) tuple.
    _validURIs = (NS.XSD3,)
    def _checkValueSpace(self, data):
        try:
            if data == None:
                data = time.gmtime(time.time())[1:3]
            elif type(data) in (IntType, LongType, FloatType):
                data = time.gmtime(data)[1:3]
            elif type(data) in (ListType, TupleType):
                if len(data) == 9:
                    # NOTE(review): suspicious — a 9-tuple is sliced with
                    # [0:2] (year, month) here, while the branches above
                    # take [1:3] (month, day); presumably should be [1:3].
                    data = data[0:2]
                elif len(data) > 2:
                    raise Exception, "too many values"
                data = list(data)
                # A missing day defaults to 1.
                if len(data) < 2:
                    data += [1, 1][len(data):]
                data = [0] + data + [0, 0, 0]
                cleanDate(data, 1)
                data = data[1:3]
            else:
                raise Exception, "invalid type"
        except Exception, e:
            raise ValueError, "invalid %s value - %s" % (self._type, e)
        return tuple(data)
    def _marshalData(self):
        if self._cache == None:
            self._cache = "--%02d-%02dZ" % self._data
        return self._cache
class recurringDateType(gMonthDayType):
    # Pre-2001 schema name for gMonthDay.
    _validURIs = (NS.XSD2, NS.ENC)
class gMonthType(anyType):
    # xsd:gMonth — value is a single integer month in 1..12.
    _validURIs = (NS.XSD3,)
    def _checkValueSpace(self, data):
        try:
            if data == None:
                data = time.gmtime(time.time())[1:2]
            elif type(data) in (IntType, LongType, FloatType):
                data = [data]
            if type(data) in (ListType, TupleType):
                if len(data) == 9:
                    # A full struct_time-like 9-tuple: take the month.
                    data = data[1:2]
                elif len(data) < 1:
                    raise Exception, "too few values"
                elif len(data) > 1:
                    raise Exception, "too many values"
                if type(data[0]) == FloatType:
                    # Floats are accepted only when they are whole numbers.
                    try: s = int(data[0])
                    except: s = long(data[0])
                    if s != data[0]:
                        raise Exception, "not integral"
                    data = [s]
                elif type(data[0]) not in (IntType, LongType):
                    raise Exception, "bad type"
                if data[0] < 1 or data[0] > 12:
                    raise Exception, "bad value"
            else:
                raise Exception, "invalid type"
        except Exception, e:
            raise ValueError, "invalid %s value - %s" % (self._type, e)
        return data[0]
    def _marshalData(self):
        if self._cache == None:
            self._cache = "--%02d--Z" % self._data
        return self._cache
class monthType(gMonthType):
    # Pre-2001 schema name for gMonth.
    _validURIs = (NS.XSD2, NS.ENC)
class gDayType(anyType):
    # xsd:gDay — value is a single integer day-of-month in 1..31.
    _validURIs = (NS.XSD3,)
    def _checkValueSpace(self, data):
        try:
            if data == None:
                data = time.gmtime(time.time())[2:3]
            elif type(data) in (IntType, LongType, FloatType):
                data = [data]
            if type(data) in (ListType, TupleType):
                if len(data) == 9:
                    # A full struct_time-like 9-tuple: take the day.
                    data = data[2:3]
                elif len(data) < 1:
                    raise Exception, "too few values"
                elif len(data) > 1:
                    raise Exception, "too many values"
                if type(data[0]) == FloatType:
                    # Floats are accepted only when they are whole numbers.
                    try: s = int(data[0])
                    except: s = long(data[0])
                    if s != data[0]:
                        raise Exception, "not integral"
                    data = [s]
                elif type(data[0]) not in (IntType, LongType):
                    raise Exception, "bad type"
                if data[0] < 1 or data[0] > 31:
                    raise Exception, "bad value"
            else:
                raise Exception, "invalid type"
        except Exception, e:
            raise ValueError, "invalid %s value - %s" % (self._type, e)
        return data[0]
    def _marshalData(self):
        if self._cache == None:
            self._cache = "---%02dZ" % self._data
        return self._cache
class recurringDayType(gDayType):
    # Pre-2001 schema name for gDay.
    _validURIs = (NS.XSD2, NS.ENC)
class hexBinaryType(anyType):
    # xsd:hexBinary — raw byte string, marshalled as hex digits via the
    # module-level encodeHexString helper (defined elsewhere in the file).
    _validURIs = (NS.XSD3,)
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (StringType, UnicodeType):
            raise AttributeError, "invalid %s type" % self._type
        return data
    def _marshalData(self):
        if self._cache == None:
            self._cache = encodeHexString(self._data)
        return self._cache
class base64BinaryType(anyType):
    # xsd:base64Binary — raw byte string, marshalled as base64 text.
    _validURIs = (NS.XSD3,)
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (StringType, UnicodeType):
            raise AttributeError, "invalid %s type" % self._type
        return data
    def _marshalData(self):
        if self._cache == None:
            self._cache = base64.encodestring(self._data)
        return self._cache
class base64Type(base64BinaryType):
    # SOAP encoding's base64 type, same value space as base64Binary.
    _validURIs = (NS.ENC,)
class binaryType(anyType):
    # 1999-schema/SOAP-ENC binary type whose wire encoding is selected by
    # an "encoding" XML attribute: either 'base64' or 'hex'.
    _validURIs = (NS.XSD, NS.ENC)
    def __init__(self, data, name = None, typed = 1, encoding = 'base64',
        attrs = None):
        anyType.__init__(self, data, name, typed, attrs)
        self._setAttr('encoding', encoding)
    def _marshalData(self):
        if self._cache == None:
            if self._getAttr((None, 'encoding')) == 'base64':
                self._cache = base64.encodestring(self._data)
            else:
                self._cache = encodeHexString(self._data)
        return self._cache
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (StringType, UnicodeType):
            raise AttributeError, "invalid %s type" % self._type
        return data
    def _setAttr(self, attr, value):
        # Intercept changes to the "encoding" attribute: validate it and
        # invalidate the cached marshalled form.
        attr = self._fixAttr(attr)
        if attr[1] == 'encoding':
            if attr[0] != None or value not in ('base64', 'hex'):
                raise AttributeError, "invalid encoding"
            self._cache = None
        anyType._setAttr(self, attr, value)
class anyURIType(anyType):
    # xsd:anyURI — string value, percent-quoted on output.
    _validURIs = (NS.XSD3,)
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (StringType, UnicodeType):
            raise AttributeError, "invalid %s type" % self._type
        return data
    def _marshalData(self):
        if self._cache == None:
            self._cache = urllib.quote(self._data)
        return self._cache
class uriType(anyURIType):
    # 1999 schema name for anyURI.
    _validURIs = (NS.XSD,)
class uriReferenceType(anyURIType):
    # 2000-10 schema name for anyURI.
    _validURIs = (NS.XSD2,)
class NOTATIONType(anyType):
    """xsd NOTATION: abstract per XML Schema — only derived types may be
    instantiated, never NOTATION itself."""
    def __init__(self, data, name = None, typed = 1, attrs = None):
        if self.__class__ == NOTATIONType:
            raise Error, "a NOTATION can't be instantiated directly"
        anyType.__init__(self, data, name, typed, attrs)
class ENTITIESType(anyType):
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) in (StringType, UnicodeType):
return (data,)
if type(data) not in (ListType, TupleType) or \
filter (lambda x: type(x) not in (StringType, UnicodeType), data):
raise AttributeError, "invalid %s type" % self._type
return data
def _marshalData(self):
return ' '.join(self._data)
class IDREFSType(ENTITIESType):
    # Same value space as ENTITIES: a list of string tokens.
    pass
class NMTOKENSType(ENTITIESType):
    # Same value space as ENTITIES: a list of string tokens.
    pass
class integerType(anyType):
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (IntType, LongType):
raise ValueError, "invalid %s value" % self._type
return data
class nonPositiveIntegerType(anyType):
_validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (IntType, LongType) or data > 0:
raise ValueError, "invalid %s value" % self._type
return data
class non_Positive_IntegerType(nonPositiveIntegerType):
    # 1999-schema spelling: the tag name is hyphenated.
    _validURIs = (NS.XSD,)

    def _typeName(self):
        return 'non-positive-integer'
class negativeIntegerType(anyType):
    """xsd negativeInteger: an int/long strictly less than zero."""
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)

    def _checkValueSpace(self, data):
        # Must be an int/long and < 0.
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (IntType, LongType) or data >= 0:
            raise ValueError, "invalid %s value" % self._type
        return data
class negative_IntegerType(negativeIntegerType):
    # 1999-schema spelling: the tag name is hyphenated.
    _validURIs = (NS.XSD,)

    def _typeName(self):
        return 'negative-integer'
class longType(anyType):
_validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (IntType, LongType) or \
data < -9223372036854775808L or \
data > 9223372036854775807L:
raise ValueError, "invalid %s value" % self._type
return data
class intType(anyType):
_validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (IntType, LongType) or \
data < -2147483648L or \
data > 2147483647:
raise ValueError, "invalid %s value" % self._type
return data
class shortType(anyType):
_validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (IntType, LongType) or \
data < -32768 or \
data > 32767:
raise ValueError, "invalid %s value" % self._type
return data
class byteType(anyType):
_validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (IntType, LongType) or \
data < -128 or \
data > 127:
raise ValueError, "invalid %s value" % self._type
return data
class nonNegativeIntegerType(anyType):
    """xsd nonNegativeInteger: an int/long no less than zero."""
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)

    def _checkValueSpace(self, data):
        # Must be an int/long and >= 0.
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (IntType, LongType) or data < 0:
            raise ValueError, "invalid %s value" % self._type
        return data
class non_Negative_IntegerType(nonNegativeIntegerType):
    # 1999-schema spelling: the tag name is hyphenated.
    _validURIs = (NS.XSD,)

    def _typeName(self):
        return 'non-negative-integer'
class unsignedLongType(anyType):
_validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (IntType, LongType) or \
data < 0 or \
data > 18446744073709551615L:
raise ValueError, "invalid %s value" % self._type
return data
class unsignedIntType(anyType):
_validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (IntType, LongType) or \
data < 0 or \
data > 4294967295L:
raise ValueError, "invalid %s value" % self._type
return data
class unsignedShortType(anyType):
_validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (IntType, LongType) or \
data < 0 or \
data > 65535:
raise ValueError, "invalid %s value" % self._type
return data
class unsignedByteType(anyType):
_validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (IntType, LongType) or \
data < 0 or \
data > 255:
raise ValueError, "invalid %s value" % self._type
return data
class positiveIntegerType(anyType):
    """xsd positiveInteger: an int/long strictly greater than zero."""
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)

    def _checkValueSpace(self, data):
        # Must be an int/long and > 0.
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (IntType, LongType) or data <= 0:
            raise ValueError, "invalid %s value" % self._type
        return data
class positive_IntegerType(positiveIntegerType):
    # 1999-schema spelling: the tag name is hyphenated.
    _validURIs = (NS.XSD,)

    def _typeName(self):
        return 'positive-integer'
# Now compound types
class compoundType(anyType):
    """Base class for SOAP structs and arrays.

    Members are tracked three ways in parallel — positionally in
    _aslist, by name in _asdict (repeated names collapse into a list),
    and in insertion order in _keyord — so both index and attribute
    access work on the parsed result.
    """
    def __init__(self, data = None, name = None, typed = 1, attrs = None):
        # Abstract: only subclasses (structType, arrayType, ...) may be
        # instantiated.
        if self.__class__ == compoundType:
            raise Error, "a compound can't be instantiated directly"

        anyType.__init__(self, data, name, typed, attrs)
        self._aslist = []
        self._asdict = {}
        self._keyord = []

        if type(data) == DictType:
            self.__dict__.update(data)

    def __getitem__(self, item):
        # Integer index -> positional member; anything else -> attribute.
        if type(item) == IntType:
            return self._aslist[item]
        return getattr(self, item)

    def __len__(self):
        return len(self._aslist)

    def __nonzero__(self):
        # Always truthy, even when empty (distinguishes an empty struct
        # from a missing one).
        return 1

    def _keys(self):
        # Public member names; leading-underscore names are internal.
        return filter(lambda x: x[0] != '_', self.__dict__.keys())

    def _addItem(self, name, value, attrs = None):
        # Append a member; a repeated name turns that entry into a list.
        d = self._asdict

        if d.has_key(name):
            if type(d[name]) != ListType:
                d[name] = [d[name]]
            d[name].append(value)
        else:
            d[name] = value

        self._keyord.append(name)
        self._aslist.append(value)
        self.__dict__[name] = d[name]

    def _placeItem(self, name, value, pos, subpos = 0, attrs = None):
        # Overwrite a previously added member in place (used when an
        # href reference is resolved after the element was first seen).
        d = self._asdict

        if subpos == 0 and type(d[name]) != ListType:
            d[name] = value
        else:
            d[name][subpos] = value

        self._keyord[pos] = name
        self._aslist[pos] = value
        self.__dict__[name] = d[name]

    def _getItemAsList(self, name, default = []):
        # Fetch a member as a list, wrapping a singleton; *default* when
        # the name is absent.
        try:
            d = self.__dict__[name]
        except:
            return default

        if type(d) == ListType:
            return d
        return [d]
class structType(compoundType):
    # A generic SOAP struct; all behavior comes from compoundType.
    pass
class headerType(structType):
    """The SOAP-ENV:Header element, a struct fixed to the name `Header'."""
    _validURIs = (NS.ENV,)

    def __init__(self, data = None, typed = 1, attrs = None):
        structType.__init__(self, data, "Header", typed, attrs)
class bodyType(structType):
    """The SOAP-ENV:Body element, a struct fixed to the name `Body'."""
    _validURIs = (NS.ENV,)

    def __init__(self, data = None, typed = 1, attrs = None):
        structType.__init__(self, data, "Body", typed, attrs)
class arrayType(UserList.UserList, compoundType):
def __init__(self, data = None, name = None, attrs = None,
offset = 0, rank = None, asize = 0, elemsname = None):
if data:
if type(data) not in (ListType, TupleType):
raise Error, "Data must be a sequence"
UserList.UserList.__init__(self, data)
compoundType.__init__(self, data, name, 0, attrs)
self._elemsname = elemsname or "item"
if data == None:
self._rank = rank
# According to 5.4.2.2 in the SOAP spec, each element in a
# sparse array must have a position. _posstate keeps track of
# whether we've seen a position or not. It's possible values
# are:
# -1 No elements have been added, so the state is indeterminate
# 0 An element without a position has been added, so no
# elements can have positions
# 1 An element with a position has been added, so all elements
# must have positions
self._posstate = -1
self._full = 0
if asize in ('', None):
asize = '0'
self._dims = map (lambda x: int(x), str(asize).split(','))
self._dims.reverse() # It's easier to work with this way
self._poss = [0] * len(self._dims) # This will end up
# reversed too
for i in range(len(self._dims)):
if self._dims[i] < 0 or \
self._dims[i] == 0 and len(self._dims) > 1:
raise TypeError, "invalid Array dimensions"
if offset > 0:
self._poss[i] = offset % self._dims[i]
offset = int(offset / self._dims[i])
# Don't break out of the loop if offset is 0 so we test all the
# dimensions for > 0.
if offset:
raise AttributeError, "invalid Array offset"
a = [None] * self._dims[0]
for i in range(1, len(self._dims)):
b = []
for j in range(self._dims[i]):
b.append(copy.deepcopy(a))
a = b
self.data = a
def _addItem(self, name, value, attrs):
if self._full:
raise ValueError, "Array is full"
pos = attrs.get((NS.ENC, 'position'))
if pos != None:
if self._posstate == 0:
raise AttributeError, \
"all elements in a sparse Array must have a " \
"position attribute"
self._posstate = 1
try:
if pos[0] == '[' and pos[-1] == ']':
pos = map (lambda x: int(x), pos[1:-1].split(','))
pos.reverse()
if len(pos) == 1:
pos = pos[0]
curpos = [0] * len(self._dims)
for i in range(len(self._dims)):
curpos[i] = pos % self._dims[i]
pos = int(pos / self._dims[i])
if pos == 0:
break
if pos:
raise Exception
elif len(pos) != len(self._dims):
raise Exception
else:
for i in range(len(self._dims)):
if pos[i] >= self._dims[i]:
raise Exception
curpos = pos
else:
raise Exception
except:
raise AttributeError, \
"invalid Array element position %s" % str(pos)
else:
if self._posstate == 1:
raise AttributeError, \
"only elements in a sparse Array may have a " \
"position attribute"
self._posstate = 0
curpos = self._poss
a = self.data
for i in range(len(self._dims) - 1, 0, -1):
a = a[curpos[i]]
if curpos[0] >= len(a):
a += [None] * (len(a) - curpos[0] + 1)
a[curpos[0]] = value
if pos == None:
self._poss[0] += 1
for i in range(len(self._dims) - 1):
if self._poss[i] < self._dims[i]:
break
self._poss[i] = 0
self._poss[i + 1] += 1
if self._dims[-1] and self._poss[-1] >= self._dims[-1]:
self._full = 1
def _placeItem(self, name, value, pos, subpos, attrs = None):
curpos = [0] * len(self._dims)
for i in range(len(self._dims)):
if self._dims[i] == 0:
curpos[0] = pos
break
curpos[i] = pos % self._dims[i]
pos = int(pos / self._dims[i])
if pos == 0:
break
if self._dims[i] != 0 and pos:
raise Error, "array index out of range"
a = self.data
for i in range(len(self._dims) - 1, 0, -1):
a = a[curpos[i]]
if curpos[0] >= len(a):
a += [None] * (len(a) - curpos[0] + 1)
a[curpos[0]] = value
class typedArrayType(arrayType):
    """An arrayType whose element type is known (stored in _type)."""
    def __init__(self, data = None, name = None, typed = None, attrs = None,
        offset = 0, rank = None, asize = 0, elemsname = None):

        arrayType.__init__(self, data, name, attrs, offset, rank, asize,
            elemsname)

        # (namespace, localname) of the element type, from arrayType="..."
        self._type = typed
class faultType(structType, Error):
    """A SOAP Fault: both a struct (for marshalling) and an exception
    (so it can be raised into the caller)."""
    def __init__(self, faultcode = "", faultstring = "", detail = None):
        self.faultcode = faultcode
        self.faultstring = faultstring
        # detail is only set when provided, so marshalling can skip it.
        if detail != None:
            self.detail = detail

        structType.__init__(self, None, 0)

    def _setDetail(self, detail = None):
        # Attach detail, or remove any existing detail when None.
        if detail != None:
            self.detail = detail
        else:
            try: del self.detail
            except AttributeError: pass

    def __repr__(self):
        return "<Fault %s: %s>" % (self.faultcode, self.faultstring)

    __str__ = __repr__
################################################################################
class RefHolder:
    """Placeholder stored where an href pointed at a not-yet-seen id.

    Remembers the owning frame and the position/subposition within it so
    the real value can be patched in via _placeItem once the id's
    element is parsed.
    """
    def __init__(self, name, frame):
        self.name = name
        self.parent = frame
        self.pos = len(frame)
        # subpos distinguishes repeated same-named siblings.
        self.subpos = frame.namecounts.get(name, 0)

    def __repr__(self):
        return "<%s %s at %d>" % (self.__class__, self.name, id(self))
################################################################################
# Utility infielders
################################################################################
def collapseWhiteSpace(s):
    """Collapse each internal run of whitespace in *s* to a single space
    and strip leading/trailing whitespace."""
    collapsed = re.sub(r'\s+', ' ', s)
    return collapsed.strip()
def decodeHexString(data):
    """Decode a hex-encoded string into raw bytes.

    Leading and trailing whitespace is ignored; the hex digits
    themselves must be contiguous and even in count. Raises ValueError
    on a non-hex character, an odd number of digits, or garbage after
    the trailing whitespace.
    """
    conv = {'0': 0x0, '1': 0x1, '2': 0x2, '3': 0x3, '4': 0x4,
        '5': 0x5, '6': 0x6, '7': 0x7, '8': 0x8, '9': 0x9, 'a': 0xa,
        'b': 0xb, 'c': 0xc, 'd': 0xd, 'e': 0xe, 'f': 0xf, 'A': 0xa,
        'B': 0xb, 'C': 0xc, 'D': 0xd, 'E': 0xe, 'F': 0xf,}

    ws = string.whitespace

    bin = ''

    i = 0

    # Skip leading whitespace.
    while i < len(data):
        if data[i] not in ws:
            break
        i += 1

    low = 0

    while i < len(data):
        c = data[i]

        if c in string.whitespace:
            break

        try:
            c = conv[c]
        except KeyError:
            raise ValueError("invalid hex string character `%s'" % c)

        if low:
            bin += chr(high * 16 + c)
            low = 0
        else:
            high = c
            low = 1

        i += 1

    if low:
        raise ValueError("invalid hex string length")

    # Only whitespace may follow the digits.
    while i < len(data):
        if data[i] not in string.whitespace:
            # BUGFIX: report the offending character data[i]; the
            # original reported `c', which here holds the *integer*
            # value of the last converted hex digit.
            raise ValueError("invalid hex string character `%s'" % data[i])

        i += 1

    return bin
def encodeHexString(data):
    """Return *data* encoded as an uppercase hexadecimal string."""
    return ''.join(["%02X" % ord(ch) for ch in data])
def leapMonth(year, month):
    """Return whether *month*/*year* is February of a Gregorian leap year."""
    if month != 2:
        return False
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
def cleanDate(d, first = 0):
    """Validate and normalize a 6-element date list in place.

    d is [year, month, day, hours, minutes, seconds]; validation starts
    at index *first* (earlier fields are ignored, which lets partial
    dates like gMonthDay reuse this). Floats must be integral except
    seconds; each field is range-checked, including a leap-aware day
    count. Raises ValueError or TypeError on bad input.
    """
    ranges = (None, (1, 12), (1, 31), (0, 23), (0, 59), (0, 61))
    months = (0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
    names = ('year', 'month', 'day', 'hours', 'minutes', 'seconds')

    if len(d) != 6:
        raise ValueError, "date must have 6 elements"

    for i in range(first, 6):
        s = d[i]

        if type(s) == FloatType:
            # Only seconds may be fractional; other fields must be
            # integral floats and are converted to int/long in place.
            if i < 5:
                try:
                    s = int(s)
                except OverflowError:
                    if i > 0:
                        raise
                    s = long(s)

                if s != d[i]:
                    raise ValueError, "%s must be integral" % names[i]
                d[i] = s
        elif type(s) == LongType:
            try: s = int(s)
            except: pass
        elif type(s) != IntType:
            raise TypeError, "%s isn't a valid type" % names[i]

        # The leading (most significant) field may be negative.
        if i == first and s < 0:
            continue

        if ranges[i] != None and \
            (s < ranges[i][0] or ranges[i][1] < s):
            raise ValueError, "%s out of range" % names[i]

    # 61 allows for double leap seconds in the regex, but not here.
    if first < 6 and d[5] >= 61:
        raise ValueError, "seconds out of range"

    if first < 2:
        leap = first < 1 and leapMonth(d[0], d[1])

        if d[2] > months[d[1]] + leap:
            raise ValueError, "day out of range"
class UnderflowError(exceptions.ArithmeticError):
    # Raised when a parsed numeric value is below its type's minimum;
    # complements the builtin OverflowError used for the maximum.
    pass
def debugHeader(title):
    """Print a 72-column banner line opening a debug section."""
    banner = '*** ' + title + ' '
    print(banner + '*' * (72 - len(banner)))
def debugFooter(title):
    """Print a closing 72-star rule and flush stdout (*title* is unused,
    kept for symmetry with debugHeader)."""
    print('*' * 72)
    sys.stdout.flush()
################################################################################
# SOAP Parser
################################################################################
class SOAPParser(xml.sax.handler.ContentHandler):
    """SAX ContentHandler that parses a SOAP envelope into Python
    objects (the anyType hierarchy above).

    After parsing, .body and .header hold the deserialized Body/Header
    structs, and .attrs maps id(value) -> that element's attributes.
    Multi-reference values (href="#id") are resolved via RefHolder
    placeholders that get back-patched when the id element is seen.
    """
    class Frame:
        """One element currently being parsed: accumulates the names,
        values and attributes of its children until it closes."""
        def __init__(self, name, kind = None, attrs = {}, rules = {}):
            self.name = name
            self.kind = kind
            self.attrs = attrs
            self.rules = rules

            self.contents = []
            self.names = []
            self.namecounts = {}
            self.subattrs = []

        def append(self, name, data, attrs):
            # Record a completed child element under this frame.
            self.names.append(name)
            self.contents.append(data)
            self.subattrs.append(attrs)

            if self.namecounts.has_key(name):
                self.namecounts[name] += 1
            else:
                self.namecounts[name] = 1

        def _placeItem(self, name, value, pos, subpos = 0, attrs = None):
            # Back-patch a previously appended child (href resolution).
            self.contents[pos] = value

            if attrs:
                self.attrs.update(attrs)

        def __len__(self):
            return len(self.contents)

        def __repr__(self):
            return "<%s %s at %d>" % (self.__class__, self.name, id(self))

    def __init__(self, rules = None):
        xml.sax.handler.ContentHandler.__init__(self)
        self.body = None
        self.header = None
        self.attrs = {}
        self._data = None
        # _next tracks what element is legal next: "E" Envelope,
        # "HorB" Header-or-Body, "B" Body, "" nothing, None anything.
        self._next = "E"        # Keeping state for message validity
        self._stack = [self.Frame('SOAP')]

        # Make two dictionaries to store the prefix <-> URI mappings, and
        # initialize them with the default
        self._prem = {NS.XML_T: NS.XML}
        self._prem_r = {NS.XML: NS.XML_T}
        self._ids = {}
        self._refs = {}
        self._rules = rules

    def startElementNS(self, name, qname, attrs):
        """Open an element: validate envelope layout, determine any
        declared array kind, and push a new Frame."""
        # Workaround two sax bugs
        if name[0] == None and name[1][0] == ' ':
            name = (None, name[1][1:])
        else:
            name = tuple(name)

        # First some checking of the layout of the message

        if self._next == "E":
            if name[1] != 'Envelope':
                raise Error, "expected `SOAP-ENV:Envelope', got `%s:%s'" % \
                    (self._prem_r[name[0]], name[1])
            if name[0] != NS.ENV:
                raise faultType, ("%s:VersionMismatch" % NS.ENV_T,
                    "Don't understand version `%s' Envelope" % name[0])
            else:
                self._next = "HorB"
        elif self._next == "HorB":
            if name[0] == NS.ENV and name[1] in ("Header", "Body"):
                self._next = None
            else:
                raise Error, \
                    "expected `SOAP-ENV:Header' or `SOAP-ENV:Body', " \
                    "got `%s'" % self._prem_r[name[0]] + ':' + name[1]
        elif self._next == "B":
            if name == (NS.ENV, "Body"):
                self._next = None
            else:
                raise Error, "expected `SOAP-ENV:Body', got `%s'" % \
                    self._prem_r[name[0]] + ':' + name[1]
        elif self._next == "":
            raise Error, "expected nothing, got `%s'" % \
                self._prem_r[name[0]] + ':' + name[1]

        # Top-level elements take the caller-supplied rules; deeper ones
        # follow the rule subtree for their name.
        if len(self._stack) == 2:
            rules = self._rules
        else:
            try:
                rules = self._stack[-1].rules[name[1]]
            except:
                rules = None

        if type(rules) not in (NoneType, DictType):
            kind = rules
        else:
            kind = attrs.get((NS.ENC, 'arrayType'))

            if kind != None:
                del attrs._attrs[(NS.ENC, 'arrayType')]

                i = kind.find(':')
                if i >= 0:
                    kind = (self._prem[kind[:i]], kind[i + 1:])
                else:
                    kind = None

        self.pushFrame(self.Frame(name[1], kind, attrs._attrs, rules))

        self._data = ''         # Start accumulating

    def pushFrame(self, frame):
        self._stack.append(frame)

    def popFrame(self):
        return self._stack.pop()

    def endElementNS(self, name, qname):
        """Close an element: convert the finished frame into a Python
        value (struct, array, simple type, None, or RefHolder) and add
        it to the parent frame.

        The `while 1' below is a break-driven dispatch: exactly one of
        the cases sets `data' and breaks out.
        """
        # Workaround two sax bugs
        if name[0] == None and name[1][0] == ' ':
            ns, name = None, name[1][1:]
        else:
            ns, name = tuple(name)

        if self._next == "E":
            raise Error, "didn't get SOAP-ENV:Envelope"
        if self._next in ("HorB", "B"):
            raise Error, "didn't get SOAP-ENV:Body"

        cur = self.popFrame()
        attrs = cur.attrs

        idval = None

        if attrs.has_key((None, 'id')):
            idval = attrs[(None, 'id')]

            if self._ids.has_key(idval):
                raise Error, "duplicate id `%s'" % idval

            del attrs[(None, 'id')]

        root = 1

        if len(self._stack) == 3:
            if attrs.has_key((NS.ENC, 'root')):
                root = int(attrs[(NS.ENC, 'root')])

                # Do some preliminary checks. First, if root="0" is present,
                # the element must have an id. Next, if root="n" is present,
                # n something other than 0 or 1, raise an exception.

                if root == 0:
                    if idval == None:
                        raise Error, "non-root element must have an id"
                elif root != 1:
                    raise Error, "SOAP-ENC:root must be `0' or `1'"

                del attrs[(NS.ENC, 'root')]

        while 1:
            # Case 1: an href reference — use the referenced value if
            # already seen, otherwise leave a RefHolder to patch later.
            href = attrs.get((None, 'href'))
            if href:
                if href[0] != '#':
                    raise Error, "only do local hrefs right now"
                if self._data != None and self._data.strip() != '':
                    raise Error, "hrefs can't have data"

                href = href[1:]

                if self._ids.has_key(href):
                    data = self._ids[href]
                else:
                    data = RefHolder(name, self._stack[-1])

                    if self._refs.has_key(href):
                        self._refs[href].append(data)
                    else:
                        self._refs[href] = [data]

                del attrs[(None, 'href')]

                break

            # Extract any xsi:type into (namespace, localname) form.
            kind = None

            if attrs:
                for i in NS.XSI_L:
                    if attrs.has_key((i, 'type')):
                        kind = attrs[(i, 'type')]
                        del attrs[(i, 'type')]

                if kind != None:
                    i = kind.find(':')
                    if i >= 0:
                        kind = (self._prem[kind[:i]], kind[i + 1:])
                    else:
                        # XXX What to do here? (None, kind) is just going to fail in convertType
                        kind = (None, kind)

            # Case 2: xsi:null / xsi:nil — the value is None.
            null = 0

            if attrs:
                for i in (NS.XSI, NS.XSI2):
                    if attrs.has_key((i, 'null')):
                        null = attrs[(i, 'null')]
                        del attrs[(i, 'null')]

                if attrs.has_key((NS.XSI3, 'nil')):
                    null = attrs[(NS.XSI3, 'nil')]
                    del attrs[(NS.XSI3, 'nil')]

                #MAP 4/12/2002 - must also support "true"
                #null = int(null)
                null = (str(null).lower() in ['true', '1'])

                if null:
                    if len(cur) or \
                        (self._data != None and self._data.strip() != ''):
                        raise Error, "nils can't have data"

                    data = None

                    break

            # Case 3: the envelope-level Header/Body/Fault elements.
            if len(self._stack) == 2:
                if (ns, name) == (NS.ENV, "Header"):
                    self.header = data = headerType(attrs = attrs)
                    self._next = "B"
                    break
                elif (ns, name) == (NS.ENV, "Body"):
                    self.body = data = bodyType(attrs = attrs)
                    self._next = ""
                    break
            elif len(self._stack) == 3 and self._next == None:
                if (ns, name) == (NS.ENV, "Fault"):
                    data = faultType()
                    self._next = ""
                    break

            # Case 4: a caller-supplied parse rule for this element.
            if cur.rules != None:
                rule = cur.rules

                if type(rule) in (StringType, UnicodeType):
                    # XXX Need a namespace here
                    rule = (None, rule)
                elif type(rule) == ListType:
                    rule = tuple(rule)

                # XXX What if rule != kind?
                if callable(rule):
                    data = rule(self._data)
                elif type(rule) == DictType:
                    data = structType(name = (ns, name), attrs = attrs)
                else:
                    data = self.convertType(self._data, rule, attrs)

                break

            # Case 5: a SOAP-ENC Array (kind captured by startElementNS).
            if (kind == None and cur.kind != None) or \
                (kind == (NS.ENC, 'Array')):
                kind = cur.kind

                if kind == None:
                    kind = 'ur-type[%d]' % len(cur)
                else:
                    kind = kind[1]

                if len(cur.namecounts) == 1:
                    elemsname = cur.names[0]
                else:
                    elemsname = None

                data = self.startArray((ns, name), kind, attrs, elemsname)

                break

            # Case 6: an untyped, empty top-level element — a struct.
            if len(self._stack) == 3 and kind == None and \
                len(cur) == 0 and \
                (self._data == None or self._data.strip() == ''):
                data = structType(name = (ns, name), attrs = attrs)
                break

            # Case 7: no children — a simple (scalar) type.
            if len(cur) == 0 and ns != NS.URN:
                # Nothing's been added to the current frame so it must be a
                # simple type.

                if kind == None:
                    # If the current item's container is an array, it will
                    # have a kind. If so, get the bit before the first [,
                    # which is the type of the array, therefore the type of
                    # the current item.

                    kind = self._stack[-1].kind

                    if kind != None:
                        i = kind[1].find('[')
                        if i >= 0:
                            kind = (kind[0], kind[1][:i])
                    elif ns != None:
                        kind = (ns, name)

                if kind != None:
                    try:
                        data = self.convertType(self._data, kind, attrs)
                    except UnknownTypeError:
                        data = None
                else:
                    data = None

                if data == None:
                    data = self._data or ''

                    if len(attrs) == 0:
                        try: data = str(data)
                        except: pass

                break

            # Case 8 (default): an element with children — a struct.
            data = structType(name = (ns, name), attrs = attrs)

            break

        # Move the frame's accumulated children into the new compound.
        if isinstance(data, compoundType):
            for i in range(len(cur)):
                v = cur.contents[i]
                data._addItem(cur.names[i], v, cur.subattrs[i])

                if isinstance(v, RefHolder):
                    v.parent = data

        if root:
            self._stack[-1].append(name, data, attrs)

        # Register the id and resolve any RefHolders waiting on it.
        if idval != None:
            self._ids[idval] = data

            if self._refs.has_key(idval):
                for i in self._refs[idval]:
                    i.parent._placeItem(i.name, data, i.pos, i.subpos, attrs)

                del self._refs[idval]

        self.attrs[id(data)] = attrs
        if isinstance(data, anyType):
            data._setAttrs(attrs)

        self._data = None       # Stop accumulating

    def endDocument(self):
        """Fail if any href was never matched by an id element."""
        if len(self._refs) == 1:
            raise Error, \
                "unresolved reference " + self._refs.keys()[0]
        elif len(self._refs) > 1:
            raise Error, \
                "unresolved references " + ', '.join(self._refs.keys())

    def startPrefixMapping(self, prefix, uri):
        # Track prefix <-> URI in both directions (for error messages).
        self._prem[prefix] = uri
        self._prem_r[uri] = prefix

    def endPrefixMapping(self, prefix):
        try:
            del self._prem_r[self._prem[prefix]]
            del self._prem[prefix]
        except:
            pass

    def characters(self, c):
        # Only accumulate between startElementNS and endElementNS.
        if self._data != None:
            self._data += c

    # Matches an arrayType value like "xsd:int[2,3]"; compiled lazily
    # (and memoized in place) by startArray.
    arrayre = '^(?:(?P<ns>[^:]*):)?' \
        '(?P<type>[^[]+)' \
        '(?:\[(?P<rank>,*)\])?' \
        '(?:\[(?P<asize>\d+(?:,\d+)*)?\])$'

    def startArray(self, name, kind, attrs, elemsname):
        """Create the (typed)arrayType instance for a SOAP-ENC Array
        from its arrayType string and optional offset attribute."""
        if type(self.arrayre) == StringType:
            self.arrayre = re.compile (self.arrayre)

        offset = attrs.get((NS.ENC, "offset"))

        if offset != None:
            del attrs[(NS.ENC, "offset")]

            try:
                if offset[0] == '[' and offset[-1] == ']':
                    offset = int(offset[1:-1])
                    if offset < 0:
                        raise Exception
                else:
                    raise Exception
            except:
                raise AttributeError, "invalid Array offset"
        else:
            offset = 0

        try:
            m = self.arrayre.search(kind)

            if m == None:
                raise Exception

            t = m.group('type')

            if t == 'ur-type':
                return arrayType(None, name, attrs, offset, m.group('rank'),
                    m.group('asize'), elemsname)
            elif m.group('ns') != None:
                return typedArrayType(None, name,
                    (self._prem[m.group('ns')], t), attrs, offset,
                    m.group('rank'), m.group('asize'), elemsname)
            else:
                return typedArrayType(None, name, (None, t), attrs, offset,
                    m.group('rank'), m.group('asize'), elemsname)
        except:
            raise AttributeError, "invalid Array type `%s'" % kind

    # Conversion

    class DATETIMECONSTS:
        """Regex fragments and assembled patterns for every XML Schema
        date/time flavor; convertDateTime compiles them on demand."""
        SIGNre = '(?P<sign>-?)'
        CENTURYre = '(?P<century>\d{2,})'
        YEARre = '(?P<year>\d{2})'
        MONTHre = '(?P<month>\d{2})'
        DAYre = '(?P<day>\d{2})'
        HOURre = '(?P<hour>\d{2})'
        MINUTEre = '(?P<minute>\d{2})'
        SECONDre = '(?P<second>\d{2}(?:\.\d*)?)'
        TIMEZONEre = '(?P<zulu>Z)|(?P<tzsign>[-+])(?P<tzhour>\d{2}):' \
            '(?P<tzminute>\d{2})'
        BOSre = '^\s*'
        EOSre = '\s*$'

        __allres = {'sign': SIGNre, 'century': CENTURYre, 'year': YEARre,
            'month': MONTHre, 'day': DAYre, 'hour': HOURre,
            'minute': MINUTEre, 'second': SECONDre, 'timezone': TIMEZONEre,
            'b': BOSre, 'e': EOSre}

        dateTime = '%(b)s%(sign)s%(century)s%(year)s-%(month)s-%(day)sT' \
            '%(hour)s:%(minute)s:%(second)s(%(timezone)s)?%(e)s' % __allres
        timeInstant = dateTime
        timePeriod = dateTime
        time = '%(b)s%(hour)s:%(minute)s:%(second)s(%(timezone)s)?%(e)s' % \
            __allres
        date = '%(b)s%(sign)s%(century)s%(year)s-%(month)s-%(day)s' \
            '(%(timezone)s)?%(e)s' % __allres
        century = '%(b)s%(sign)s%(century)s(%(timezone)s)?%(e)s' % __allres
        gYearMonth = '%(b)s%(sign)s%(century)s%(year)s-%(month)s' \
            '(%(timezone)s)?%(e)s' % __allres
        gYear = '%(b)s%(sign)s%(century)s%(year)s(%(timezone)s)?%(e)s' % \
            __allres
        year = gYear
        gMonthDay = '%(b)s--%(month)s-%(day)s(%(timezone)s)?%(e)s' % __allres
        recurringDate = gMonthDay
        gDay = '%(b)s---%(day)s(%(timezone)s)?%(e)s' % __allres
        recurringDay = gDay
        gMonth = '%(b)s--%(month)s--(%(timezone)s)?%(e)s' % __allres
        month = gMonth

        recurringInstant = '%(b)s%(sign)s(%(century)s|-)(%(year)s|-)-' \
            '(%(month)s|-)-(%(day)s|-)T' \
            '(%(hour)s|-):(%(minute)s|-):(%(second)s|-)' \
            '(%(timezone)s)?%(e)s' % __allres

        duration = '%(b)s%(sign)sP' \
            '((?P<year>\d+)Y)?' \
            '((?P<month>\d+)M)?' \
            '((?P<day>\d+)D)?' \
            '((?P<sep>T)' \
            '((?P<hour>\d+)H)?' \
            '((?P<minute>\d+)M)?' \
            '((?P<second>\d*(?:\.\d*)?)S)?)?%(e)s' % \
            __allres
        timeDuration = duration

        # The extra 31 on the front is:
        # - so the tuple is 1-based
        # - so months[month-1] is December's days if month is 1
        months = (31, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)

    def convertDateTime(self, value, kind):
        """Parse *value* against the DATETIMECONSTS pattern named *kind*
        and return a tuple of numeric fields (or a single number when
        only one field applies), normalized to UTC.

        Returns None when *kind* names no known pattern; raises Error
        on a malformed value.
        """
        def getZoneOffset(d):
            # Minutes to ADD to reach UTC (sign already flipped).
            zoffs = 0

            try:
                if d['zulu'] == None:
                    zoffs = 60 * int(d['tzhour']) + int(d['tzminute'])
                    if d['tzsign'] != '-':
                        zoffs = -zoffs
            except TypeError:
                pass

            return zoffs

        def applyZoneOffset(months, zoffs, date, minfield, posday = 1):
            # Add zoffs minutes to *date* in place, carrying overflow up
            # through minutes/hours/days/months/years. Fields below
            # index minfield are not meaningful for this kind.
            if zoffs == 0 and (minfield > 4 or 0 <= date[5] < 60):
                return date

            if minfield > 5: date[5] = 0
            if minfield > 4: date[4] = 0

            if date[5] < 0:
                date[4] += int(date[5]) / 60
                date[5] %= 60

            date[4] += zoffs

            if minfield > 3 or 0 <= date[4] < 60: return date
            date[3] += date[4] / 60
            date[4] %= 60

            if minfield > 2 or 0 <= date[3] < 24: return date
            date[2] += date[3] / 24
            date[3] %= 24

            if minfield > 1:
                if posday and date[2] <= 0:
                    date[2] += 31       # zoffs is at most 99:59, so the
                                        # day will never be less than -3
                return date

            while 1:
                # The date[1] == 3 (instead of == 2) is because we're
                # going back a month, so we need to know if the previous
                # month is February, so we test if this month is March.

                leap = minfield == 0 and date[1] == 3 and \
                    date[0] % 4 == 0 and \
                    (date[0] % 100 != 0 or date[0] % 400 == 0)

                if 0 < date[2] <= months[date[1]] + leap: break

                date[2] += months[date[1] - 1] + leap

                date[1] -= 1

                if date[1] > 0: break

                date[1] = 12

                if minfield > 0: break

                date[0] -= 1

            return date

        try:
            exp = getattr(self.DATETIMECONSTS, kind)
        except AttributeError:
            return None

        # Compile the pattern once and memoize it on the class.
        if type(exp) == StringType:
            exp = re.compile(exp)
            setattr (self.DATETIMECONSTS, kind, exp)

        m = exp.search(value)

        try:
            if m == None:
                raise Exception

            d = m.groupdict()
            f = ('century', 'year', 'month', 'day',
                'hour', 'minute', 'second')

            fn = len(f)         # Index of first non-None value
            r = []

            if kind in ('duration', 'timeDuration'):
                # A 'T' separator with no time fields is malformed.
                if d['sep'] != None and d['hour'] == None and \
                    d['minute'] == None and d['second'] == None:
                    raise Exception

                f = f[1:]

                for i in range(len(f)):
                    s = d[f[i]]

                    if s != None:
                        if f[i] == 'second':
                            s = float(s)
                        else:
                            try: s = int(s)
                            except ValueError: s = long(s)

                        if i < fn: fn = i

                    r.append(s)

                if fn > len(r):         # Any non-Nones?
                    raise Exception

                if d['sign'] == '-':
                    r[fn] = -r[fn]

                return tuple(r)

            if kind == 'recurringInstant':
                # Leading fields may be '-' (omitted); once a real field
                # appears, later omissions are errors.
                for i in range(len(f)):
                    s = d[f[i]]

                    if s == None or s == '-':
                        if i > fn:
                            raise Exception
                        s = None
                    else:
                        if i < fn:
                            fn = i

                        if f[i] == 'second':
                            s = float(s)
                        else:
                            try:
                                s = int(s)
                            except ValueError:
                                s = long(s)

                    r.append(s)

                # Fold century into the year field.
                s = r.pop(0)

                if fn == 0:
                    r[0] += s * 100
                else:
                    fn -= 1

                if fn < len(r) and d['sign'] == '-':
                    r[fn] = -r[fn]

                cleanDate(r, fn)

                return tuple(applyZoneOffset(self.DATETIMECONSTS.months,
                    getZoneOffset(d), r, fn, 0))

            # All remaining kinds: fill defaults, then overlay matches.
            r = [0, 0, 1, 1, 0, 0, 0]

            for i in range(len(f)):
                field = f[i]

                s = d.get(field)
                if s != None:
                    if field == 'second':
                        s = float(s)
                    else:
                        try:
                            s = int(s)
                        except ValueError:
                            s = long(s)

                    if i < fn:
                        fn = i

                    r[i] = s

            if fn > len(r):     # Any non-Nones?
                raise Exception

            # Fold century into the year field.
            s = r.pop(0)

            if fn == 0:
                r[0] += s * 100
            else:
                fn -= 1

            if d.get('sign') == '-':
                r[fn] = -r[fn]

            cleanDate(r, fn)

            zoffs = getZoneOffset(d)

            if zoffs:
                r = applyZoneOffset(self.DATETIMECONSTS.months, zoffs, r, fn)

            if kind == 'century':
                return r[0] / 100

            # Return only the fields this kind actually matched.
            s = []

            for i in range(1, len(f)):
                if d.has_key(f[i]):
                    s.append(r[i - 1])

            if len(s) == 1:
                return s[0]
            return tuple(s)
        except Exception, e:
            raise Error, "invalid %s value `%s' - %s" % (kind, value, e)

    # Per-type (wants-long, min, max) bounds used by convertType.
    intlimits = \
    {
        'nonPositiveInteger': (0, None, 0),
        'non-positive-integer': (0, None, 0),
        'negativeInteger': (0, None, -1),
        'negative-integer': (0, None, -1),
        'long': (1, -9223372036854775808L,
            9223372036854775807L),
        'int': (0, -2147483648L, 2147483647),
        'short': (0, -32768, 32767),
        'byte': (0, -128, 127),
        'nonNegativeInteger': (0, 0, None),
        'non-negative-integer': (0, 0, None),
        'positiveInteger': (0, 1, None),
        'positive-integer': (0, 1, None),
        'unsignedLong': (1, 0, 18446744073709551615L),
        'unsignedInt': (0, 0, 4294967295L),
        'unsignedShort': (0, 0, 65535),
        'unsignedByte': (0, 0, 255),
    }
    # (smallest positive, most negative, largest) per IEEE-754 type.
    floatlimits = \
    {
        'float': (7.0064923216240861E-46, -3.4028234663852886E+38,
            3.4028234663852886E+38),
        'double': (2.4703282292062327E-324, -1.7976931348623158E+308,
            1.7976931348623157E+308),
    }
    # A parsed float of 0.0 from text containing a nonzero digit means
    # the value underflowed; compiled lazily in convertType.
    zerofloatre = '[1-9]'

    def convertType(self, d, t, attrs):
        """Convert string data *d* to a Python value according to type
        *t*, a (namespace, localname) pair. Raises UnknownTypeError for
        an unrecognized type, Underflow/OverflowError for out-of-range
        numerics."""
        dnn = d or ''

        if t[0] in NS.EXSD_L:
            if t[1] == "integer":
                try:
                    d = int(d)
                    if len(attrs):
                        d = long(d)
                except:
                    d = long(d)
                return d
            if self.intlimits.has_key (t[1]):
                l = self.intlimits[t[1]]
                try: d = int(d)
                except: d = long(d)

                if l[1] != None and d < l[1]:
                    raise UnderflowError, "%s too small" % d
                if l[2] != None and d > l[2]:
                    raise OverflowError, "%s too large" % d

                if l[0] or len(attrs):
                    return long(d)
                return d
            if t[1] == "string":
                if len(attrs):
                    return unicode(dnn)
                try:
                    return str(dnn)
                except:
                    return dnn
            if t[1] == "boolean":
                d = d.strip().lower()
                if d in ('0', 'false'):
                    return 0
                if d in ('1', 'true'):
                    return 1
                raise AttributeError, "invalid boolean value"
            if self.floatlimits.has_key (t[1]):
                l = self.floatlimits[t[1]]
                s = d.strip().lower()
                try:
                    d = float(s)
                except:
                    # Some platforms don't implement the float stuff. This
                    # is close, but NaN won't be > "INF" as required by the
                    # standard.

                    if s in ("nan", "inf"):
                        return 1e300**2
                    if s == "-inf":
                        return -1e300**2

                    raise

                if str (d) == 'nan':
                    if s != 'nan':
                        raise ValueError, "invalid %s" % t[1]
                elif str (d) == '-inf':
                    if s != '-inf':
                        raise UnderflowError, "%s too small" % t[1]
                elif str (d) == 'inf':
                    if s != 'inf':
                        raise OverflowError, "%s too large" % t[1]
                elif d < 0:
                    if d < l[1]:
                        raise UnderflowError, "%s too small" % t[1]
                elif d > 0:
                    if d < l[0] or d > l[2]:
                        raise OverflowError, "%s too large" % t[1]
                elif d == 0:
                    if type(self.zerofloatre) == StringType:
                        self.zerofloatre = re.compile(self.zerofloatre)

                    if self.zerofloatre.search(s):
                        raise UnderflowError, "invalid %s" % t[1]

                return d
            if t[1] in ("dateTime", "date", "timeInstant", "time"):
                return self.convertDateTime(d, t[1])
            if t[1] == "decimal":
                return float(d)
            if t[1] in ("language", "QName", "NOTATION", "NMTOKEN", "Name",
                "NCName", "ID", "IDREF", "ENTITY"):
                return collapseWhiteSpace(d)
            if t[1] in ("IDREFS", "ENTITIES", "NMTOKENS"):
                d = collapseWhiteSpace(d)
                return d.split()
        if t[0] in NS.XSD_L:
            if t[1] in ("base64", "base64Binary"):
                return base64.decodestring(d)
            if t[1] == "hexBinary":
                return decodeHexString(d)
            if t[1] == "anyURI":
                return urllib.unquote(collapseWhiteSpace(d))
            if t[1] in ("normalizedString", "token"):
                return collapseWhiteSpace(d)
        if t[0] == NS.ENC:
            if t[1] == "base64":
                return base64.decodestring(d)
        if t[0] == NS.XSD:
            if t[1] == "binary":
                try:
                    e = attrs[(None, 'encoding')]

                    if e == 'hex':
                        return decodeHexString(d)
                    elif e == 'base64':
                        return base64.decodestring(d)
                except:
                    pass

                raise Error, "unknown or missing binary encoding"
            if t[1] == "uri":
                return urllib.unquote(collapseWhiteSpace(d))
            if t[1] == "recurringInstant":
                return self.convertDateTime(d, t[1])
        if t[0] in (NS.XSD2, NS.ENC):
            if t[1] == "uriReference":
                return urllib.unquote(collapseWhiteSpace(d))
            if t[1] == "timePeriod":
                return self.convertDateTime(d, t[1])
            if t[1] in ("century", "year"):
                return self.convertDateTime(d, t[1])
        if t[0] in (NS.XSD, NS.XSD2, NS.ENC):
            if t[1] == "timeDuration":
                return self.convertDateTime(d, t[1])
        if t[0] == NS.XSD3:
            if t[1] == "anyURI":
                return urllib.unquote(collapseWhiteSpace(d))
            if t[1] in ("gYearMonth", "gMonthDay"):
                return self.convertDateTime(d, t[1])
            if t[1] == "gYear":
                return self.convertDateTime(d, t[1])
            if t[1] == "gMonth":
                return self.convertDateTime(d, t[1])
            if t[1] == "gDay":
                return self.convertDateTime(d, t[1])
            if t[1] == "duration":
                return self.convertDateTime(d, t[1])
        if t[0] in (NS.XSD2, NS.XSD3):
            if t[1] == "token":
                return collapseWhiteSpace(d)
            if t[1] == "recurringDate":
                return self.convertDateTime(d, t[1])
            if t[1] == "month":
                return self.convertDateTime(d, t[1])
            if t[1] == "recurringDay":
                return self.convertDateTime(d, t[1])
        if t[0] == NS.XSD2:
            if t[1] == "CDATA":
                return collapseWhiteSpace(d)

        raise UnknownTypeError, "unknown type `%s'" % (t[0] + ':' + t[1])
################################################################################
# call to SOAPParser that keeps all of the info
################################################################################
def _parseSOAP(xml_str, rules = None):
    """Parse a SOAP document and return the SOAPParser instance itself.

    The returned parser object retains everything it collected (header,
    body, attrs), so callers can pick out whichever parts they need.

    xml_str -- the raw SOAP XML document (a byte string)
    rules   -- optional parsing rules passed through to SOAPParser
    """
    # cStringIO is the faster C implementation; fall back to the pure
    # Python StringIO where it is unavailable (Python 2 only).
    try:
        from cStringIO import StringIO
    except ImportError:
        from StringIO import StringIO

    parser = xml.sax.make_parser()
    t = SOAPParser(rules = rules)
    parser.setContentHandler(t)
    e = xml.sax.handler.ErrorHandler()
    parser.setErrorHandler(e)

    inpsrc = xml.sax.xmlreader.InputSource()
    inpsrc.setByteStream(StringIO(xml_str))

    # turn on namespace mangling so the parser reports (uri, localname)
    parser.setFeature(xml.sax.handler.feature_namespaces,1)

    parser.parse(inpsrc)

    return t
################################################################################
# SOAPParser's more public interface
################################################################################
def parseSOAP(xml_str, attrs = 0):
    """Parse a SOAP document and return its body.

    When *attrs* is true, return a ``(body, attrs)`` pair instead, where
    the second element is the attribute map the parser collected.
    """
    parsed = _parseSOAP(xml_str)

    if not attrs:
        return parsed.body
    return parsed.body, parsed.attrs
def parseSOAPRPC(xml_str, header = 0, body = 0, attrs = 0, rules = None):
    """Parse a SOAP-RPC message and return its payload.

    By default only the first element of the body (the RPC response
    struct) is returned; the optional flags request the header, body
    and/or attrs as well, in which case the result is the tuple
    (payload[, header][, body][, attrs]) in that order.
    """
    t = _parseSOAP(xml_str, rules = rules)
    p = t.body._aslist[0]

    # Empty string, for RPC this translates into a void
    if type(p) in (type(''), type(u'')) and p in ('', u''):
        # Synthesize an empty struct named after the first non-private
        # body element (falling back to "Response").
        name = "Response"
        for k in t.body.__dict__.keys():
            if k[0] != "_":
                name = k
        p = structType(name)

    if header or body or attrs:
        ret = (p,)
        if header : ret += (t.header,)
        if body: ret += (t.body,)
        if attrs: ret += (t.attrs,)
        return ret
    else:
        return p
################################################################################
# SOAP Builder
################################################################################
class SOAPBuilder:
    """Serializes Python data into a SOAP message.

    Instances are single-use: construct one with the payload (positional
    args and/or keyword args, an optional method name, header and
    namespace) and call build() to obtain the marshaled XML document.
    Recursive data structures are handled by restarting the build in
    multi-reference (href/id) mode.
    """

    _xml_top = '<?xml version="1.0"?>\n'
    _xml_enc_top = '<?xml version="1.0" encoding="%s"?>\n'
    _env_top = '%(ENV_T)s:Envelope %(ENV_T)s:encodingStyle="%(ENC)s"' % \
        NS.__dict__
    _env_bot = '</%(ENV_T)s:Envelope>\n' % NS.__dict__

    # Namespaces potentially defined in the Envelope tag.
    _env_ns = {NS.ENC: NS.ENC_T, NS.ENV: NS.ENV_T,
        NS.XSD: NS.XSD_T, NS.XSD2: NS.XSD2_T, NS.XSD3: NS.XSD3_T,
        NS.XSI: NS.XSI_T, NS.XSI2: NS.XSI2_T, NS.XSI3: NS.XSI3_T}

    def __init__(self, args = (), kw = {}, method = None, namespace = None,
        header = None, methodattrs = None, envelope = 1, encoding = 'UTF-8',
        use_refs = 0, config = Config):

        # Test the encoding, raising an exception if it's not known
        if encoding != None:
            ''.encode(encoding)

        self.args = args
        self.kw = kw
        self.envelope = envelope
        self.encoding = encoding
        self.method = method
        self.namespace = namespace
        self.header = header
        self.methodattrs= methodattrs
        self.use_refs = use_refs
        self.config = config
        self.out = ''           # accumulated XML output
        self.tcounter = 0       # counter for generated element names (v1, v2, ...)
        self.ncounter = 1       # counter for generated namespace prefixes
        self.icounter = 1       # counter for multi-reference ids
        self.envns = {}         # namespaces to declare on the Envelope tag
        self.ids = {}           # id(obj) -> multiref id, also detects cycles
        self.depth = 0
        self.multirefs = []     # (obj, tag) pairs deferred for multiref output
        self.multis = 0         # true while emitting multiref elements
        self.body = not isinstance(args, bodyType)

    def build(self):
        """Marshal the payload and return the complete SOAP document."""
        ns_map = {}

        # Cache whether typing is on or not
        typed = self.config.typed

        if self.header:
            # Create a header.
            self.dump(self.header, "Header", typed = typed)
            self.header = None # Wipe it out so no one is using it.

        if self.body:
            # Call genns to record that we've used SOAP-ENV.
            self.depth += 1
            body_ns = self.genns(ns_map, NS.ENV)[0]
            self.out += "<%sBody>\n" % body_ns

        if self.method:
            self.depth += 1
            a = ''
            if self.methodattrs:
                for (k, v) in self.methodattrs.items():
                    a += ' %s="%s"' % (k, v)

            if self.namespace: # Use the namespace info handed to us
                methodns, n = self.genns(ns_map, self.namespace)
            else:
                methodns, n = '', ''

            self.out += '<%s%s%s%s%s>\n' % \
                (methodns, self.method, n, a, self.genroot(ns_map))

        try:
            if type(self.args) != TupleType:
                args = (self.args,)
            else:
                args = self.args

            for i in args:
                self.dump(i, typed = typed, ns_map = ns_map)

            for (k, v) in self.kw.items():
                self.dump(v, k, typed = typed, ns_map = ns_map)
        except RecursionError:
            if self.use_refs == 0:
                # Restart in multi-reference mode so recursive structures
                # can be expressed with href/id links instead of recursing.
                b = SOAPBuilder(args = self.args, kw = self.kw,
                    method = self.method, namespace = self.namespace,
                    header = self.header, methodattrs = self.methodattrs,
                    envelope = self.envelope, encoding = self.encoding,
                    use_refs = 1, config = self.config)
                return b.build()
            raise

        if self.method:
            self.out += "</%s%s>\n" % (methodns, self.method)
            self.depth -= 1

        if self.body:
            # dump may add to self.multirefs, but the for loop will keep
            # going until it has used all of self.multirefs, even those
            # entries added while in the loop.
            self.multis = 1

            for obj, tag in self.multirefs:
                self.dump(obj, tag, typed = typed, ns_map = ns_map)

            self.out += "</%sBody>\n" % body_ns
            self.depth -= 1

        if self.envelope:
            e = map (lambda ns: 'xmlns:%s="%s"' % (ns[1], ns[0]),
                self.envns.items())

            self.out = '<' + self._env_top + ' '.join([''] + e) + '>\n' + \
                self.out + \
                self._env_bot

        if self.encoding != None:
            self.out = self._xml_enc_top % self.encoding + self.out
            return self.out.encode(self.encoding)

        return self._xml_top + self.out

    def gentag(self):
        """Return a fresh element name for unnamed values (v1, v2, ...)."""
        self.tcounter += 1
        return "v%d" % self.tcounter

    def genns(self, ns_map, nsURI):
        """Return a (prefix, declaration) pair for nsURI.

        Reuses an existing prefix from ns_map when possible; well-known
        SOAP/schema namespaces are also recorded in self.envns so they
        get declared once on the Envelope tag.  nsURI may be a
        (prefix, uri) tuple to request a specific prefix.
        """
        if nsURI == None:
            return ('', '')

        if type(nsURI) == TupleType: # already a tuple
            if len(nsURI) == 2:
                ns, nsURI = nsURI
            else:
                ns, nsURI = None, nsURI[0]
        else:
            ns = None

        if ns_map.has_key(nsURI):
            return (ns_map[nsURI] + ':', '')

        if self._env_ns.has_key(nsURI):
            ns = self.envns[nsURI] = ns_map[nsURI] = self._env_ns[nsURI]
            return (ns + ':', '')

        if not ns:
            ns = "ns%d" % self.ncounter
            self.ncounter += 1
        ns_map[nsURI] = ns
        if self.config.buildWithNamespacePrefix:
            return (ns + ':', ' xmlns:%s="%s"' % (ns, nsURI))
        else:
            return ('', ' xmlns="%s"' % (nsURI))

    def genroot(self, ns_map):
        """Return the SOAP-ENC root attribute for depth-2 elements."""
        if self.depth != 2:
            return ''

        ns, n = self.genns(ns_map, NS.ENC)
        return ' %sroot="%d"%s' % (ns, not self.multis, n)

    # checkref checks an element to see if it needs to be encoded as a
    # multi-reference element or not. If it returns None, the element has
    # been handled and the caller can continue with subsequent elements.
    # If it returns a string, the string should be included in the opening
    # tag of the marshaled element.
    def checkref(self, obj, tag, ns_map):
        if self.depth < 2:
            return ''

        if not self.ids.has_key(id(obj)):
            # First sighting of this object: assign it a multiref id.
            n = self.ids[id(obj)] = self.icounter
            self.icounter = n + 1

            if self.use_refs == 0:
                return ''

            if self.depth == 2:
                return ' id="i%d"' % n

            self.multirefs.append((obj, tag))
        else:
            # Seen before: without use_refs this is an illegal cycle.
            if self.use_refs == 0:
                raise RecursionError, "Cannot serialize recursive object"

            n = self.ids[id(obj)]

            if self.multis and self.depth == 2:
                return ' id="i%d"' % n

        self.out += '<%s href="#i%d"%s/>\n' % (tag, n, self.genroot(ns_map))
        return None

    # dumpers

    def dump(self, obj, tag = None, typed = 1, ns_map = {}):
        """Marshal obj by dispatching on its type name to dump_<type>."""
        ns_map = ns_map.copy()
        self.depth += 1

        if type(tag) not in (NoneType, StringType, UnicodeType):
            raise KeyError, "tag must be a string or None"

        try:
            meth = getattr(self, "dump_" + type(obj).__name__)
            meth(obj, tag, typed, ns_map)
        except AttributeError:
            # No specialized dumper: fall back to the generic one.
            if type(obj) == LongType:
                obj_type = "integer"
            else:
                obj_type = type(obj).__name__

            self.out += self.dumper(None, obj_type, obj, tag, typed,
                ns_map, self.genroot(ns_map))

        self.depth -= 1

    # generic dumper
    def dumper(self, nsURI, obj_type, obj, tag, typed = 1, ns_map = {},
        rootattr = '', id = '',
        xml = '<%(tag)s%(type)s%(id)s%(attrs)s%(root)s>%(data)s</%(tag)s>\n'):
        """Render one element through the `xml` template and return it."""
        if nsURI == None:
            nsURI = self.config.typesNamespaceURI

        tag = tag or self.gentag()

        a = n = t = ''
        if typed and obj_type:
            ns, n = self.genns(ns_map, nsURI)
            ins = self.genns(ns_map, self.config.schemaNamespaceURI)[0]
            t = ' %stype="%s%s"%s' % (ins, ns, obj_type, n)

        try: a = obj._marshalAttrs(ns_map, self)
        except: pass

        try: data = obj._marshalData()
        except: data = obj

        return xml % {"tag": tag, "type": t, "data": data, "root": rootattr,
            "id": id, "attrs": a}

    def dump_float(self, obj, tag, typed = 1, ns_map = {}):
        # Terrible windows hack: platforms without proper inf support
        # are detected via the module-level good_float flag.
        if not good_float:
            if obj == float(1e300**2):
                obj = "INF"
            elif obj == float(-1e300**2):
                obj = "-INF"

        # Normalize Python's repr to the XML Schema spellings.
        obj = str(obj)
        if obj in ('inf', '-inf'):
            obj = str(obj).upper()
        elif obj == 'nan':
            obj = 'NaN'
        self.out += self.dumper(None, "float", obj, tag, typed, ns_map,
            self.genroot(ns_map))

    def dump_string(self, obj, tag, typed = 0, ns_map = {}):
        tag = tag or self.gentag()

        id = self.checkref(obj, tag, ns_map)
        if id == None:
            return

        try: data = obj._marshalData()
        except: data = obj

        # cgi.escape protects <, > and & in the character data.
        self.out += self.dumper(None, "string", cgi.escape(data), tag,
            typed, ns_map, self.genroot(ns_map), id)

    dump_unicode = dump_string
    dump_str = dump_string # 4/12/2002 - MAP - for Python 2.2

    def dump_None(self, obj, tag, typed = 0, ns_map = {}):
        tag = tag or self.gentag()
        ns = self.genns(ns_map, self.config.schemaNamespaceURI)[0]

        self.out += '<%s %snull="1"%s/>\n' % (tag, ns, self.genroot(ns_map))

    def dump_list(self, obj, tag, typed = 1, ns_map = {}):
        if type(obj) == InstanceType:
            data = obj.data
        else:
            data = obj

        tag = tag or self.gentag()

        id = self.checkref(obj, tag, ns_map)
        if id == None:
            return

        try:
            sample = data[0]
            empty = 0
        except:
            sample = structType()
            empty = 1

        # First scan list to see if all are the same type
        same_type = 1

        if not empty:
            for i in data[1:]:
                if type(sample) != type(i) or \
                    (type(sample) == InstanceType and \
                        sample.__class__ != i.__class__):
                    same_type = 0
                    break

        ndecl = ''
        if same_type:
            if (isinstance(sample, structType)) or \
                type(sample) == DictType: # force to urn struct
                try:
                    tns = obj._ns or NS.URN
                except:
                    tns = NS.URN

                ns, ndecl = self.genns(ns_map, tns)

                # NOTE(review): `last` looks undefined here; the except
                # always fires and typename falls back to "SOAPStruct".
                try:
                    typename = last._typename
                except:
                    typename = "SOAPStruct"

                t = ns + typename
            elif isinstance(sample, anyType):
                ns = sample._validNamespaceURI(self.config.typesNamespaceURI,
                    self.config.strictNamespaces)
                if ns:
                    ns, ndecl = self.genns(ns_map, ns)
                    t = ns + sample._type
                else:
                    t = 'ur-type'
            else:
                t = self.genns(ns_map, self.config.typesNamespaceURI)[0] + \
                    type(sample).__name__
        else:
            # Mixed element types: declare the array as ur-type.
            t = self.genns(ns_map, self.config.typesNamespaceURI)[0] + \
                "ur-type"

        try: a = obj._marshalAttrs(ns_map, self)
        except: a = ''

        ens, edecl = self.genns(ns_map, NS.ENC)
        ins, idecl = self.genns(ns_map, self.config.schemaNamespaceURI)

        self.out += \
            '<%s %sarrayType="%s[%d]" %stype="%sArray"%s%s%s%s%s%s>\n' %\
            (tag, ens, t, len(data), ins, ens, ndecl, edecl, idecl,
                self.genroot(ns_map), id, a)

        # Homogeneous arrays don't need per-element type attributes.
        typed = not same_type

        try: elemsname = obj._elemsname
        except: elemsname = "item"

        for i in data:
            self.dump(i, elemsname, typed, ns_map)

        self.out += '</%s>\n' % tag

    dump_tuple = dump_list

    def dump_dictionary(self, obj, tag, typed = 1, ns_map = {}):
        tag = tag or self.gentag()

        id = self.checkref(obj, tag, ns_map)
        if id == None:
            return

        try: a = obj._marshalAttrs(ns_map, self)
        except: a = ''

        self.out += '<%s%s%s%s>\n' % \
            (tag, id, a, self.genroot(ns_map))

        # Keys starting with '_' are treated as private and skipped.
        for (k, v) in obj.items():
            if k[0] != "_":
                self.dump(v, k, 1, ns_map)

        self.out += '</%s>\n' % tag

    dump_dict = dump_dictionary # 4/18/2002 - MAP - for Python 2.2

    def dump_instance(self, obj, tag, typed = 1, ns_map = {}):
        """Marshal a class instance: arrays, faults, voids, structs,
        anyType subclasses and plain classes each get their own shape."""
        if not tag:
            # If it has a name use it.
            if isinstance(obj, anyType) and obj._name:
                tag = obj._name
            else:
                tag = self.gentag()

        if isinstance(obj, arrayType): # Array
            self.dump_list(obj, tag, typed, ns_map)
            return

        if isinstance(obj, faultType): # Fault
            cns, cdecl = self.genns(ns_map, NS.ENC)
            vns, vdecl = self.genns(ns_map, NS.ENV)
            self.out += '''<%sFault %sroot="1"%s%s>
<faultcode>%s</faultcode>
<faultstring>%s</faultstring>
''' % (vns, cns, vdecl, cdecl, obj.faultcode, obj.faultstring)
            if hasattr(obj, "detail"):
                self.dump(obj.detail, "detail", typed, ns_map)
            self.out += "</%sFault>\n" % vns
            return

        r = self.genroot(ns_map)

        try: a = obj._marshalAttrs(ns_map, self)
        except: a = ''

        if isinstance(obj, voidType): # void
            self.out += "<%s%s%s></%s>\n" % (tag, a, r, tag)
            return

        id = self.checkref(obj, tag, ns_map)
        if id == None:
            return

        if isinstance(obj, structType):
            # Check for namespace
            ndecl = ''
            ns = obj._validNamespaceURI(self.config.typesNamespaceURI,
                self.config.strictNamespaces)
            if ns:
                ns, ndecl = self.genns(ns_map, ns)
                tag = ns + tag
            self.out += "<%s%s%s%s%s>\n" % (tag, ndecl, id, a, r)

            # If we have order use it.
            order = 1

            for i in obj._keys():
                if i not in obj._keyord:
                    order = 0
                    break
            if order:
                for i in range(len(obj._keyord)):
                    self.dump(obj._aslist[i], obj._keyord[i], 1, ns_map)
            else:
                # don't have pristine order information, just build it.
                for (k, v) in obj.__dict__.items():
                    if k[0] != "_":
                        self.dump(v, k, 1, ns_map)

            if isinstance(obj, bodyType):
                # The Body flushes any multiref elements queued so far.
                self.multis = 1

                for v, k in self.multirefs:
                    self.dump(v, k, typed = typed, ns_map = ns_map)

            self.out += '</%s>\n' % tag
        elif isinstance(obj, anyType):
            t = ''

            if typed:
                ns = obj._validNamespaceURI(self.config.typesNamespaceURI,
                    self.config.strictNamespaces)
                if ns:
                    ons, ondecl = self.genns(ns_map, ns)
                    ins, indecl = self.genns(ns_map,
                        self.config.schemaNamespaceURI)
                    t = ' %stype="%s%s"%s%s' % \
                        (ins, ons, obj._type, ondecl, indecl)

            self.out += '<%s%s%s%s%s>%s</%s>\n' % \
                (tag, t, id, a, r, obj._marshalData(), tag)
        else: # Some Class
            self.out += '<%s%s%s>\n' % (tag, id, r)

            for (k, v) in obj.__dict__.items():
                if k[0] != "_":
                    self.dump(v, k, 1, ns_map)

            self.out += '</%s>\n' % tag
################################################################################
# SOAPBuilder's more public interface
################################################################################
def buildSOAP(args=(), kw={}, method=None, namespace=None, header=None,
              methodattrs=None, envelope=1, encoding='UTF-8', config=Config):
    """Convenience wrapper: marshal the given payload into a SOAP
    document via a throwaway SOAPBuilder and return the XML."""
    builder = SOAPBuilder(args=args, kw=kw, method=method,
                          namespace=namespace, header=header,
                          methodattrs=methodattrs, envelope=envelope,
                          encoding=encoding, config=config)
    return builder.build()
################################################################################
# RPC
################################################################################
def SOAPUserAgent():
    """Return the User-Agent string sent with outgoing HTTP requests."""
    return "SOAP.py %s (actzero.com)" % __version__
################################################################################
# Client
################################################################################
class SOAPAddress:
    """A parsed SOAP endpoint URL: protocol, host and path components.

    Accepts partial URLs (missing scheme, bare port numbers) and fills
    in sensible defaults; only http and https endpoints are supported.
    """
    def __init__(self, url, config = Config):
        proto, uri = urllib.splittype(url)

        # apply some defaults
        if uri[0:2] != '//':
            if proto != None:
                uri = proto + ':' + uri
            uri = '//' + uri
            proto = 'http'

        host, path = urllib.splithost(uri)

        # A bare integer host means just a port: assume localhost.
        try:
            int(host)
            host = 'localhost:' + host
        except:
            pass

        if not path:
            path = '/'

        if proto not in ('http', 'https'):
            raise IOError, "unsupported SOAP protocol"
        if proto == 'https' and not config.SSLclient:
            raise AttributeError, \
                "SSL client not supported by this Python installation"

        self.proto = proto
        self.host = host
        self.path = path

    def __str__(self):
        return "%(proto)s://%(host)s%(path)s" % self.__dict__

    __repr__ = __str__
class HTTPTransport:
    """Sends a SOAP payload to an endpoint over HTTP(S) and returns the
    raw response body."""

    # Need a Timeout someday?
    def call(self, addr, data, soapaction = '', encoding = None,
        http_proxy = None, config = Config):
        """POST `data` to `addr` and return the response payload.

        addr       -- a SOAPAddress (a URL string is converted here)
        soapaction -- value for the SOAPAction header
        encoding   -- charset appended to the Content-type header
        http_proxy -- optional host:port to route the request through

        Raises HTTPError for any HTTP status other than 200 or 500
        (500 is passed through because it carries a SOAP fault).
        """
        import httplib

        if not isinstance(addr, SOAPAddress):
            addr = SOAPAddress(addr, config)

        # Build a request
        if http_proxy:
            # Proxies want the absolute URL on the request line.
            real_addr = http_proxy
            real_path = addr.proto + "://" + addr.host + addr.path
        else:
            real_addr = addr.host
            real_path = addr.path

        if addr.proto == 'https':
            r = httplib.HTTPS(real_addr)
        else:
            r = httplib.HTTP(real_addr)

        r.putrequest("POST", real_path)

        r.putheader("Host", addr.host)
        r.putheader("User-agent", SOAPUserAgent())
        t = 'text/xml';
        if encoding != None:
            t += '; charset="%s"' % encoding
        r.putheader("Content-type", t)
        r.putheader("Content-length", str(len(data)))
        r.putheader("SOAPAction", '"%s"' % soapaction)

        if config.dumpHeadersOut:
            s = 'Outgoing HTTP headers'
            debugHeader(s)
            print "POST %s %s" % (real_path, r._http_vsn_str)
            print "Host:", addr.host
            print "User-agent: SOAP.py " + __version__ + " (actzero.com)"
            print "Content-type:", t
            print "Content-length:", len(data)
            print 'SOAPAction: "%s"' % soapaction
            debugFooter(s)

        r.endheaders()

        if config.dumpSOAPOut:
            s = 'Outgoing SOAP'
            debugHeader(s)
            print data,
            if data[-1] != '\n':
                print
            debugFooter(s)

        # send the payload
        r.send(data)

        # read response line
        code, msg, headers = r.getreply()

        if config.dumpHeadersIn:
            s = 'Incoming HTTP headers'
            debugHeader(s)
            if headers.headers:
                print "HTTP/1.? %d %s" % (code, msg)
                print "\n".join(map (lambda x: x.strip(), headers.headers))
            else:
                print "HTTP/0.9 %d %s" % (code, msg)
            debugFooter(s)

        if config.dumpSOAPIn:
            data = r.getfile().read()

            s = 'Incoming SOAP'
            debugHeader(s)
            print data,
            if data[-1] != '\n':
                print
            debugFooter(s)

        # 200 is success; 500 may carry a SOAP fault the caller parses.
        if code not in (200, 500):
            raise HTTPError(code, msg)

        # Only read the body here if the debug dump above didn't already.
        if not config.dumpSOAPIn:
            data = r.getfile().read()

        # return response payload
        return data
################################################################################
# SOAP Proxy
################################################################################
class SOAPProxy:
    """Client-side proxy: attribute access yields callable SOAP methods.

    p = SOAPProxy(url); p.someMethod(arg) marshals the call, sends it
    through the transport, parses the reply and (optionally) unwraps a
    single-element result struct.  Faults are raised as exceptions when
    throw_faults is true.
    """
    def __init__(self, proxy, namespace = None, soapaction = '',
        header = None, methodattrs = None, transport = HTTPTransport,
        encoding = 'UTF-8', throw_faults = 1, unwrap_results = 1,
        http_proxy=None, config = Config):

        # Test the encoding, raising an exception if it's not known
        if encoding != None:
            ''.encode(encoding)

        self.proxy = SOAPAddress(proxy, config)
        self.namespace = namespace
        self.soapaction = soapaction
        self.header = header
        self.methodattrs = methodattrs
        self.transport = transport()
        self.encoding = encoding
        self.throw_faults = throw_faults
        self.unwrap_results = unwrap_results
        self.http_proxy = http_proxy
        self.config = config

    def __call(self, name, args, kw, ns = None, sa = None, hd = None,
        ma = None):
        """Perform the RPC round trip for remote method `name`.

        ns/sa/hd/ma override the per-proxy namespace, soapaction,
        header and methodattrs; tuples come from the _ns/_sa/_hd/_ma
        directive mechanism in __Method and are unwrapped here.
        """
        ns = ns or self.namespace
        ma = ma or self.methodattrs

        if sa: # Get soapaction
            if type(sa) == TupleType: sa = sa[0]
        else:
            sa = self.soapaction

        if hd: # Get header
            if type(hd) == TupleType:
                hd = hd[0]
        else:
            hd = self.header
        hd = hd or self.header

        if ma: # Get methodattrs
            if type(ma) == TupleType: ma = ma[0]
        else:
            ma = self.methodattrs
        ma = ma or self.methodattrs

        m = buildSOAP(args = args, kw = kw, method = name, namespace = ns,
            header = hd, methodattrs = ma, encoding = self.encoding,
            config = self.config)

        #print m

        r = self.transport.call(self.proxy, m, sa, encoding = self.encoding,
                                http_proxy = self.http_proxy,
                                config = self.config)

        #print r

        p, attrs = parseSOAPRPC(r, attrs = 1)

        try:
            throw_struct = self.throw_faults and \
                isinstance (p, faultType)
        except:
            throw_struct = 0

        if throw_struct:
            raise p

        # Bubble a regular result up, if there is only element in the
        # struct, assume that is the result and return it.
        # Otherwise it will return the struct with all the elements
        # as attributes.
        if self.unwrap_results:
            try:
                count = 0
                for i in p.__dict__.keys():
                    if i[0] != "_":  # don't move the private stuff
                        count += 1
                        t = getattr(p, i)
                if count == 1: p = t # Only one piece of data, bubble it up
            except:
                pass

        if self.config.returnAllAttrs:
            return p, attrs
        return p

    def _callWithBody(self, body):
        """Send a pre-built bodyType rather than a named method call."""
        return self.__call(None, body, {})

    def __getattr__(self, name):  # hook to catch method calls
        return self.__Method(self.__call, name, config = self.config)

    # To handle attribute wierdness
    class __Method:
        # Some magic to bind a SOAP method to an RPC server.
        # Supports "nested" methods (e.g. examples.getStateName) -- concept
        # borrowed from xmlrpc/soaplib -- www.pythonware.com
        # Altered (improved?) to let you inline namespaces on a per call
        # basis ala SOAP::LITE -- www.soaplite.com

        def __init__(self, call, name, ns = None, sa = None, hd = None,
            ma = None, config = Config):
            self.__call = call
            self.__name = name
            self.__ns = ns
            self.__sa = sa
            self.__hd = hd
            self.__ma = ma
            self.__config = config

            # Names starting with '_' are directives (_ns, _sa, _hd, _ma)
            # that configure the eventual call instead of naming a remote
            # method; wire up __call__ accordingly.
            if self.__name[0] == "_":
                if self.__name in ["__repr__","__str__"]:
                    self.__call__ = self.__repr__
                else:
                    self.__call__ = self.__f_call
            else:
                self.__call__ = self.__r_call

        def __getattr__(self, name):
            if self.__name[0] == "_":
                # Don't nest method if it is a directive
                return self.__class__(self.__call, name, self.__ns,
                    self.__sa, self.__hd, self.__ma)

            return self.__class__(self.__call, "%s.%s" % (self.__name, name),
                self.__ns, self.__sa, self.__hd, self.__ma)

        def __f_call(self, *args, **kw):
            # Directive invocation: stash the arguments and return self
            # so the real method call can be chained on.
            if self.__name == "_ns": self.__ns = args
            elif self.__name == "_sa": self.__sa = args
            elif self.__name == "_hd": self.__hd = args
            elif self.__name == "_ma": self.__ma = args
            return self

        def __r_call(self, *args, **kw):
            # Real remote call: delegate to SOAPProxy.__call.
            return self.__call(self.__name, args, kw, self.__ns, self.__sa,
                self.__hd, self.__ma)

        def __repr__(self):
            return "<%s at %d>" % (self.__class__, id(self))
################################################################################
# Server
################################################################################
# Method Signature class for adding extra info to registered funcs, right now
# used just to indicate it should be called with keywords, instead of ordered
# params.
# Method Signature class for adding extra info to registered funcs, right now
# used just to indicate it should be called with keywords, instead of ordered
# params.
class MethodSig:
    """Wrapper attaching call-convention info to a registered function.

    keywords -- when true, the server calls the function with keyword
                arguments instead of positional ones.
    context  -- when true, the server passes a SOAPContext in as the
                _SOAPContext keyword argument.
    """
    def __init__(self, func, keywords=0, context=0):
        self.func = func
        self.keywords = keywords
        self.context = context
        self.__name__ = func.__name__

    def __call__(self, *args, **kw):
        # Fixed: the original used apply(), which has been deprecated
        # since Python 2.3 and removed in Python 3.  Direct argument
        # unpacking is exactly equivalent in both Python 2 and 3.
        return self.func(*args, **kw)
class SOAPContext:
    """Bundle of per-request state handed to context-aware server methods."""

    def __init__(self, header, body, attrs, xmldata, connection, httpheaders,
        soapaction):
        # Stash each piece of request state on the instance under the
        # attribute name matching its parameter.
        for attr_name, value in (("header", header),
                                 ("body", body),
                                 ("attrs", attrs),
                                 ("xmldata", xmldata),
                                 ("connection", connection),
                                 ("httpheaders", httpheaders),
                                 ("soapaction", soapaction)):
            setattr(self, attr_name, value)
# A class to describe how header messages are handled
class HeaderHandler:
    """Validates a parsed SOAP header.

    Raises a MustUnderstand fault for any header element that carries a
    true mustUnderstand attribute, since this server does not actually
    understand any header elements.
    """
    # Initially fail out if there are any problems.
    def __init__(self, header, attrs):
        for i in header.__dict__.keys():
            if i[0] == "_":
                continue

            d = getattr(header, i)

            # attrs maps id(element) -> attribute dict; absence of the
            # mustUnderstand attribute counts as "not required".
            try:
                fault = int(attrs[id(d)][(NS.ENV, 'mustUnderstand')])
            except:
                fault = 0

            if fault:
                raise faultType, ("%s:MustUnderstand" % NS.ENV_T,
                    "Don't understand `%s' header element but "
                    "mustUnderstand attribute is set." % i)
################################################################################
# SOAP Server
################################################################################
class SOAPServer(SocketServer.TCPServer):
    """TCP server that dispatches SOAP-RPC requests to registered
    functions and objects, with optional SSL support."""

    import BaseHTTPServer

    class SOAPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
        def version_string(self):
            """Server identification string (HTML, used in error pages)."""
            return '<a href="http://www.actzero.com/solution.html">' + \
                'SOAP.py ' + __version__ + '</a> (Python ' + \
                sys.version.split()[0] + ')'

        def date_time_string(self):
            # Remember the value so debug output below can reuse the
            # exact Date header that was sent.
            self.__last_date_time_string = \
                SOAPServer.BaseHTTPServer.BaseHTTPRequestHandler.\
                date_time_string(self)
            return self.__last_date_time_string

        def do_POST(self):
            """Handle one SOAP-RPC request: parse, dispatch, reply."""
            try:
                if self.server.config.dumpHeadersIn:
                    s = 'Incoming HTTP headers'
                    debugHeader(s)
                    print self.raw_requestline.strip()
                    print "\n".join(map (lambda x: x.strip(),
                        self.headers.headers))
                    debugFooter(s)

                data = self.rfile.read(int(self.headers["content-length"]))

                if self.server.config.dumpSOAPIn:
                    s = 'Incoming SOAP'
                    debugHeader(s)
                    print data,
                    if data[-1] != '\n':
                        print
                    debugFooter(s)

                (r, header, body, attrs) = \
                    parseSOAPRPC(data, header = 1, body = 1, attrs = 1)

                method = r._name
                args = r._aslist
                kw = r._asdict
                ns = r._ns
                resp = ""
                # For fault messages
                if ns:
                    nsmethod = "%s:%s" % (ns, method)
                else:
                    nsmethod = method

                try:
                    # First look for registered functions
                    if self.server.funcmap.has_key(ns) and \
                        self.server.funcmap[ns].has_key(method):
                        f = self.server.funcmap[ns][method]
                    else: # Now look at registered objects
                        # Check for nested attributes. This works even if
                        # there are none, because the split will return
                        # [method]
                        f = self.server.objmap[ns]
                        l = method.split(".")
                        for i in l:
                            f = getattr(f, i)
                except:
                    resp = buildSOAP(faultType("%s:Client" % NS.ENV_T,
                            "No method %s found" % nsmethod,
                            "%s %s" % tuple(sys.exc_info()[0:2])),
                        encoding = self.server.encoding,
                        config = self.server.config)
                    status = 500
                else:
                    try:
                        if header:
                            x = HeaderHandler(header, attrs)

                        # If it's wrapped, some special action may be needed
                        if isinstance(f, MethodSig):
                            c = None

                            if f.context: # Build context object
                                c = SOAPContext(header, body, attrs, data,
                                    self.connection, self.headers,
                                    self.headers["soapaction"])

                            if f.keywords:
                                # This is lame, but have to de-unicode
                                # keywords
                                strkw = {}

                                for (k, v) in kw.items():
                                    strkw[str(k)] = v
                                if c:
                                    strkw["_SOAPContext"] = c
                                fr = apply(f, (), strkw)
                            elif c:
                                fr = apply(f, args, {'_SOAPContext':c})
                            else:
                                fr = apply(f, args, {})
                        else:
                            fr = apply(f, args, {})

                        # voidType results are sent bare; everything else
                        # is wrapped in a Result element.
                        if type(fr) == type(self) and \
                            isinstance(fr, voidType):
                            resp = buildSOAP(kw = {'%sResponse' % method: fr},
                                encoding = self.server.encoding,
                                config = self.server.config)
                        else:
                            resp = buildSOAP(kw =
                                {'%sResponse' % method: {'Result': fr}},
                                encoding = self.server.encoding,
                                config = self.server.config)
                    except Exception, e:
                        import traceback
                        info = sys.exc_info()

                        if self.server.config.dumpFaultInfo:
                            s = 'Method %s exception' % nsmethod
                            debugHeader(s)
                            traceback.print_exception(info[0], info[1],
                                info[2])
                            debugFooter(s)

                        if isinstance(e, faultType):
                            f = e
                        else:
                            f = faultType("%s:Server" % NS.ENV_T,
                                "Method %s failed." % nsmethod)

                        if self.server.config.returnFaultInfo:
                            f._setDetail("".join(traceback.format_exception(
                                    info[0], info[1], info[2])))
                        elif not hasattr(f, 'detail'):
                            f._setDetail("%s %s" % (info[0], info[1]))

                        resp = buildSOAP(f, encoding = self.server.encoding,
                            config = self.server.config)
                        status = 500
                    else:
                        status = 200
            except faultType, e:
                # NOTE(review): this branch builds resp/status but falls
                # off the end of do_POST without writing a response (the
                # else clause below only runs when no exception escaped
                # the outer try) — looks like a latent bug; confirm.
                import traceback
                info = sys.exc_info()

                if self.server.config.dumpFaultInfo:
                    s = 'Received fault exception'
                    debugHeader(s)
                    traceback.print_exception(info[0], info[1],
                        info[2])
                    debugFooter(s)

                if self.server.config.returnFaultInfo:
                    e._setDetail("".join(traceback.format_exception(
                            info[0], info[1], info[2])))
                elif not hasattr(e, 'detail'):
                    e._setDetail("%s %s" % (info[0], info[1]))

                resp = buildSOAP(e, encoding = self.server.encoding,
                    config = self.server.config)
                status = 500
            except:
                # internal error, report as HTTP server error
                if self.server.config.dumpFaultInfo:
                    import traceback
                    s = 'Internal exception'
                    debugHeader(s)
                    traceback.print_exc ()
                    debugFooter(s)
                self.send_response(500)
                self.end_headers()

                if self.server.config.dumpHeadersOut and \
                    self.request_version != 'HTTP/0.9':
                    s = 'Outgoing HTTP headers'
                    debugHeader(s)
                    # NOTE(review): `status` may be unbound here if the
                    # failure happened before dispatch — confirm.
                    if self.responses.has_key(status):
                        s = ' ' + self.responses[status][0]
                    else:
                        s = ''
                    print "%s %d%s" % (self.protocol_version, 500, s)
                    print "Server:", self.version_string()
                    print "Date:", self.__last_date_time_string
                    debugFooter(s)
            else:
                # got a valid SOAP response
                self.send_response(status)

                t = 'text/xml';
                if self.server.encoding != None:
                    t += '; charset="%s"' % self.server.encoding
                self.send_header("Content-type", t)
                self.send_header("Content-length", str(len(resp)))
                self.end_headers()

                if self.server.config.dumpHeadersOut and \
                    self.request_version != 'HTTP/0.9':
                    s = 'Outgoing HTTP headers'
                    debugHeader(s)
                    if self.responses.has_key(status):
                        s = ' ' + self.responses[status][0]
                    else:
                        s = ''
                    print "%s %d%s" % (self.protocol_version, status, s)
                    print "Server:", self.version_string()
                    print "Date:", self.__last_date_time_string
                    print "Content-type:", t
                    print "Content-length:", len(resp)
                    debugFooter(s)

                if self.server.config.dumpSOAPOut:
                    s = 'Outgoing SOAP'
                    debugHeader(s)
                    print resp,
                    if resp[-1] != '\n':
                        print
                    debugFooter(s)

                self.wfile.write(resp)
                self.wfile.flush()

                # We should be able to shut down both a regular and an SSL
                # connection, but under Python 2.1, calling shutdown on an
                # SSL connections drops the output, so this work-around.
                # This should be investigated more someday.

                if self.server.config.SSLserver and \
                    isinstance(self.connection, SSL.Connection):
                    self.connection.set_shutdown(SSL.SSL_SENT_SHUTDOWN |
                        SSL.SSL_RECEIVED_SHUTDOWN)
                else:
                    self.connection.shutdown(1)

        def log_message(self, format, *args):
            # Honor the server-wide log flag.
            if self.server.log:
                SOAPServer.BaseHTTPServer.BaseHTTPRequestHandler.\
                log_message (self, format, *args)

    def __init__(self, addr = ('localhost', 8000),
        RequestHandler = SOAPRequestHandler, log = 1, encoding = 'UTF-8',
        config = Config, namespace = None, ssl_context = None):

        # Test the encoding, raising an exception if it's not known
        if encoding != None:
            ''.encode(encoding)

        if ssl_context != None and not config.SSLserver:
            raise AttributeError, \
                "SSL server not supported by this Python installation"

        self.namespace = namespace
        self.objmap = {}        # namespace -> registered object
        self.funcmap = {}       # namespace -> {name: function}
        self.ssl_context = ssl_context
        self.encoding = encoding
        self.config = config
        self.log = log

        self.allow_reuse_address= 1

        SocketServer.TCPServer.__init__(self, addr, RequestHandler)

    def get_request(self):
        """Accept a connection, wrapping it in SSL when configured."""
        sock, addr = SocketServer.TCPServer.get_request(self)

        if self.ssl_context:
            sock = SSL.Connection(self.ssl_context, sock)
            sock._setup_ssl(addr)
            if sock.accept_ssl() != 1:
                raise socket.error, "Couldn't accept SSL connection"

        return sock, addr

    def registerObject(self, object, namespace = ''):
        """Expose every public method of `object` under `namespace`."""
        if namespace == '': namespace = self.namespace
        self.objmap[namespace] = object

    def registerFunction(self, function, namespace = '', funcName = None):
        """Expose a single callable under `namespace` as `funcName`."""
        if not funcName : funcName = function.__name__
        if namespace == '': namespace = self.namespace
        if self.funcmap.has_key(namespace):
            self.funcmap[namespace][funcName] = function
        else:
            self.funcmap[namespace] = {funcName : function}

    def registerKWObject(self, object, namespace = ''):
        """Like registerObject, but methods receive keyword arguments."""
        if namespace == '': namespace = self.namespace
        for i in dir(object.__class__):
            if i[0] != "_" and callable(getattr(object, i)):
                self.registerKWFunction(getattr(object,i), namespace)

    # convenience - wraps your func for you.
    def registerKWFunction(self, function, namespace = '', funcName = None):
        """Like registerFunction, but the call uses keyword arguments."""
        self.registerFunction(MethodSig(function,keywords=1), namespace,
            funcName)
| gdestuynder/Stupid-python-bot | modules/SOAP.py | Python | gpl-3.0 | 130,750 | [
"Brian"
] | e15d72ac5ce41d7ca3c945f4997a524cc314c04af9f4cdfafc73472dd222f028 |
#
# python-ipfix (c) 2013 Brian Trammell.
#
# Many thanks to the mPlane consortium (http://www.ict-mplane.eu) for
# its material support of this effort.
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Representation of IPFIX templates.
Provides template-based packing and unpacking of data in IPFIX messages.
For reading, templates are handled internally. For writing, use
:func:`from_ielist` to create a template.
See :mod:`ipfix.message` for examples.
"""
from . import ie, types, compat
from .compat import izip, xrange, lru_cache
import struct
# Builtin exceptions
class IpfixEncodeError(Exception):
    """Raised on internal encoding errors, or if message MTU is too small"""
    def __init__(self, *args):
        # Fixed two defects: super(self.__class__, self) recurses
        # infinitely if this class is ever subclassed, and passing
        # `args` (the tuple) instead of `*args` double-wrapped the
        # exception arguments (e.args == (args,)).
        super(IpfixEncodeError, self).__init__(*args)
class IpfixDecodeError(Exception):
    """Raised when decoding a malformed IPFIX message"""
    def __init__(self, *args):
        # Fixed two defects: super(self.__class__, self) recurses
        # infinitely if this class is ever subclassed, and passing
        # `args` (the tuple) instead of `*args` double-wrapped the
        # exception arguments (e.args == (args,)).
        super(IpfixDecodeError, self).__init__(*args)
# Set IDs used by NetFlow v9 for template and options-template sets
V9_TEMPLATE_SET_ID = 0
V9_OPTIONS_SET_ID = 1

# Set IDs used by IPFIX for template and options-template sets
TEMPLATE_SET_ID = 2
OPTIONS_SET_ID = 3

# Pre-compiled network-byte-order structs for template encoding/decoding:
# template header, options-template header, field specifier, and
# enterprise number (present when the IE has a private enterprise number).
_tmplhdr_st = struct.Struct("!HH")
_otmplhdr_st = struct.Struct("!HHH")
_iespec_st = struct.Struct("!HH")
_iepen_st = struct.Struct("!L")
class TemplatePackingPlan(object):
    """
    Plan to pack/unpack a specific set of indices for a template.

    Used internally by Templates for efficient encoding and decoding.
    A single struct format string is compiled covering the template's
    fixed-length prefix; IEs whose index is not in `indices` become
    skip (pad) entries in the format so they are passed over.
    """
    def __init__(self, tmpl, indices):
        self.tmpl = tmpl
        self.indices = indices
        # ranks maps each position to where its index falls in sorted order
        self.ranks = sorted(xrange(len(indices)), key=indices.__getitem__)

        self.valenc = []   # per-IE value encoders, in template order
        self.valdec = []   # per-IE value decoders, in template order

        packstring = "!"
        for i, t in enumerate(e.type for e in tmpl.ies):
            # only the fixed-length prefix can be handled by one struct;
            # varlen IEs and everything after them are decoded elsewhere
            if i >= tmpl.fixlen_count():
                break
            if i in indices:
                packstring += t.stel
                self.valenc.append(t.valenc)
                self.valdec.append(t.valdec)
            else:
                packstring += t.skipel

        self.st = struct.Struct(packstring)

    def __repr__(self):
        return "<TemplatePackingPlan "+repr(self.tmpl) +\
               " pack " + str(self.st.format) +\
               " indices " + " ".join(str(i) for i in self.indices)+">"
class Template(object):
    """
    An IPFIX Template.

    A template is an ordered list of IPFIX Information Elements with an ID.
    Instances know how to encode/decode themselves as template records, and
    to encode/decode the data records they describe.
    """
    def __init__(self, tid = 0, iterable = None):
        """
        Create a Template.

        :param tid: template ID; must be in 256..65535, the range reserved
                    for data sets (lower set IDs have special meanings).
        :param iterable: optional iterable of IEs used to populate the
                    template; coerced to an ie.InformationElementList.
        :raises ValueError: if tid is out of range
        """
        if tid < 256 or tid > 65535:
            raise ValueError("bad template ID "+str(tid))
        self.tid = tid
        self.minlength = 0        # minimum encoded record length in bytes
        self.enclength = 0        # encoded length of the template record itself
        self.scopecount = 0       # scope IE count; nonzero => options template
        self.varlenslice = None   # index of first variable-length IE, or None
        self.packplan = None      # default TemplatePackingPlan, set by finalize()
        self.ies = []
        if iterable:
            if not isinstance(iterable, ie.InformationElementList):
                iterable = ie.InformationElementList(iterable)
            for elem in iterable:
                self.append(elem)

    def __repr__(self):
        return "<Template ID "+str(self.tid)+" count "+ \
               str(len(self.ies))+" scope "+str(self.scopecount)+">"

    def identical_to(self, other):
        """
        Determine if two templates are identical to each other.
        Two templates are considered identical if they contain the same
        IEs in the same order, and the same scope count. Template ID
        is not considered as part of the test for template identity.
        """
        # FIXME this needs to check IE lengths as well
        return (self.ies == other.ies) and (self.scopecount == other.scopecount)

    def append(self, ie):
        """Append an IE to this Template, updating the length accounting."""
        self.ies.append(ie)
        if ie.length == types.VARLEN:
            # a variable-length IE contributes at least a one-byte length prefix
            self.minlength += 1
            if self.varlenslice is None:
                self.varlenslice = len(self.ies) - 1
        else:
            self.minlength += ie.length
        # encoded template record grows by one IE spec, plus a PEN field
        # for enterprise-specific IEs
        self.enclength += _iespec_st.size
        if (ie.pen):
            self.enclength += _iepen_st.size

    def count(self):
        """Count IEs in this template"""
        return len(self.ies)

    def fixlen_count(self):
        """
        Count of fixed-length IEs in this template before the first
        variable-length IE; this is the size of the portion of the template
        which can be encoded/decoded efficiently.
        """
        if self.varlenslice is not None:
            return self.varlenslice
        else:
            return self.count()

    def finalize(self):
        """Compile a default packing plan. Called after append()ing all IEs."""
        self.packplan = TemplatePackingPlan(self, xrange(self.count()))

    @lru_cache(maxsize = 32)
    def packplan_for_ielist(self, ielist):
        """
        Given a list of IEs, devise and cache a packing plan.
        Used by the tuple interfaces.
        """
        return TemplatePackingPlan(self, [self.ies.index(ie) for ie in ielist])

    def decode_from(self, buf, offset, packplan = None):
        """Decodes a record into a tuple containing values in template order"""
        # use default packplan unless someone hacked us not to
        if not packplan:
            packplan = self.packplan
        # decode fixed values
        vals = [f(v) for f, v in izip(packplan.valdec, packplan.st.unpack_from(buf, offset))]
        offset += packplan.st.size
        # short circuit on no varlen
        # BUGFIX: compare against None explicitly. varlenslice is 0 when the
        # *first* IE is variable-length, and `if not self.varlenslice` would
        # wrongly skip the variable-length decode loop below in that case
        # (fixlen_count() already uses the correct `is not None` test).
        if self.varlenslice is None:
            return (vals, offset)
        # direct iteration over remaining IEs
        for i, ie in izip(xrange(self.varlenslice, self.count()),
                          self.ies[self.varlenslice:]):
            length = ie.length
            if length == types.VARLEN:
                (length, offset) = types.decode_varlen(buf, offset)
            if i in packplan.indices:
                vals.append(ie.type.decode_single_value_from(
                    buf, offset, length))
            offset += length
        return (vals, offset)

    def decode_namedict_from(self, buf, offset, recinf = None):
        """Decodes a record from a buffer into a dict keyed by IE name."""
        (vals, offset) = self.decode_from(buf, offset)
        return (dict(( k, v) for k,v in izip((ie.name for ie in self.ies), vals)), offset)

    def decode_tuple_from(self, buf, offset, recinf = None):
        """
        Decodes a record from a buffer into a tuple,
        ordered as the IEs in the InformationElementList given as recinf.
        """
        if recinf:
            packplan = self.packplan_for_ielist(recinf)
        else:
            packplan = self.packplan
        (vals, offset) = self.decode_from(buf, offset, packplan = packplan)
        # re-sort values in same order as packplan indices
        outvals = tuple(v for i,v in sorted(izip(packplan.ranks, vals)))
        return (outvals, offset)

    def encode_to(self, buf, offset, vals, packplan = None):
        """Encodes a record from a tuple containing values in template order"""
        # use default packplan unless someone hacked us not to
        if not packplan:
            packplan = self.packplan
        # encode fixed values
        fixvals = [f(v) for f,v in izip(packplan.valenc, vals)]
        packplan.st.pack_into(buf, offset, *fixvals)
        offset += packplan.st.size
        # shortcircuit no varlen
        # BUGFIX: same `is None` test as decode_from; varlenslice == 0 must
        # not be treated as "no variable-length IEs".
        if self.varlenslice is None:
            return offset
        # direct iteration over remaining IEs
        for i, ie, val in izip(xrange(self.varlenslice, self.count()),
                               self.ies[self.varlenslice:],
                               vals[self.varlenslice:]):
            if i in packplan.indices:
                if ie.length == types.VARLEN:
                    # FIXME this arrangement requires double-encode of varlen
                    # values, one to get the length, one to do the encode.
                    # Fixing this requires a rearrangement of type encoding
                    # though. For now we'll just say that if you're exporting
                    # varlen you get to put up with some inefficiency. :)
                    offset = types.encode_varlen(buf, offset,
                                                 len(ie.type.valenc(val)))
                offset = ie.type.encode_single_value_to(val, buf, offset)
        return offset

    def encode_namedict_to(self, buf, offset, rec, recinf = None):
        """Encodes a record from a dict containing values keyed by IE name"""
        return self.encode_to(buf, offset, [rec[ie.name] for ie in self.ies])

    def encode_tuple_to(self, buf, offset, rec, recinf = None):
        """
        Encodes a record from a tuple containing values ordered as the IEs
        in the template.
        """
        return self.encode_to(buf, offset, rec)

    def encode_template_to(self, buf, offset, setid):
        """
        Encodes the template to a buffer.
        Encodes as a Template if setid is TEMPLATE_SET_ID,
        as an Options Template if setid is OPTIONS_SET_ID.

        :raises IpfixEncodeError: if setid is not a template set ID
        """
        if setid == TEMPLATE_SET_ID:
            _tmplhdr_st.pack_into(buf, offset, self.tid, self.count())
            offset += _tmplhdr_st.size
        elif setid == OPTIONS_SET_ID:
            _otmplhdr_st.pack_into(buf, offset, self.tid, self.count(), self.scopecount)
            offset += _otmplhdr_st.size
        else:
            raise IpfixEncodeError("bad template set id "+str(setid))
        for e in self.ies:
            if e.pen:
                # enterprise IE: set the high bit on the number and append
                # the private enterprise number after the spec
                _iespec_st.pack_into(buf, offset, e.num | 0x8000, e.length)
                offset += _iespec_st.size
                _iepen_st.pack_into(buf, offset, e.pen)
                offset += _iepen_st.size
            else:
                _iespec_st.pack_into(buf, offset, e.num, e.length)
                offset += _iespec_st.size
        return offset

    def native_setid(self):
        """Return the set ID this template would naturally be encoded in."""
        if self.scopecount:
            return OPTIONS_SET_ID
        else:
            return TEMPLATE_SET_ID
def withdrawal_length(setid):
    """
    Return the encoded length in bytes of a template withdrawal record
    for the given set ID.

    :raises IpfixEncodeError: if setid is not a (options) template set ID
    """
    if setid == TEMPLATE_SET_ID:
        return _tmplhdr_st.size
    elif setid == OPTIONS_SET_ID:
        return _otmplhdr_st.size
    else:
        # BUGFIX: was `return IpfixEncodeError(...)`, which handed the
        # exception object back to the caller instead of raising it.
        raise IpfixEncodeError("bad template set id "+str(setid))
def encode_withdrawal_to(buf, offset, setid, tid):
    """
    Encode a template withdrawal (a header with zero field count) for
    template `tid` into buf at offset; return the new offset.
    """
    if setid == TEMPLATE_SET_ID:
        hdr, fields = _tmplhdr_st, (tid, 0)
    elif setid == OPTIONS_SET_ID:
        hdr, fields = _otmplhdr_st, (tid, 0, 0)
    else:
        raise IpfixEncodeError("bad template set id "+str(setid))
    hdr.pack_into(buf, offset, *fields)
    return offset + hdr.size
def decode_template_from(buf, offset, setid):
    """
    Decodes a template from a buffer.
    Decodes as a Template if setid is TEMPLATE_SET_ID,
    as an Options Template if setid is OPTIONS_SET_ID.
    The corresponding NetFlow v9 set IDs are accepted as well.

    :return: (Template, new offset) tuple
    :raises IpfixDecodeError: if setid is not a template set ID
    """
    if (setid == TEMPLATE_SET_ID) or (setid == V9_TEMPLATE_SET_ID):
        (tid, count) = _tmplhdr_st.unpack_from(buf, offset)
        scopecount = 0
        offset += _tmplhdr_st.size
    elif (setid == OPTIONS_SET_ID) or (setid == V9_OPTIONS_SET_ID):
        (tid, count, scopecount) = _otmplhdr_st.unpack_from(buf, offset)
        offset += _otmplhdr_st.size
    else:
        raise IpfixDecodeError("bad template set id "+str(setid))
    tmpl = Template(tid)
    tmpl.scopecount = scopecount
    while count:
        (num, length) = _iespec_st.unpack_from(buf, offset)
        offset += _iespec_st.size
        if num & 0x8000:
            # high bit set: enterprise-specific IE followed by a 4-byte PEN
            num &= 0x7fff
            pen = _iepen_st.unpack_from(buf, offset)[0]
            # BUGFIX: advance by the PEN struct's size, not the IE spec's.
            # Both happen to be 4 bytes, so the old `_iespec_st.size` worked
            # only by coincidence.
            offset += _iepen_st.size
        else:
            pen = 0
        tmpl.append(ie.for_template_entry(pen, num, length))
        count -= 1
    tmpl.finalize()
    return (tmpl, offset)
def from_ielist(tid, ielist):
    """Build and finalize a Template from a template ID and an IE list."""
    newtmpl = Template(tid, ielist)
    newtmpl.finalize()
    return newtmpl
def for_specs(tid, *specs):
    """
    Create a template from a template ID and a list of IESpecs

    :param tid: Template ID, must be between 256 and 65535.
    :param *specs: List of IESpecs
    :return: A new Template, ready to use for writing to a Message
    """
    ielist = ie.spec_list(specs)
    return from_ielist(tid, ielist)
| britram/python-ipfix | ipfix/template.py | Python | lgpl-3.0 | 12,819 | [
"Brian"
] | c856730a222d134bef6cb6c8ff54a8c66de1e12b6bea1fbba2fed7c2fc60dfb1 |
from __future__ import print_function, division
import os
import tempfile
import mdtraj as md
import numpy as np
import pandas as pd
import sklearn.pipeline
from mdtraj.testing import eq
from numpy.testing import assert_approx_equal
from six import PY3
from sklearn.externals.joblib import load, dump
from msmbuilder import cluster
from msmbuilder.msm import MarkovStateModel, BayesianMarkovStateModel
from msmbuilder.utils import map_drawn_samples
def test_counts_no_trim():
    # A single-state chain with trimming disabled: nine visits to state 1
    # yield eight self-transitions and a one-entry mapping.
    msm = MarkovStateModel(reversible_type=None, ergodic_cutoff=0)
    msm.fit([[1] * 9])
    eq(msm.countsmat_, np.array([[8.0]]))
    eq(msm.mapping_, {1: 0})
def test_counts_trim():
    # With an ergodic cutoff, the single trailing visit to state 2 is
    # trimmed away, leaving only the state-1 self-transitions.
    msm = MarkovStateModel(reversible_type=None, ergodic_cutoff=1)
    msm.fit([[1] * 9 + [2]])
    eq(msm.mapping_, {1: 0})
    eq(msm.countsmat_, np.array([[8]]))
def test_counts_scaling():
    # Sliding-window, non-sliding, and 'off'-cutoff estimators must all
    # produce the same counts matrix at lag 2 for this sequence.
    seq = [1] * 4 + [2] * 4 + [1] * 4
    m_sliding = MarkovStateModel(reversible_type=None, lag_time=2,
                                 sliding_window=True).fit([seq])
    m_strided = MarkovStateModel(reversible_type=None, lag_time=2,
                                 sliding_window=False).fit([seq])
    m_nocutoff = MarkovStateModel(reversible_type=None, lag_time=2,
                                  ergodic_cutoff='off').fit([seq])
    models = [m_sliding, m_strided, m_nocutoff]
    for left, right in zip(models, models[1:] + models[:1]):
        eq(left.countsmat_, right.countsmat_)
def test_pickle():
    """Round-trip a fitted MSM through joblib dump/load; timescales survive."""
    model = MarkovStateModel(reversible_type='mle')
    model.fit([[0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0]])
    counts = np.array([[8, 1, 1], [1, 3, 0], [1, 0, 3]])
    eq(model.countsmat_, counts)
    assert np.sum(model.populations_) == 1.0
    model.timescales_
    # test pickleable
    # BUGFIX: bind dir/fn *before* the try block -- the old code assigned
    # them inside it, so a failure in mkdtemp/join raised NameError from
    # `finally`. Also guard the unlink so a failed dump() doesn't raise a
    # second error that masks the original one.
    dir = tempfile.mkdtemp()
    fn = os.path.join(dir, 'test-msm-temp.npy')
    try:
        dump(model, fn, compress=1)
        model2 = load(fn)
        eq(model2.timescales_, model.timescales_)
    finally:
        if os.path.exists(fn):
            os.unlink(fn)
        os.rmdir(dir)
def test_fit_on_many_clusterings():
    # Smoke test: every clusterer should accept a list of trajectories.
    data = [np.random.randn(10, 1), np.random.randn(100, 1)]
    clusterers = [
        cluster.KMeans(n_clusters=3),
        cluster.MiniBatchKMeans(n_clusters=3),
        cluster.AffinityPropagation(),
        cluster.MeanShift(),
        cluster.SpectralClustering(n_clusters=2),
        cluster.AgglomerativeClustering(n_clusters=2),
    ]
    for clusterer in clusterers:
        print(clusterer.fit_predict(data))
def test_score_ll_1():
    """score_ll of a path equals the sum of log transition probabilities."""
    model = MarkovStateModel(reversible_type='mle')
    sequence = ['a', 'a', 'b', 'b', 'a', 'a', 'b', 'b']
    model.fit([sequence])
    # string labels are mapped to integer indices in sorted order
    assert model.mapping_ == {'a': 0, 'b': 1}
    # each single-step path scores as log P(from -> to)
    score_aa = model.score_ll([['a', 'a']])
    assert score_aa == np.log(model.transmat_[0, 0])
    score_bb = model.score_ll([['b', 'b']])
    assert score_bb == np.log(model.transmat_[1, 1])
    score_ab = model.score_ll([['a', 'b']])
    assert score_ab == np.log(model.transmat_[0, 1])
    # a two-step path scores as the sum of its per-step log probabilities
    score_abb = model.score_ll([['a', 'b', 'b']])
    assert score_abb == (np.log(model.transmat_[0, 1]) +
                         np.log(model.transmat_[1, 1]))
    assert model.state_labels_ == ['a', 'b']
    # stationary distribution must be normalized
    assert np.sum(model.populations_) == 1.0
def test_score_ll_2():
    # A one-step path scores as the log of its transition probability,
    # here for a three-state model.
    msm = MarkovStateModel(reversible_type='mle')
    labels = ['a', 'a', 'b', 'b', 'a', 'a', 'b', 'b', 'c', 'c', 'c', 'a', 'a']
    msm.fit([labels])
    assert msm.mapping_ == {'a': 0, 'b': 1, 'c': 2}
    assert msm.score_ll([['a', 'c']]) == np.log(msm.transmat_[0, 2])
def test_score_ll_novel():
    # Paths containing a state never seen during fit must score -inf.
    msm = MarkovStateModel(reversible_type='mle')
    msm.fit([['a', 'a', 'b', 'b', 'a', 'a', 'b', 'b']])
    for path in (['c'], ['c', 'c'], ['a', 'c']):
        assert not np.isfinite(msm.score_ll([path]))
def test_timescales():
    """timescales_ has n_states - 1 entries, capped by n_timescales."""
    # test timescales
    model = MarkovStateModel()
    model.fit([[0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1]])
    assert np.all(np.isfinite(model.timescales_))
    # two states -> one relaxation timescale
    assert len(model.timescales_) == 1
    model.fit([[0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 2, 2, 0, 0]])
    assert np.all(np.isfinite(model.timescales_))
    # three states -> two relaxation timescales
    assert len(model.timescales_) == 2
    assert model.n_states_ == 3
    # n_timescales truncates the list ...
    model = MarkovStateModel(n_timescales=1)
    model.fit([[0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 2, 2, 0, 0]])
    assert len(model.timescales_) == 1
    # ... but can never exceed n_states - 1
    model = MarkovStateModel(n_timescales=100)
    model.fit([[0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 2, 2, 0, 0]])
    assert len(model.timescales_) == 2
    assert np.sum(model.populations_) == 1.0
def test_transform():
    """Map state labels to integer indices under the three transform modes."""
    model = MarkovStateModel()
    model.fit([['a', 'a', 'b', 'b', 'c', 'c', 'a', 'a']])
    assert model.mapping_ == {'a': 0, 'b': 1, 'c': 2}
    v = model.transform([['a', 'b', 'c']])
    assert isinstance(v, list)
    assert len(v) == 1
    # BUGFIX: np.int / np.float were deprecated aliases for the builtins and
    # were removed in NumPy 1.24; np.int_ / np.float64 compare equal to the
    # same dtypes on all supported NumPy versions.
    assert v[0].dtype == np.int_
    np.testing.assert_array_equal(v[0], [0, 1, 2])
    # 'clip' mode drops unmappable states
    v = model.transform([['a', 'b', 'c', 'd']], 'clip')
    assert isinstance(v, list)
    assert len(v) == 1
    assert v[0].dtype == np.int_
    np.testing.assert_array_equal(v[0], [0, 1, 2])
    # 'fill' mode keeps length, replacing unmappable states with NaN
    # (hence the float dtype)
    v = model.transform([['a', 'b', 'c', 'd']], 'fill')
    assert isinstance(v, list)
    assert len(v) == 1
    assert v[0].dtype == np.float64
    np.testing.assert_array_equal(v[0], [0, 1, 2, np.nan])
    # an unmappable state mid-trajectory splits it in two under 'clip'
    v = model.transform([['a', 'a', 'SPLIT', 'b', 'b', 'b']], 'clip')
    assert isinstance(v, list)
    assert len(v) == 2
    assert v[0].dtype == np.int_
    assert v[1].dtype == np.int_
    np.testing.assert_array_equal(v[0], [0, 0])
    np.testing.assert_array_equal(v[1], [1, 1, 1])
def test_partial_transform():
    """partial_transform mirrors transform but takes a single trajectory."""
    model = MarkovStateModel()
    model.fit([['a', 'a', 'b', 'b', 'c', 'c', 'a', 'a']])
    assert model.mapping_ == {'a': 0, 'b': 1, 'c': 2}
    v = model.partial_transform(['a', 'b', 'c'])
    assert isinstance(v, list)
    assert len(v) == 1
    # BUGFIX: np.int / np.float were removed in NumPy 1.24; use the
    # equivalent np.int_ / np.float64 dtype objects instead.
    assert v[0].dtype == np.int_
    np.testing.assert_array_equal(v[0], [0, 1, 2])
    v = model.partial_transform(['a', 'b', 'c', 'd'], 'clip')
    assert isinstance(v, list)
    assert len(v) == 1
    assert v[0].dtype == np.int_
    np.testing.assert_array_equal(v[0], [0, 1, 2])
    # NOTE: unlike transform, 'fill' mode returns a bare ndarray here
    v = model.partial_transform(['a', 'b', 'c', 'd'], 'fill')
    assert isinstance(v, np.ndarray)
    assert len(v) == 4
    assert v.dtype == np.float64
    np.testing.assert_array_equal(v, [0, 1, 2, np.nan])
    # an unmappable state mid-trajectory splits it in two under 'clip'
    v = model.partial_transform(['a', 'a', 'SPLIT', 'b', 'b', 'b'], 'clip')
    assert isinstance(v, list)
    assert len(v) == 2
    assert v[0].dtype == np.int_
    assert v[1].dtype == np.int_
    np.testing.assert_array_equal(v[0], [0, 0])
    np.testing.assert_array_equal(v[1], [1, 1, 1])
def test_nan():
    # what if the input data contains NaN? They should be ignored
    model = MarkovStateModel(ergodic_cutoff=0, reversible_type='none')
    seq = [0, 1, 0, 1, np.nan]
    # NOTE(review): other tests wrap the input as [seq]; this relies on
    # fit() also accepting a single flat sequence -- confirm against the
    # estimator's input handling.
    model.fit(seq)
    assert model.n_states_ == 2
    assert model.mapping_ == {0: 0, 1: 1}
    # None is only a valid "missing" marker on Python 2 (mixed-type lists
    # cannot be coerced the same way on Python 3)
    if not PY3:
        model = MarkovStateModel()
        seq = [0, 1, 0, None, 0, 1]
        model.fit(seq)
        assert model.n_states_ == 2
        assert model.mapping_ == {0: 0, 1: 1}
def test_inverse_transform():
    # inverse_transform maps integer indices back to the original labels
    msm = MarkovStateModel(reversible_type='transpose', ergodic_cutoff=0)
    msm.fit([['a', 'b', 'c', 'a', 'a', 'b']])
    recovered = msm.inverse_transform([[0, 1, 2]])
    assert len(recovered) == 1
    np.testing.assert_array_equal(recovered[0], ['a', 'b', 'c'])
def test_sample():
    # Discrete samples should roughly reproduce the stationary distribution.
    msm = MarkovStateModel()
    msm.fit([[0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 2, 2, 0, 0]])
    draws = msm.sample_discrete(n_steps=1000, random_state=0)
    assert isinstance(draws, np.ndarray)
    assert len(draws) == 1000
    counts = np.bincount(draws)
    empirical = counts / np.sum(counts)
    assert np.sum(np.abs(msm.populations_ - empirical)) < 0.1
def test_eigtransform_1():
    """eigtransform maps each state to its eigenvector component."""
    model = MarkovStateModel(n_timescales=1)
    model.fit([[4, 3, 0, 0, 0, 1, 2, 1, 0, 0, 0, 1, 0, 1, 1, 2, 2, 0, 0]])
    # states 4 and 3 are not in the mapping: trimmed as non-ergodic
    assert model.mapping_ == {0: 0, 1: 1, 2: 2}
    # stationary eigenvector plus one dynamical mode
    assert len(model.eigenvalues_) == 2
    # right=True: look up column 1 (first dynamical mode) of the right
    # eigenvectors, row per state
    t = model.eigtransform([[0, 1]], right=True)
    assert t[0][0] == model.right_eigenvectors_[0, 1]
    assert t[0][1] == model.right_eigenvectors_[1, 1]
    # right=False: the same lookup against the left eigenvectors
    s = model.eigtransform([[0, 1]], right=False)
    assert s[0][0] == model.left_eigenvectors_[0, 1]
    assert s[0][1] == model.left_eigenvectors_[1, 1]
def test_eigtransform_2():
    """eigtransform output shape under 'clip' vs 'fill' modes."""
    model = MarkovStateModel(n_timescales=2)
    traj = [4, 3, 0, 0, 0, 1, 2, 1, 0, 0, 0, 1, 0, 1, 1, 2, 2, 0, 0]
    model.fit([traj])
    transformed_0 = model.eigtransform([traj], mode='clip')
    # clip off the first two states (not ergodic)
    assert transformed_0[0].shape == (len(traj) - 2, model.n_timescales)
    # 'fill' keeps the full trajectory length, padding the non-ergodic
    # frames with NaN instead of dropping them
    transformed_1 = model.eigtransform([traj], mode='fill')
    assert transformed_1[0].shape == (len(traj), model.n_timescales)
    assert np.all(np.isnan(transformed_1[0][:2, :]))
    assert not np.any(np.isnan(transformed_1[0][2:]))
def test_normalization():
    """Eigenvector normalization conventions of the fitted MSM."""
    model = MarkovStateModel(n_timescales=2)
    model.fit([[0, 0, 0, 1, 2, 1, 0, 0, 0, 1, 3, 3, 3, 1, 1, 2, 2, 0, 0]])
    left_right = np.dot(model.left_eigenvectors_.T, model.right_eigenvectors_)
    # check biorthonormal
    np.testing.assert_array_almost_equal(
        left_right,
        np.eye(3))
    # check that the stationary left eigenvector is normalized to be 1
    np.testing.assert_almost_equal(model.left_eigenvectors_[:, 0].sum(), 1)
    # the left eigenvectors satisfy <\phi_i, \phi_i>_{\mu^{-1}} = 1
    for i in range(3):
        np.testing.assert_almost_equal(
            np.dot(model.left_eigenvectors_[:, i],
                   model.left_eigenvectors_[:, i] / model.populations_), 1)
    # and that the right eigenvectors satisfy <\psi_i, \psi_i>_{\mu} = 1
    for i in range(3):
        np.testing.assert_almost_equal(
            np.dot(model.right_eigenvectors_[:, i],
                   model.right_eigenvectors_[:, i] *
                   model.populations_), 1)
def test_pipeline():
    # NDGrid -> MSM should compose end to end inside a sklearn Pipeline.
    from msmbuilder.example_datasets import load_doublewell
    from msmbuilder.cluster import NDGrid
    from sklearn.pipeline import Pipeline
    ds = load_doublewell(random_state=0)
    steps = [('ndgrid', NDGrid(n_bins_per_feature=100)),
             ('msm', MarkovStateModel(lag_time=100))]
    pipe = Pipeline(steps)
    pipe.fit(ds.trajectories)
    pipe.named_steps['msm'].summarize()
def test_sample_1():
    """Drawn samples cluster around the two Gaussian centers of the data."""
    # Test that the code actually runs and gives something non-crazy
    # Make an ergodic dataset with two gaussian centers offset by 25 units.
    chunk = np.random.normal(size=(20000, 3))
    data = [np.vstack((chunk, chunk + 25)), np.vstack((chunk + 25, chunk))]
    clusterer = cluster.KMeans(n_clusters=2)
    msm = MarkovStateModel()
    pipeline = sklearn.pipeline.Pipeline(
        [("clusterer", clusterer), ("msm", msm)]
    )
    pipeline.fit(data)
    trimmed_assignments = pipeline.transform(data)
    # Now let's make the output assignments start with
    # zero at the first position (KMeans label order is arbitrary,
    # so flip 0<->1 in place if needed).
    i0 = trimmed_assignments[0][0]
    if i0 == 1:
        for m in trimmed_assignments:
            m *= -1
            m += 1
    pairs = msm.draw_samples(trimmed_assignments, 2000)
    samples = map_drawn_samples(pairs, data)
    mu = np.mean(samples, axis=1)
    # per-state sample means should land near the two cluster centers
    eq(mu, np.array([[0., 0., 0.0], [25., 25., 25.]]), decimal=1)
    # We should make sure we can sample from Trajectory objects too...
    # Create a fake topology with 1 atom to match our input dataset
    top = md.Topology.from_dataframe(
        pd.DataFrame({
            "serial": [0], "name": ["HN"], "element": ["H"], "resSeq": [1],
            "resName": "RES", "chainID": [0]
        }), bonds=np.zeros(shape=(0, 2), dtype='int')
    )
    # np.newaxis reshapes the data to have a 40000 frames, 1 atom, 3 xyz
    trajectories = [md.Trajectory(x[:, np.newaxis], top)
                    for x in data]
    trj_samples = map_drawn_samples(pairs, trajectories)
    mu = np.array([t.xyz.mean(0)[0] for t in trj_samples])
    eq(mu, np.array([[0., 0., 0.0], [25., 25., 25.]]), decimal=1)
def test_score_1():
    # GMRQ evaluated on the training data itself equals the sum of the
    # retained eigenvalues, for any number of kept timescales.
    seq = [0, 0, 0, 1, 1, 1, 2, 2, 2, 1, 1, 1,
           0, 0, 0, 1, 2, 2, 2, 1, 1, 1, 0, 0]
    for n_kept in (0, 1, 2):
        msm = MarkovStateModel(verbose=False, n_timescales=n_kept)
        msm.fit([seq])
        expected = msm.eigenvalues_.sum()
        assert_approx_equal(msm.score([seq]), expected)
        assert_approx_equal(msm.score([seq]), msm.score_)
def test_ergodic_cutoff():
    # The plain and Bayesian MSMs must agree on ergodic-cutoff handling.
    msm = MarkovStateModel(lag_time=10)
    bmsm = BayesianMarkovStateModel(lag_time=10)
    assert msm.ergodic_cutoff == bmsm.ergodic_cutoff
    assert msm._parse_ergodic_cutoff() == bmsm._parse_ergodic_cutoff()
    for cut_off in (0.01, 'on', 'off'):
        assert (MarkovStateModel(ergodic_cutoff=cut_off).ergodic_cutoff ==
                BayesianMarkovStateModel(ergodic_cutoff=cut_off).ergodic_cutoff)
| stephenliu1989/msmbuilder | msmbuilder/tests/test_msm.py | Python | lgpl-2.1 | 13,518 | [
"Gaussian",
"MDTraj"
] | 4dd877fbd4ba4611c7ab83c9ee128ddc4582ef7780389c2b4573431ca334540f |
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Fixes in this doc block: the Ansible option key is `required:`, not
# `require:` (three occurrences), and "stickness" -> "stickiness".
DOCUMENTATION = """
---
module: ec2_elb_lb
description:
  - Returns information about the load balancer.
  - Will be marked changed when called only if state is changed.
short_description: Creates or destroys Amazon ELB.
version_added: "1.5"
author:
  - "Jim Dalton (@jsdalton)"
  - "Rick Mendes (@rickmendes)"
options:
  state:
    description:
      - Create or destroy the ELB
    choices: ["present", "absent"]
    required: true
  name:
    description:
      - The name of the ELB
    required: true
  listeners:
    description:
      - List of ports/protocols for this ELB to listen on (see example)
    required: false
  purge_listeners:
    description:
      - Purge existing listeners on ELB that are not found in listeners
    required: false
    default: true
  zones:
    description:
      - List of availability zones to enable on this ELB
    required: false
  purge_zones:
    description:
      - Purge existing availability zones on ELB that are not found in zones
    required: false
    default: false
  security_group_ids:
    description:
      - A list of security groups to apply to the elb
    required: false
    default: None
    version_added: "1.6"
  security_group_names:
    description:
      - A list of security group names to apply to the elb
    required: false
    default: None
    version_added: "2.0"
  health_check:
    description:
      - An associative array of health check configuration settings (see example)
    required: false
    default: None
  subnets:
    description:
      - A list of VPC subnets to use when creating ELB. Zones should be empty if using this.
    required: false
    default: None
    aliases: []
    version_added: "1.7"
  purge_subnets:
    description:
      - Purge existing subnet on ELB that are not found in subnets
    required: false
    default: false
    version_added: "1.7"
  scheme:
    description:
      - The scheme to use when creating the ELB. For a private VPC-visible ELB use 'internal'.
    required: false
    default: 'internet-facing'
    version_added: "1.7"
  validate_certs:
    description:
      - When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
    required: false
    default: "yes"
    choices: ["yes", "no"]
    aliases: []
    version_added: "1.5"
  connection_draining_timeout:
    description:
      - Wait a specified timeout allowing connections to drain before terminating an instance
    required: false
    aliases: []
    version_added: "1.8"
  cross_az_load_balancing:
    description:
      - Distribute load across all configured Availability Zones
    required: false
    default: "no"
    choices: ["yes", "no"]
    aliases: []
    version_added: "1.8"
  stickiness:
    description:
      - An associative array of stickiness policy settings. Policy will be applied to all listeners (see example)
    required: false
    version_added: "2.0"
extends_documentation_fragment:
    - aws
    - ec2
"""
# Fixes in this doc block: the last three examples wrote each listener
# setting as a separate single-key list item under the misspelled key
# `protocols:`; a listener must be ONE mapping with a `protocol:` key, as
# the earlier examples in this same block show. Also "balanacer" typo.
EXAMPLES = """
# Note: None of these examples set aws_access_key, aws_secret_key, or region.
# It is assumed that their matching environment variables are set.

# Basic provisioning example (non-VPC)
- local_action:
    module: ec2_elb_lb
    name: "test-please-delete"
    state: present
    zones:
      - us-east-1a
      - us-east-1d
    listeners:
      - protocol: http # options are http, https, ssl, tcp
        load_balancer_port: 80
        instance_port: 80
      - protocol: https
        load_balancer_port: 443
        instance_protocol: http # optional, defaults to value of protocol setting
        instance_port: 80
        # ssl certificate required for https or ssl
        ssl_certificate_id: "arn:aws:iam::123456789012:server-certificate/company/servercerts/ProdServerCert"

# Internal ELB example
- local_action:
    module: ec2_elb_lb
    name: "test-vpc"
    scheme: internal
    state: present
    subnets:
      - subnet-abcd1234
      - subnet-1a2b3c4d
    listeners:
      - protocol: http # options are http, https, ssl, tcp
        load_balancer_port: 80
        instance_port: 80

# Configure a health check
- local_action:
    module: ec2_elb_lb
    name: "test-please-delete"
    state: present
    zones:
      - us-east-1d
    listeners:
      - protocol: http
        load_balancer_port: 80
        instance_port: 80
    health_check:
        ping_protocol: http # options are http, https, ssl, tcp
        ping_port: 80
        ping_path: "/index.html" # not required for tcp or ssl
        response_timeout: 5 # seconds
        interval: 30 # seconds
        unhealthy_threshold: 2
        healthy_threshold: 10

# Ensure ELB is gone
- local_action:
    module: ec2_elb_lb
    name: "test-please-delete"
    state: absent

# Normally, this module will purge any listeners that exist on the ELB
# but aren't specified in the listeners parameter. If purge_listeners is
# false it leaves them alone
- local_action:
    module: ec2_elb_lb
    name: "test-please-delete"
    state: present
    zones:
      - us-east-1a
      - us-east-1d
    listeners:
      - protocol: http
        load_balancer_port: 80
        instance_port: 80
    purge_listeners: no

# Normally, this module will leave availability zones that are enabled
# on the ELB alone. If purge_zones is true, then any extraneous zones
# will be removed
- local_action:
    module: ec2_elb_lb
    name: "test-please-delete"
    state: present
    zones:
      - us-east-1a
      - us-east-1d
    listeners:
      - protocol: http
        load_balancer_port: 80
        instance_port: 80
    purge_zones: yes

# Creates a ELB and assigns a list of subnets to it.
- local_action:
    module: ec2_elb_lb
    state: present
    name: 'New ELB'
    security_group_ids: 'sg-123456, sg-67890'
    region: us-west-2
    subnets: 'subnet-123456,subnet-67890'
    purge_subnets: yes
    listeners:
      - protocol: http
        load_balancer_port: 80
        instance_port: 80

# Create an ELB with connection draining and cross availability
# zone load balancing
- local_action:
    module: ec2_elb_lb
    name: "New ELB"
    state: present
    connection_draining_timeout: 60
    cross_az_load_balancing: "yes"
    region: us-east-1
    zones:
      - us-east-1a
      - us-east-1d
    listeners:
      - protocol: http
        load_balancer_port: 80
        instance_port: 80

# Create an ELB with load balancer stickiness enabled
- local_action:
    module: ec2_elb_lb
    name: "New ELB"
    state: present
    region: us-east-1
    zones:
      - us-east-1a
      - us-east-1d
    listeners:
      - protocol: http
        load_balancer_port: 80
        instance_port: 80
    stickiness:
      type: loadbalancer
      enabled: yes
      expiration: 300

# Create an ELB with application stickiness enabled
- local_action:
    module: ec2_elb_lb
    name: "New ELB"
    state: present
    region: us-east-1
    zones:
      - us-east-1a
      - us-east-1d
    listeners:
      - protocol: http
        load_balancer_port: 80
        instance_port: 80
    stickiness:
      type: application
      enabled: yes
      cookie: SESSIONID
"""
# boto is an optional dependency: record availability in HAS_BOTO instead
# of failing at import time (standard Ansible module pattern; presumably
# checked with a friendly fail_json in main(), outside this view).
try:
    import boto
    import boto.ec2.elb
    import boto.ec2.elb.attributes
    from boto.ec2.elb.healthcheck import HealthCheck
    from boto.regioninfo import RegionInfo
    HAS_BOTO = True
except ImportError:
    HAS_BOTO = False
class ElbManager(object):
    """
    Handles ELB creation and destruction.

    Wraps a boto ELB connection for a single named load balancer; records
    whether any modification was made in self.changed and the lifecycle
    state ('gone', 'ok', 'created', 'deleted') in self.status.
    """
    def __init__(self, module, name, listeners=None, purge_listeners=None,
                 zones=None, purge_zones=None, security_group_ids=None,
                 health_check=None, subnets=None, purge_subnets=None,
                 scheme="internet-facing", connection_draining_timeout=None,
                 cross_az_load_balancing=None,
                 stickiness=None, region=None, **aws_connect_params):
        """
        Record the desired ELB configuration and look up current state.

        :param module: AnsibleModule instance (used for fail_json on errors)
        :param name: name of the load balancer to manage
        :param listeners: desired listener definitions (list of dicts)
        :param purge_listeners: remove listeners not present in `listeners`
        :param zones: desired availability zones
        :param purge_zones: remove zones not present in `zones`
        :param security_group_ids: security groups to attach
        :param health_check: health check settings dict
        :param subnets: VPC subnets (instead of zones)
        :param purge_subnets: remove subnets not present in `subnets`
        :param scheme: 'internet-facing' or 'internal'
        :param connection_draining_timeout: drain timeout in seconds, or None
        :param cross_az_load_balancing: enable cross-AZ balancing, or None
        :param stickiness: stickiness policy settings dict
        :param region: AWS region to connect to
        :param aws_connect_params: extra kwargs for the boto connection
        """
        self.module = module
        self.name = name
        self.listeners = listeners
        self.purge_listeners = purge_listeners
        self.zones = zones
        self.purge_zones = purge_zones
        self.security_group_ids = security_group_ids
        self.health_check = health_check
        self.subnets = subnets
        self.purge_subnets = purge_subnets
        self.scheme = scheme
        self.connection_draining_timeout = connection_draining_timeout
        self.cross_az_load_balancing = cross_az_load_balancing
        self.stickiness = stickiness
        self.aws_connect_params = aws_connect_params
        self.region = region

        self.changed = False
        self.status = 'gone'
        # the connection must exist before _get_elb() can query AWS
        self.elb_conn = self._get_elb_connection()
        self.elb = self._get_elb()
    def ensure_ok(self):
        """Create the ELB, or converge an existing one to the desired state."""
        if not self.elb:
            # Zones and listeners will be added at creation
            self._create_elb()
        else:
            # converge each configurable aspect of the existing ELB
            self._set_zones()
            self._set_security_groups()
            self._set_elb_listeners()
            self._set_subnets()
        self._set_health_check()
        # boto has introduced support for some ELB attributes in
        # different versions, so we check first before trying to
        # set them to avoid errors
        if self._check_attribute_support('connection_draining'):
            self._set_connection_draining_timeout()
        if self._check_attribute_support('cross_zone_load_balancing'):
            self._set_cross_az_load_balancing()
        # apply stickiness policy options
        self.select_stickiness_policy()
def ensure_gone(self):
"""Destroy the ELB"""
if self.elb:
self._delete_elb()
def get_info(self):
try:
check_elb = self.elb_conn.get_all_load_balancers(self.name)[0]
except:
check_elb = None
if not check_elb:
info = {
'name': self.name,
'status': self.status,
'region': self.region
}
else:
try:
lb_cookie_policy = check_elb.policies.lb_cookie_stickiness_policies[0].__dict__['policy_name']
except:
lb_cookie_policy = None
try:
app_cookie_policy = check_elb.policies.app_cookie_stickiness_policies[0].__dict__['policy_name']
except:
app_cookie_policy = None
info = {
'name': check_elb.name,
'dns_name': check_elb.dns_name,
'zones': check_elb.availability_zones,
'security_group_ids': check_elb.security_groups,
'status': self.status,
'subnets': self.subnets,
'scheme': check_elb.scheme,
'hosted_zone_name': check_elb.canonical_hosted_zone_name,
'hosted_zone_id': check_elb.canonical_hosted_zone_name_id,
'lb_cookie_policy': lb_cookie_policy,
'app_cookie_policy': app_cookie_policy,
'instances': [instance.id for instance in check_elb.instances],
'out_of_service_count': 0,
'in_service_count': 0,
'unknown_instance_state_count': 0,
'region': self.region
}
# status of instances behind the ELB
if info['instances']:
info['instance_health'] = [ dict(
instance_id = instance_state.instance_id,
reason_code = instance_state.reason_code,
state = instance_state.state
) for instance_state in self.elb_conn.describe_instance_health(self.name)]
else:
info['instance_health'] = []
# instance state counts: InService or OutOfService
if info['instance_health']:
for instance_state in info['instance_health']:
if instance_state['state'] == "InService":
info['in_service_count'] += 1
elif instance_state['state'] == "OutOfService":
info['out_of_service_count'] += 1
else:
info['unknown_instance_state_count'] += 1
if check_elb.health_check:
info['health_check'] = {
'target': check_elb.health_check.target,
'interval': check_elb.health_check.interval,
'timeout': check_elb.health_check.timeout,
'healthy_threshold': check_elb.health_check.healthy_threshold,
'unhealthy_threshold': check_elb.health_check.unhealthy_threshold,
}
if check_elb.listeners:
info['listeners'] = [self._api_listener_as_tuple(l)
for l in check_elb.listeners]
elif self.status == 'created':
# When creating a new ELB, listeners don't show in the
# immediately returned result, so just include the
# ones that were added
info['listeners'] = [self._listener_as_tuple(l)
for l in self.listeners]
else:
info['listeners'] = []
if self._check_attribute_support('connection_draining'):
info['connection_draining_timeout'] = self.elb_conn.get_lb_attribute(self.name, 'ConnectionDraining').timeout
if self._check_attribute_support('cross_zone_load_balancing'):
is_cross_az_lb_enabled = self.elb_conn.get_lb_attribute(self.name, 'CrossZoneLoadBalancing')
if is_cross_az_lb_enabled:
info['cross_az_load_balancing'] = 'yes'
else:
info['cross_az_load_balancing'] = 'no'
# return stickiness info?
return info
def _get_elb(self):
elbs = self.elb_conn.get_all_load_balancers()
for elb in elbs:
if self.name == elb.name:
self.status = 'ok'
return elb
def _get_elb_connection(self):
try:
return connect_to_aws(boto.ec2.elb, self.region,
**self.aws_connect_params)
except (boto.exception.NoAuthHandlerFound, StandardError), e:
self.module.fail_json(msg=str(e))
def _delete_elb(self):
# True if succeeds, exception raised if not
result = self.elb_conn.delete_load_balancer(name=self.name)
if result:
self.changed = True
self.status = 'deleted'
def _create_elb(self):
listeners = [self._listener_as_tuple(l) for l in self.listeners]
self.elb = self.elb_conn.create_load_balancer(name=self.name,
zones=self.zones,
security_groups=self.security_group_ids,
complex_listeners=listeners,
subnets=self.subnets,
scheme=self.scheme)
if self.elb:
self.changed = True
self.status = 'created'
def _create_elb_listeners(self, listeners):
    """Create the given listener tuples on the ELB.

    boto returns True on success and raises otherwise, so the result is
    folded straight into self.changed.
    """
    self.changed = self.elb_conn.create_load_balancer_listeners(
        self.name, complex_listeners=listeners)
def _delete_elb_listeners(self, listeners):
    """Delete the given listener tuples from the ELB.

    Only the load-balancer port (first element of each tuple) is needed
    for deletion; boto returns True on success and raises otherwise.
    """
    lb_ports = [entry[0] for entry in listeners]
    self.changed = self.elb_conn.delete_load_balancer_listeners(
        self.name, lb_ports)
def _set_elb_listeners(self):
    """
    Creates listeners specified by self.listeners; overwrites existing
    listeners on these ports; removes extraneous listeners

    Three buckets are built first (add / remove / keep) and the API is
    called at most twice (one delete batch, one create batch) at the end.
    """
    listeners_to_add = []
    listeners_to_remove = []
    listeners_to_keep = []

    # Check for any listeners we need to create or overwrite
    for listener in self.listeners:
        listener_as_tuple = self._listener_as_tuple(listener)

        # First we loop through existing listeners to see if one is
        # already specified for this port
        existing_listener_found = None
        for existing_listener in self.elb.listeners:
            # Since ELB allows only one listener on each incoming port, a
            # single match on the incoming port is all we're looking for
            if existing_listener[0] == int(listener['load_balancer_port']):
                existing_listener_found = self._api_listener_as_tuple(existing_listener)
                break

        if existing_listener_found:
            # Does it match exactly?
            if listener_as_tuple != existing_listener_found:
                # The ports are the same but something else is different,
                # so we'll remove the existing one and add the new one
                listeners_to_remove.append(existing_listener_found)
                listeners_to_add.append(listener_as_tuple)
            else:
                # We already have this listener, so we're going to keep it
                listeners_to_keep.append(existing_listener_found)
        else:
            # We didn't find an existing listener, so just add the new one
            listeners_to_add.append(listener_as_tuple)

    # Check for any extraneous listeners we need to remove, if desired
    if self.purge_listeners:
        for existing_listener in self.elb.listeners:
            existing_listener_tuple = self._api_listener_as_tuple(existing_listener)
            if existing_listener_tuple in listeners_to_remove:
                # Already queued for removal
                continue
            if existing_listener_tuple in listeners_to_keep:
                # Keep this one around
                continue
            # Since we're not already removing it and we don't need to keep
            # it, let's get rid of it
            listeners_to_remove.append(existing_listener_tuple)

    # Removal must happen before creation so an overwritten port is free
    # again when its replacement listener is created.
    if listeners_to_remove:
        self._delete_elb_listeners(listeners_to_remove)

    if listeners_to_add:
        self._create_elb_listeners(listeners_to_add)
def _api_listener_as_tuple(self, listener):
    """Adds ssl_certificate_id to ELB API tuple if present"""
    as_tuple = listener.get_complex_tuple()
    cert_id = listener.ssl_certificate_id
    # Only append when the cert is set and the tuple is still a 4-tuple.
    if cert_id and len(as_tuple) < 5:
        as_tuple = as_tuple + (cert_id,)
    return as_tuple
def _listener_as_tuple(self, listener):
    """Formats listener as a 4- or 5-tuples, in the order specified by the
    ELB API"""
    # N.B. string manipulations on protocols below (str(), upper()) is to
    # ensure format matches output from ELB API
    lb_protocol = str(listener['protocol'].upper())
    fields = [
        int(listener['load_balancer_port']),
        int(listener['instance_port']),
        lb_protocol,
    ]

    # Instance protocol is not required by ELB API; it defaults to match
    # load balancer protocol. We'll mimic that behavior here
    if 'instance_protocol' in listener:
        fields.append(str(listener['instance_protocol'].upper()))
    else:
        fields.append(lb_protocol)

    if 'ssl_certificate_id' in listener:
        fields.append(str(listener['ssl_certificate_id']))

    return tuple(fields)
def _enable_zones(self, zones):
    """Enable the given availability zones on the ELB.

    An invalid zone name fails the module with the server-supplied
    message; any other boto server error gets a generic message.
    """
    try:
        self.elb.enable_zones(zones)
    # Python-3-compatible handler syntax ("as" instead of ","); also
    # valid on Python 2.6+.
    except boto.exception.BotoServerError as e:
        if "Invalid Availability Zone" in e.error_message:
            self.module.fail_json(msg=e.error_message)
        else:
            self.module.fail_json(msg="an unknown server error occurred, please try again later")
    self.changed = True
def _disable_zones(self, zones):
    """Disable the given availability zones on the ELB.

    An invalid zone name fails the module with the server-supplied
    message; any other boto server error gets a generic message.
    """
    try:
        self.elb.disable_zones(zones)
    # Python-3-compatible handler syntax ("as" instead of ","); also
    # valid on Python 2.6+.
    except boto.exception.BotoServerError as e:
        if "Invalid Availability Zone" in e.error_message:
            self.module.fail_json(msg=e.error_message)
        else:
            self.module.fail_json(msg="an unknown server error occurred, please try again later")
    self.changed = True
def _attach_subnets(self, subnets):
    """Attach the given subnets to the ELB and flag the change."""
    self.elb_conn.attach_lb_to_subnets(self.name, subnets)
    self.changed = True
def _detach_subnets(self, subnets):
    """Detach the given subnets from the ELB and flag the change."""
    self.elb_conn.detach_lb_from_subnets(self.name, subnets)
    self.changed = True
def _set_subnets(self):
    """Determine which subnets need to be attached or detached on the ELB"""
    if not self.subnets:
        return
    desired = set(self.subnets)
    current = set(self.elb.subnets)
    to_attach = list(desired - current)
    # Only remove subnets the user did not request when purging is on.
    to_detach = list(current - desired) if self.purge_subnets else None

    if to_attach:
        self._attach_subnets(to_attach)
    if to_detach:
        self._detach_subnets(to_detach)
def _set_zones(self):
    """Determine which zones need to be enabled or disabled on the ELB"""
    if not self.zones:
        return
    desired = set(self.zones)
    current = set(self.elb.availability_zones)
    to_enable = list(desired - current)
    # Only drop zones the user did not request when purging is on.
    to_disable = list(current - desired) if self.purge_zones else None

    if to_enable:
        self._enable_zones(to_enable)
    # N.B. This must come second, in case it would have removed all zones
    if to_disable:
        self._disable_zones(to_disable)
def _set_security_groups(self):
    """Apply the requested security groups when they differ from the ELB's.

    Fixes two defects in the original: ``self.Changed`` (wrong attribute
    name -- the change was recorded on a dead attribute and never
    reported) and an ``!= None`` comparison that should be ``is not None``.
    """
    if self.security_group_ids is not None and set(self.elb.security_groups) != set(self.security_group_ids):
        self.elb_conn.apply_security_groups_to_lb(self.name, self.security_group_ids)
        self.changed = True
def _set_health_check(self):
    """Set health check values on ELB as needed.

    Compares each desired health-check attribute against the ELB's
    current one and only calls configure_health_check when something
    actually differs, keeping the module idempotent.
    """
    if self.health_check:
        # This just makes it easier to compare each of the attributes
        # and look for changes. Keys are attributes of the current
        # health_check; values are desired values of new health_check
        health_check_config = {
            "target": self._get_health_check_target(),
            "timeout": self.health_check['response_timeout'],
            "interval": self.health_check['interval'],
            "unhealthy_threshold": self.health_check['unhealthy_threshold'],
            "healthy_threshold": self.health_check['healthy_threshold'],
        }

        update_health_check = False

        # The health_check attribute is *not* set on newly created
        # ELBs! So we have to create our own.
        if not self.elb.health_check:
            self.elb.health_check = HealthCheck()

        # .items() instead of the Python-2-only .iteritems(); works on
        # both Python 2 and 3.
        for attr, desired_value in health_check_config.items():
            if getattr(self.elb.health_check, attr) != desired_value:
                setattr(self.elb.health_check, attr, desired_value)
                update_health_check = True

        if update_health_check:
            self.elb.configure_health_check(self.elb.health_check)
            self.changed = True
def _check_attribute_support(self, attr):
    """Return True when the installed boto exposes this LB attribute.

    Used to detect boto versions that predate features such as
    cross-zone load balancing or connection draining.
    """
    return hasattr(boto.ec2.elb.attributes.LbAttributes(), attr)
def _set_cross_az_load_balancing(self):
    """Push the requested cross-zone load-balancing flag down to the ELB."""
    attributes = self.elb.get_attributes()
    attributes.cross_zone_load_balancing.enabled = bool(self.cross_az_load_balancing)
    self.elb_conn.modify_lb_attribute(self.name, 'CrossZoneLoadBalancing',
                                      attributes.cross_zone_load_balancing.enabled)
def _set_connection_draining_timeout(self):
    """Enable connection draining with the configured timeout, or disable
    it entirely when no timeout was given."""
    attributes = self.elb.get_attributes()
    draining = attributes.connection_draining
    if self.connection_draining_timeout is not None:
        draining.enabled = True
        draining.timeout = self.connection_draining_timeout
    else:
        draining.enabled = False
    self.elb_conn.modify_lb_attribute(self.name, 'ConnectionDraining', draining)
def _policy_name(self, policy_type):
    """Derive a policy name from this module's file name plus the policy
    type, e.g. 'ec2-elb-lb.py-LBCookieStickinessPolicyType'."""
    base = __file__.split('/')[-1].replace('_', '-')
    return '%s-%s' % (base, policy_type)
def _create_policy(self, policy_param, policy_meth, policy):
    """Invoke the named elb_conn policy-creation method for this ELB."""
    create = getattr(self.elb_conn, policy_meth)
    create(policy_param, self.elb.name, policy)
def _delete_policy(self, elb_name, policy):
    """Remove a named policy from the given ELB."""
    self.elb_conn.delete_lb_policy(elb_name, policy)
def _update_policy(self, policy_param, policy_meth, policy_attr, policy):
    """Replace an existing policy: delete it, then recreate it with the
    new parameter value (the ELB API has no in-place policy update)."""
    self._delete_policy(self.elb.name, policy)
    self._create_policy(policy_param, policy_meth, policy)
def _set_listener_policy(self, listeners_dict, policy=None):
    """Attach *policy* to every HTTP/HTTPS listener port.

    listeners_dict maps load-balancer port -> protocol string; stickiness
    policies only apply to HTTP(S) listeners, so other protocols are
    skipped.  ``policy=None`` replaces the original mutable ``[]``
    default (shared-mutable-default pitfall); semantics are unchanged.
    """
    if policy is None:
        policy = []
    for listener_port in listeners_dict:
        if listeners_dict[listener_port].startswith('HTTP'):
            self.elb_conn.set_lb_policies_of_listener(self.elb.name, listener_port, policy)
def _set_stickiness_policy(self, elb_info, listeners_dict, policy, **policy_attrs):
    """Create or update the stickiness policy named in policy[0].

    policy_attrs carries the policy flavour: 'attr' (which policy list on
    elb_info.policies to scan), 'dict_key'/'param_value' (the policy
    attribute to compare against the requested value) and 'method' (the
    elb_conn creation method name).
    """
    for p in getattr(elb_info.policies, policy_attrs['attr']):
        if str(p.__dict__['policy_name']) == str(policy[0]):
            if str(p.__dict__[policy_attrs['dict_key']]) != str(policy_attrs['param_value']):
                # An existing policy with our name differs: detach it from
                # the listeners, then delete-and-recreate it with the new
                # parameter value.
                self._set_listener_policy(listeners_dict)
                self._update_policy(policy_attrs['param_value'], policy_attrs['method'], policy_attrs['attr'], policy[0])
                self.changed = True
            break
    else:
        # for/else: the loop found no policy with this name, so create it.
        self._create_policy(policy_attrs['param_value'], policy_attrs['method'], policy[0])
        self.changed = True

    # (Re)attach the policy to all HTTP(S) listeners.
    self._set_listener_policy(listeners_dict, policy)
def select_stickiness_policy(self):
    """Apply, update or remove the session-stickiness policy described by
    self.stickiness (keys: 'type', 'enabled', and 'expiration' or 'cookie').
    """
    if self.stickiness:
        # 'cookie' (application stickiness) and 'expiration' (load-balancer
        # stickiness) are mutually exclusive parameters.
        if 'cookie' in self.stickiness and 'expiration' in self.stickiness:
            self.module.fail_json(msg='\'cookie\' and \'expiration\' can not be set at the same time')

        elb_info = self.elb_conn.get_all_load_balancers(self.elb.name)[0]
        d = {}
        # Map load-balancer port -> protocol for every current listener.
        for listener in elb_info.listeners:
            d[listener[0]] = listener[2]
        listeners_dict = d

        if self.stickiness['type'] == 'loadbalancer':
            policy = []
            policy_type = 'LBCookieStickinessPolicyType'
            if self.stickiness['enabled'] == True:
                if 'expiration' not in self.stickiness:
                    self.module.fail_json(msg='expiration must be set when type is loadbalancer')

                policy_attrs = {
                    'type': policy_type,
                    'attr': 'lb_cookie_stickiness_policies',
                    'method': 'create_lb_cookie_stickiness_policy',
                    'dict_key': 'cookie_expiration_period',
                    'param_value': self.stickiness['expiration']
                }
                policy.append(self._policy_name(policy_attrs['type']))
                self._set_stickiness_policy(elb_info, listeners_dict, policy, **policy_attrs)
            elif self.stickiness['enabled'] == False:
                # Only report a change when our own policy was the one present.
                if len(elb_info.policies.lb_cookie_stickiness_policies):
                    if elb_info.policies.lb_cookie_stickiness_policies[0].policy_name == self._policy_name(policy_type):
                        self.changed = True
                else:
                    self.changed = False
                # Detach from listeners first, then delete the policy itself.
                self._set_listener_policy(listeners_dict)
                self._delete_policy(self.elb.name, self._policy_name(policy_type))

        elif self.stickiness['type'] == 'application':
            policy = []
            policy_type = 'AppCookieStickinessPolicyType'
            if self.stickiness['enabled'] == True:
                if 'cookie' not in self.stickiness:
                    self.module.fail_json(msg='cookie must be set when type is application')

                policy_attrs = {
                    'type': policy_type,
                    'attr': 'app_cookie_stickiness_policies',
                    'method': 'create_app_cookie_stickiness_policy',
                    'dict_key': 'cookie_name',
                    'param_value': self.stickiness['cookie']
                }
                policy.append(self._policy_name(policy_attrs['type']))
                self._set_stickiness_policy(elb_info, listeners_dict, policy, **policy_attrs)
            elif self.stickiness['enabled'] == False:
                # NOTE(review): unlike the 'loadbalancer' branch, detach and
                # delete happen only when a policy already exists -- confirm
                # the asymmetry is intentional.
                if len(elb_info.policies.app_cookie_stickiness_policies):
                    if elb_info.policies.app_cookie_stickiness_policies[0].policy_name == self._policy_name(policy_type):
                        self.changed = True
                    self._set_listener_policy(listeners_dict)
                    self._delete_policy(self.elb.name, self._policy_name(policy_type))

        else:
            # Unknown stickiness type: just clear the listener policies.
            self._set_listener_policy(listeners_dict)
def _get_health_check_target(self):
    """Compose target string from healthcheck parameters

    e.g. 'HTTP:80/index.html' or 'TCP:22'; a ping path is only valid for
    the HTTP(S) protocols.
    """
    hc = self.health_check
    protocol = hc['ping_protocol'].upper()
    if protocol in ['HTTP', 'HTTPS'] and 'ping_path' in hc:
        path = hc['ping_path']
    else:
        path = ""
    return "%s:%s%s" % (protocol, hc['ping_port'], path)
def main():
    """Ansible entry point: create, update or delete an ELB per the module
    parameters, then exit with change status and ELB facts.

    Fix: the security-group lookup used the Python-2-only
    ``except E, e:`` handler syntax; ``except E as e:`` works on
    Python 2.6+ and Python 3.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state={'required': True, 'choices': ['present', 'absent']},
        name={'required': True},
        listeners={'default': None, 'required': False, 'type': 'list'},
        purge_listeners={'default': True, 'required': False, 'type': 'bool'},
        zones={'default': None, 'required': False, 'type': 'list'},
        purge_zones={'default': False, 'required': False, 'type': 'bool'},
        security_group_ids={'default': None, 'required': False, 'type': 'list'},
        security_group_names={'default': None, 'required': False, 'type': 'list'},
        health_check={'default': None, 'required': False, 'type': 'dict'},
        subnets={'default': None, 'required': False, 'type': 'list'},
        purge_subnets={'default': False, 'required': False, 'type': 'bool'},
        scheme={'default': 'internet-facing', 'required': False},
        connection_draining_timeout={'default': None, 'required': False},
        cross_az_load_balancing={'default': None, 'required': False},
        stickiness={'default': None, 'required': False, 'type': 'dict'}
    )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=[['security_group_ids', 'security_group_names']]
    )

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    if not region:
        module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")

    name = module.params['name']
    state = module.params['state']
    listeners = module.params['listeners']
    purge_listeners = module.params['purge_listeners']
    zones = module.params['zones']
    purge_zones = module.params['purge_zones']
    security_group_ids = module.params['security_group_ids']
    security_group_names = module.params['security_group_names']
    health_check = module.params['health_check']
    subnets = module.params['subnets']
    purge_subnets = module.params['purge_subnets']
    scheme = module.params['scheme']
    connection_draining_timeout = module.params['connection_draining_timeout']
    cross_az_load_balancing = module.params['cross_az_load_balancing']
    stickiness = module.params['stickiness']

    if state == 'present' and not listeners:
        module.fail_json(msg="At least one port is required for ELB creation")

    if state == 'present' and not (zones or subnets):
        module.fail_json(msg="At least one availability zone or subnet is required for ELB creation")

    # Resolve security-group names to ids when names were supplied.
    if security_group_names:
        security_group_ids = []
        try:
            ec2 = ec2_connect(module)
            grp_details = ec2.get_all_security_groups()
            for group_name in security_group_names:
                if isinstance(group_name, basestring):
                    group_name = [group_name]
                group_id = [str(grp.id) for grp in grp_details if str(grp.name) in group_name]
                security_group_ids.extend(group_id)
        except boto.exception.NoAuthHandlerFound as e:
            module.fail_json(msg=str(e))

    elb_man = ElbManager(module, name, listeners, purge_listeners, zones,
                         purge_zones, security_group_ids, health_check,
                         subnets, purge_subnets, scheme,
                         connection_draining_timeout, cross_az_load_balancing,
                         stickiness,
                         region=region, **aws_connect_params)

    # check for unsupported attributes for this version of boto
    if cross_az_load_balancing and not elb_man._check_attribute_support('cross_zone_load_balancing'):
        module.fail_json(msg="You must install boto >= 2.18.0 to use the cross_az_load_balancing attribute")

    if connection_draining_timeout and not elb_man._check_attribute_support('connection_draining'):
        module.fail_json(msg="You must install boto >= 2.28.0 to use the connection_draining_timeout attribute")

    if state == 'present':
        elb_man.ensure_ok()
    elif state == 'absent':
        elb_man.ensure_gone()

    ansible_facts = {'ec2_elb': 'info'}
    ec2_facts_result = dict(changed=elb_man.changed,
                            elb=elb_man.get_info(),
                            ansible_facts=ansible_facts)

    module.exit_json(**ec2_facts_result)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
| jarmoni/ansible-modules-core | cloud/amazon/ec2_elb_lb.py | Python | gpl-3.0 | 35,925 | [
"Dalton"
] | a83ec13328ba17849008b80432abc90aba3824e16db09d2e396bfa99f0748de8 |
# mako/ast.py
# Copyright (C) 2006-2013 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""utilities for analyzing expressions and blocks of Python
code, as well as generating Python from AST nodes"""
from mako import exceptions, pyparser, compat
from mako.compat import arg_stringname
import re
class PythonCode(object):
    """represents information about a string containing Python code"""

    def __init__(self, code, **exception_kwargs):
        self.code = code

        # All identifiers that are assigned to somewhere in the code.
        self.declared_identifiers = set()

        # All identifiers referenced before their assignment, if any.  An
        # identifier can appear in both sets.
        self.undeclared_identifiers = set()

        # AST-based analysis (rather than inspecting code.co_varnames /
        # co_names) lets us flag an identifier as "undeclared" even when
        # it is assigned later in the same block, and is more stable
        # across Python versions (e.g. co_names behavior shifted in 2.5).
        if isinstance(code, compat.string_types):
            parsed = pyparser.parse(code.lstrip(), "exec", **exception_kwargs)
        else:
            parsed = code

        finder = pyparser.FindIdentifiers(self, **exception_kwargs)
        finder.visit(parsed)
class ArgumentList(object):
    """parses a fragment of code as a comma-separated list of expressions"""

    def __init__(self, code, **exception_kwargs):
        self.codeargs = []
        self.args = []
        self.declared_identifiers = set()
        self.undeclared_identifiers = set()
        if isinstance(code, compat.string_types):
            if re.match(r"\S", code) and not re.match(r",\s*$", code):
                # there is text and no trailing comma: append one so the
                # fragment is guaranteed to parse as a tuple
                code += ","
            parsed = pyparser.parse(code, "exec", **exception_kwargs)
        else:
            parsed = code
        visitor = pyparser.FindTuple(self, PythonCode, **exception_kwargs)
        visitor.visit(parsed)
class PythonFragment(PythonCode):
    """extends PythonCode to provide identifier lookups in partial control
    statements

    e.g.::

        for x in 5:
        elif y==9:
        except (MyException, e):

    etc.
    """

    def __init__(self, code, **exception_kwargs):
        m = re.match(r'^(\w+)(?:\s+(.*?))?:\s*(#|$)', code.strip(), re.S)
        if not m:
            raise exceptions.CompileException(
                "Fragment '%s' is not a partial control statement" %
                code, **exception_kwargs)
        if m.group(3):
            # drop a trailing comment
            code = code[:m.start(3)]
        keyword, expr = m.group(1, 2)
        # Complete the fragment into something the parser will accept.
        if keyword in ('for', 'if', 'while', 'with'):
            code = code + "pass"
        elif keyword == 'try':
            code = code + "pass\nexcept:pass"
        elif keyword in ('elif', 'else'):
            code = "if False:pass\n" + code + "pass"
        elif keyword == 'except':
            code = "try:pass\n" + code + "pass"
        else:
            raise exceptions.CompileException(
                "Unsupported control keyword: '%s'" %
                keyword, **exception_kwargs)
        super(PythonFragment, self).__init__(code, **exception_kwargs)
class FunctionDecl(object):
    """function declaration"""

    def __init__(self, code, allow_kwargs=True, **exception_kwargs):
        """Parse *code* (a full ``def`` statement) and record its signature.

        Raises CompileException if *code* is not a function declaration,
        or if it uses ``**kwargs`` while ``allow_kwargs`` is False.
        """
        self.code = code
        expr = pyparser.parse(code, "exec", **exception_kwargs)

        f = pyparser.ParseFunc(self, **exception_kwargs)
        f.visit(expr)
        if not hasattr(self, 'funcname'):
            raise exceptions.CompileException(
                "Code '%s' is not a function declaration" % code,
                **exception_kwargs)
        if not allow_kwargs and self.kwargs:
            raise exceptions.CompileException(
                "'**%s' keyword argument not allowed here" %
                self.argnames[-1], **exception_kwargs)

    def get_argument_expressions(self, include_defaults=True):
        """return the argument declarations of this FunctionDecl as a printable
        list."""
        namedecls = []

        # Defaults line up against the *last* positional arguments, so
        # walk the argument list from the end, popping defaults as we go.
        defaults = list(self.defaults)
        kwargs = self.kwargs
        varargs = self.varargs
        argnames = list(self.argnames)
        argnames.reverse()

        for arg in argnames:
            default = None
            if kwargs:
                arg = "**" + arg_stringname(arg)
                kwargs = False
            elif varargs:
                arg = "*" + arg_stringname(arg)
                varargs = False
            else:
                # was ``len(defaults) and defaults.pop() or None`` -- the
                # and/or idiom would silently lose a falsy default; a real
                # conditional expression is safe for any AST node.
                default = defaults.pop() if defaults else None

            if include_defaults and default is not None:
                namedecls.insert(0, "%s=%s" %
                                 (arg,
                                  pyparser.ExpressionGenerator(default).value()
                                  )
                                 )
            else:
                namedecls.insert(0, arg)
        return namedecls
class FunctionArgs(FunctionDecl):
    """the argument portion of a function declaration"""

    def __init__(self, code, **kwargs):
        # Wrap the bare argument list in a dummy ``def`` so the parent
        # class can parse it as a complete function declaration.
        wrapped = "def ANON(%s):pass" % code
        super(FunctionArgs, self).__init__(wrapped, **kwargs)
| mcollins12321/anita | venv/lib/python2.7/site-packages/mako/ast.py | Python | mit | 5,750 | [
"VisIt"
] | f6a2a81c0d59aabd6f8d3eff6c62ade026008da60aa5a664a034875375f344fe |
from tao.tests.integration_tests.helper import LiveServerMGFTest
from tao.tests.support.factories import SimulationFactory, GalaxyModelFactory, UserFactory, DataSetFactory, DataSetPropertyFactory, BandPassFilterFactory, StellarModelFactory, SnapshotFactory, GlobalParameterFactory
from tao.models import Simulation, DataSet, GalaxyModel
from tao.settings import MODULE_INDICES
class FilterTests(LiveServerMGFTest):
def setUp(self):
super(FilterTests, self).setUp()
simulation1 = SimulationFactory.create()
simulation2 = SimulationFactory.create()
self.redshifts = ['1.123456789', '2.123456789', '3.123456789']
for unused in range(4):
galaxy_model = GalaxyModelFactory.create()
dataset = DataSetFactory.create(simulation=simulation1, galaxy_model=galaxy_model, max_job_box_count=2)
DataSetPropertyFactory.create(dataset=dataset)
for redshift in self.redshifts:
SnapshotFactory.create(dataset=dataset, redshift=redshift)
for unused in range(5):
galaxy_model = GalaxyModelFactory.create()
dataset = DataSetFactory.create(simulation=simulation2, galaxy_model=galaxy_model)
DataSetPropertyFactory.create(dataset=dataset)
dsp = DataSetPropertyFactory.create(dataset=dataset, is_filter=False)
dataset.default_filter_field = dsp
dataset.save()
for redshift in self.redshifts:
SnapshotFactory.create(dataset=dataset, redshift=redshift)
self.bp_filters = []
for unused in range(3):
self.bp_filters.append(BandPassFilterFactory.create())
StellarModelFactory.create()
username = "user"
password = "password"
UserFactory.create(username=username, password=password)
self.login(username, password)
self.visit('mock_galaxy_factory')
self.click('tao-tabs-' + 'light_cone')
self.select(self.lc_id('catalogue_geometry'), 'Light-Cone')
self.select_dark_matter_simulation(simulation1)
self.select_galaxy_model(simulation1.galaxymodel_set.all().order_by('id')[0])
initial_simulation = Simulation.objects.all().order_by('id')[0]
initial_galaxy_model = initial_simulation.galaxymodel_set.all().order_by('id')[0]
self.initial_dataset = DataSet.objects.get(simulation=initial_simulation, galaxy_model=initial_galaxy_model)
GlobalParameterFactory(parameter_name='INITIAL_JOB_STATUS', parameter_value='HELD')
def tearDown(self):
super(FilterTests, self).tearDown()
def _test_filter_options(self):
# check drop-down list correspond to properties of the currently selected simulation and galaxy model
self.click(self.lc_2select('op_add_all'))
self.click('tao-tabs-' + 'sed')
self.click(self.sed('apply_sed'))
self.click(self.sed_2select('op_add_all'))
expected_filter_options = self.get_expected_filter_options(self.initial_dataset)
actual_filter_options = self.get_actual_filter_options()
self.assertEqual(expected_filter_options, actual_filter_options)
def _test_filter_options_with_band_pass_filter(self):
# check drop-down list correspond to properties of the currently selected simulation and galaxy model
# plus selected band-pass filter
self.click(self.lc_2select('op_add_all'))
self.click('tao-tabs-' + 'sed')
self.click(self.sed('apply_sed'))
self.click(self.sed_2select('op_add_all'))
expected_filter_options = self.get_expected_filter_options(self.initial_dataset.id)
actual_filter_options = self.get_actual_filter_options()
self.assertEqual(expected_filter_options, actual_filter_options)
def _test_filter_options_with_selected_band_pass_filter_after_submit_error(self):
# check drop-down list correspond to properties of the currently selected simulation and galaxy model
# plus selected band-pass filter
self.click(self.lc_2select('op_add_all'))
self.click('tao-tabs-' + 'sed')
self.click(self.sed('apply_sed'))
self.click(self.sed_2select('op_add_all'))
self.click('tao-tabs-' + 'record_filter')
self.select_record_filter(self.bp_filters[1])
self.submit_mgf_form()
self.assert_errors_on_field(True, self.rf_id('min'))
self.assert_errors_on_field(True, self.rf_id('max'))
def _test_filter_options_with_selected_band_pass_filter_submit_ok(self):
# check drop-down list correspond to properties of the currently selected simulation and galaxy model
# plus selected band-pass filter
self.fill_in_fields({'ra_opening_angle':'12', 'dec_opening_angle': '10', 'redshift_min':'0', 'redshift_max':'0.2'}, id_wrap=self.lc_id)
self.click(self.lc_2select('op_add_all'))
self.click('tao-tabs-' + 'sed')
self.click(self.sed('apply_sed'))
self.click(self.sed_2select('op_add_all'))
self.click('tao-tabs-' + 'record_filter')
self.select_record_filter(self.bp_filters[1])
self.fill_in_fields({'max': '12.3', 'min': ''}, id_wrap=self.rf_id)
self.submit_mgf_form()
self.assert_on_page('job_index')
def _test_filter_options_with_selected_band_pass_filter_after_submit_other_errors(self):
# check drop-down list correspond to properties of the currently selected simulation and galaxy model
# plus selected band-pass filter
self.click(self.lc_2select('op_add_all'))
self.click('tao-tabs-' + 'sed')
self.click(self.sed('apply_sed'))
self.click(self.sed_2select('op_add_all'))
self.click('tao-tabs-' + 'record_filter')
self.select_record_filter(self.bp_filters[1], 'apparent')
self.fill_in_fields({'max': '12.3', 'min': ''}, id_wrap=self.rf_id)
self.submit_mgf_form()
self.assert_errors_on_field(True, self.lc_id('redshift_min'))
self.click('tao-tabs-' + 'record_filter')
self.assertEqual(self.bp_filters[1].label + ' (Apparent)', self.get_selected_option_text(self.rf_id('filter')))
self.assert_attribute_equals('value', {self.rf_id('min'):'',self.rf_id('max'):'12.3'})
def _test_filter_options_and_is_filter(self):
# check drop-down list correspond to properties of the currently selected simulation and galaxy model
simulation = Simulation.objects.all().order_by('id')[1]
galaxy_model = simulation.galaxymodel_set.all().order_by('id')[0]
dataset = DataSet.objects.get(simulation=simulation, galaxy_model=galaxy_model)
self.select_dark_matter_simulation(simulation)
self.select_galaxy_model(galaxy_model)
self.click(self.lc_2select('op_add_all'))
self.click('tao-tabs-' + 'sed')
self.click(self.sed('apply_sed'))
self.click(self.sed_2select('op_add_all'))
expected_filter_options = self.get_expected_filter_options(dataset.id)
actual_filter_options = self.get_actual_filter_options()
self.assertEqual(expected_filter_options, actual_filter_options)
def _test_filter_updates(self):
# check drop-down list updates when simulation or galaxy model is changed
simulation = Simulation.objects.all()[1]
galaxy_model = simulation.galaxymodel_set.all()[4]
dataset = DataSet.objects.get(simulation=simulation, galaxy_model=galaxy_model)
expected_filter_options = self.get_expected_filter_options(dataset.id)
self.select_dark_matter_simulation(simulation)
self.select_galaxy_model(galaxy_model)
self.click(self.lc_2select('op_add_all'))
self.click('tao-tabs-' + 'sed')
self.click(self.sed('apply_sed'))
self.click(self.sed_2select('op_add_all'))
actual_filter_options = self.get_actual_filter_options()
self.assertEqual(expected_filter_options, actual_filter_options)
def _test_snapshot_updates(self):
# check drop-down list updates when simulation or galaxy model is changed
simulation = Simulation.objects.all()[1]
galaxy_model = simulation.galaxymodel_set.all()[4]
dataset = DataSet.objects.get(simulation=simulation, galaxy_model=galaxy_model)
snapshots = dataset.snapshot_set.all()
expected_snapshot_options = self.get_expected_snapshot_options(snapshots)
self.select_dark_matter_simulation(simulation)
self.select_galaxy_model(galaxy_model)
actual_snapshot_options = self.get_actual_snapshot_options()
self.assertEqual(expected_snapshot_options, actual_snapshot_options)
def _test_max_min_fields(self):
simulation = Simulation.objects.all()[1]
galaxy_model = simulation.galaxymodel_set.all()[4]
dataset = DataSet.objects.get(simulation=simulation, galaxy_model=galaxy_model)
self.select_dark_matter_simulation(simulation)
self.select_galaxy_model(galaxy_model)
self.click('tao-tabs-' + 'record_filter')
## default record filter for the dataset is pre-selected
self.assert_is_enabled(self.rf_id('max'))
self.assert_is_enabled(self.rf_id('min'))
self.choose_no_filter()
self.assert_is_disabled(self.rf_id('max'))
self.assert_is_disabled(self.rf_id('min'))
def _test_max_min_fields_after_failed_submit(self):
simulation = Simulation.objects.all()[1]
self.select_dark_matter_simulation(simulation)
galaxy_model = simulation.galaxymodel_set.all()[4]
self.select_galaxy_model(galaxy_model)
dataset = DataSet.objects.get(simulation=simulation, galaxy_model=galaxy_model)
dataset_parameter = dataset.datasetproperty_set.all()[0]
self.click(self.lc_2select('op_add_all'))
self.click('tao-tabs-' + 'record_filter')
self.choose_filter(dataset_parameter)
max_input = "bad number"
min_input = "73"
self.fill_in_fields({'max': max_input, 'min': min_input}, id_wrap=self.rf_id)
self.submit_mgf_form()
# check values are the same in the form as user previously selected
self.click('tao-tabs-' + 'light_cone')
self.assertEqual(simulation.name, self.get_selected_option_text(self.lc_id('dark_matter_simulation')))
self.assertEqual(galaxy_model.name, self.get_selected_option_text(self.lc_id('galaxy_model')))
# check after failed submit, max/min fields are both still enabled
self.click('tao-tabs-' + 'record_filter')
self.assert_is_enabled(self.rf_id('max'))
self.assert_is_enabled(self.rf_id('min'))
self.assertEqual(dataset_parameter.option_label(), self.get_selected_option_text(self.rf_id('filter')))
self.assertEqual(max_input, self.get_selector_value(self.rf_id('max')))
self.assertEqual(min_input, self.get_selector_value(self.rf_id('min')))
def _test_max_min_required_for_data_sets_with_no_default(self):
simulation = Simulation.objects.all()[0]
self.select_dark_matter_simulation(simulation)
galaxy_model = simulation.galaxymodel_set.all()[3]
self.select_galaxy_model(galaxy_model)
dataset = DataSet.objects.get(simulation=simulation, galaxy_model=galaxy_model)
dataset_parameter = dataset.datasetproperty_set.all()[0]
self.click(self.lc_2select('op_add_all'))
self.click('tao-tabs-' + 'record_filter')
self.choose_filter(dataset_parameter)
self.fill_in_fields({'max': '', 'min': ''}, id_wrap=self.rf_id)
self.submit_mgf_form()
self.assert_errors_on_field(True, self.rf_id('min'))
self.assert_errors_on_field(True, self.rf_id('max'))
def _test_max_min_required_for_data_sets_with_a_default(self):
simulation = Simulation.objects.all()[1]
self.select_dark_matter_simulation(simulation)
galaxy_model = simulation.galaxymodel_set.all()[4]
self.select_galaxy_model(galaxy_model)
dataset = DataSet.objects.get(simulation=simulation, galaxy_model=galaxy_model)
dataset_parameter = dataset.default_filter_field
self.click('tao-tabs-' + 'record_filter')
self.choose_filter(dataset_parameter)
self.fill_in_fields({'max': '', 'min': ''}, id_wrap=self.rf_id)
self.submit_mgf_form()
self.assert_errors_on_field(True, self.rf_id('min'))
self.assert_errors_on_field(True, self.rf_id('max'))
def _test_max_min_for_no_filter(self):
self.click(self.lc_2select('op_add_all'))
self.click('tao-tabs-' + 'record_filter')
dataset_parameter = self.initial_dataset.datasetproperty_set.all()[0]
self.choose_filter(dataset_parameter)
self.assert_is_enabled(self.rf_id('max'))
self.assert_is_enabled(self.rf_id('min'))
self.choose_no_filter()
self.assert_is_disabled(self.rf_id('max'))
self.assert_is_disabled(self.rf_id('min'))
self.submit_mgf_form()
self.assert_errors_on_field(False, self.rf_id('min'))
self.assert_errors_on_field(False, self.rf_id('max'))
def _test_redshift_max_redshift_min_fields_after_failed_submit(self):
redshift_max_input = "bad number"
redshift_min_input = "73"
self.fill_in_fields({'redshift_max': redshift_max_input, 'redshift_min': redshift_min_input}, id_wrap=self.lc_id)
self.submit_mgf_form()
self.assertEqual(redshift_max_input, self.get_selector_value(self.lc_id('redshift_max')))
self.assertEqual(redshift_min_input, self.get_selector_value(self.lc_id('redshift_min')))
def choose_filter(self, dataset_parameter):
    """Pick *dataset_parameter* in the record-filter drop-down."""
    target_label = dataset_parameter.option_label()
    self.select(self.rf_id('filter'), target_label)
def choose_no_filter(self):
    """Reset the record-filter drop-down to the 'No Filter' option."""
    filter_selector = self.rf_id('filter')
    self.select(filter_selector, 'No Filter')
| IntersectAustralia/asvo-tao | web/tao/tests/integration_tests/filter_tests.py | Python | gpl-3.0 | 13,877 | [
"Galaxy",
"VisIt"
] | 433a80a508ac1a5723a79dfe3e2b5928c82c9fb70458305d037ef8992543cf24 |
import numpy as np
from ASE import Atom
from gpaw.utilities import equal
from gpaw.cluster import Cluster
from gpaw import Calculator
from gpaw.analyse.expandyl import ExpandYl
# Regression script: expand the H2 Kohn-Sham orbitals in spherical
# harmonics and check their dominant angular character.
# NOTE(review): Python 2 syntax (print statements) and legacy ASE/GPAW
# API (SetCalculator/GetPotentialEnergy) — this targets old ASE/GPAW
# versions; confirm before running against a modern stack.
# Indentation was lost in extraction; re-indented here.
R = 1.  # H-H separation; presumably Angstrom (ASE convention) — TODO confirm
H2 = Cluster([Atom('H',(0,0,0)), Atom('H',(0,0,R))])
H2.minimal_box(3.)  # pad the cell by 3 around the molecule
calc = Calculator(h=0.2, width=0.01, nbands=2)
H2.SetCalculator(calc)
H2.GetPotentialEnergy()  # triggers the SCF calculation
# Expand wave functions in Y_lm about the centre of mass, up to r=2.5.
yl = ExpandYl(H2.center_of_mass(), calc.wfs.gd, Rmax=2.5)
gl = []
for n in range(calc.nbands):
    psit_G = calc.kpt_u[0].psit_nG[n]
    # Grid-integrated norm of the orbital density.
    norm = calc.wfs.gd.integrate(psit_G**2)
    g = yl.expand(psit_G)
    gsum = np.sum(g)
    # allow for 10 % inaccuracy in the norm
    print "norm, sum=", norm, gsum
    equal(norm, gsum, 0.1)
    # Store the per-l weights as percentages of the total.
    gl.append(g/gsum*100)
# 1 sigma_g has s-symmetry mainly
print gl[0]
equal( gl[0][0], 100, 10)
# 1 sigma_u has p-symmetry mainly
print gl[1]
equal( gl[1][1], 100, 10)
| qsnake/gpaw | oldtest/analyse/yl.py | Python | gpl-3.0 | 862 | [
"ASE",
"GPAW"
] | 5e09660194970174b5281751eef9320331b61525ff9e97b50d44c5ed8334bd83 |
import gen_utils
from module_base import ModuleBase
from module_mixins import NoConfigModuleMixin
import module_utils
import vtk
import vtkdevide
class imageBacktracker(NoConfigModuleMixin, ModuleBase):
    """JORIK'S STUFF.

    DeVIDE module wrapping vtkdevide.vtkImageBacktracker: given an input
    image and a set of seed points, produces backtracked polylines.
    NOTE(review): indentation was lost in extraction; re-indented here.
    """

    def __init__(self, module_manager):
        # call parent constructor
        ModuleBase.__init__(self, module_manager)
        NoConfigModuleMixin.__init__(self)

        # The wrapped VTK filter that does the actual backtracking.
        self._imageBacktracker = vtkdevide.vtkImageBacktracker()

        module_utils.setup_vtk_object_progress(self, self._imageBacktracker,
                                               'Backtracking...')

        # we'll use this to keep a binding (reference) to the passed object
        self._inputPoints = None
        # inputPoints observer ID
        # NOTE(review): _inputPointsOID is assigned here but never used;
        # removeObserver() below is called with the observer callable
        # instead — verify against the points-object observer API.
        self._inputPointsOID = None
        # this will be our internal list of points
        self._seedPoints = []

        self._viewFrame = None
        self._createViewFrame({'Module (self)' : self, 'vtkImageBacktracker' : self._imageBacktracker})

    def close(self):
        """Disconnect inputs and release the wrapped VTK object."""
        # we play it safe... (the graph_editor/module_manager should have
        # disconnected us by now)
        self.set_input(0, None)
        self.set_input(1, None)
        # don't forget to call the close() method of the vtkPipeline mixin
        NoConfigModuleMixin.close(self)
        # take out our view interface
        del self._imageBacktracker
        ModuleBase.close(self)

    def get_input_descriptions(self):
        # Input 0 is the image, input 1 the seed-point collection.
        return ('vtkImageData', 'Seed points')

    def set_input(self, idx, inputStream):
        """Connect input idx: 0 = image data, 1 = observable seed points."""
        if idx == 0:
            # will work for None and not-None
            self._imageBacktracker.SetInput(inputStream)
        else:
            if inputStream is not self._inputPoints:
                # Re-wire the observer from the old points object (if any)
                # to the new one, then remember the new binding.
                if self._inputPoints:
                    self._inputPoints.removeObserver(self._inputPointsObserver)
                if inputStream:
                    inputStream.addObserver(self._inputPointsObserver)
                self._inputPoints = inputStream
                # initial update
                self._inputPointsObserver(None)

    def get_output_descriptions(self):
        return ('Backtracked polylines (vtkPolyData)',)

    def get_output(self, idx):
        # Single output: the filter's polydata, regardless of idx.
        return self._imageBacktracker.GetOutput()

    def execute_module(self):
        # Force the wrapped VTK pipeline to run.
        self._imageBacktracker.Update()

    def _inputPointsObserver(self, obj):
        """Observer callback: sync seed points from the input points object
        into the VTK filter whenever the points change."""
        # extract a list from the input points
        tempList = []
        if self._inputPoints:
            for i in self._inputPoints:
                # presumably 'discrete' holds integer voxel coordinates —
                # TODO confirm against the points-object schema.
                tempList.append(i['discrete'])

        # Only touch the filter when the seed set actually changed.
        if tempList != self._seedPoints:
            self._seedPoints = tempList
            self._imageBacktracker.RemoveAllSeeds()
            for seedPoint in self._seedPoints:
                self._imageBacktracker.AddSeed(seedPoint[0], seedPoint[1],
                                               seedPoint[2])
                print "adding %s" % (str(seedPoint))
| chrisidefix/devide | modules/user/experimental/imageBacktracker.py | Python | bsd-3-clause | 2,956 | [
"VTK"
] | cb72ad4d891f1cb19b1a43d3985c40dc4bb893492f6724c35ecaac772e72ad72 |
import sys
sys.path.insert(1, "../../../")
import h2o, tests
def offset_gaussian(ip,port):
    """GBM with an offset column, gaussian distribution: compare H2O's
    init_f, MSE and prediction statistics against reference values
    generated by R's gbm package (see inline R transcript below).

    NOTE(review): indentation was lost in extraction; re-indented here.
    The ip/port arguments are unused in the body — presumably consumed
    by the tests.run_test harness when connecting.
    """
    # Connect to a pre-existing cluster
    insurance = h2o.import_file(h2o.locate("smalldata/glm_test/insurance.csv"))

    # Offset is log(Holders), the standard exposure offset for count-style data.
    insurance["offset"] = insurance["Holders"].log()

    gbm = h2o.gbm(x=insurance[0:3], y=insurance["Claims"], distribution="gaussian", ntrees=600, max_depth=1, min_rows=1,
                  learn_rate=.1, offset_column="offset", training_frame=insurance)

    predictions = gbm.predict(insurance)

    # Comparison result generated from R's gbm:
    # fit2 <- gbm(Claims ~ District + Group + Age+ offset(log(Holders)) , interaction.depth = 1,n.minobsinnode = 1,
    #             shrinkage = .1,bag.fraction = 1,train.fraction = 1,
    # data = Insurance, distribution ="gaussian", n.trees = 600)
    # pg = predict(fit2, newdata = Insurance, type = "response", n.trees=600)
    # pr = pg - - log(Insurance$Holders)
    assert abs(44.33016 - gbm._model_json['output']['init_f']) < 1e-5, "expected init_f to be {0}, but got {1}". \
        format(44.33016, gbm._model_json['output']['init_f'])
    assert abs(1491.135 - gbm.mse()) < 1e-2, "expected mse to be {0}, but got {1}".format(1491.135, gbm.mse())
    assert abs(49.23438 - predictions.mean()) < 1e-2, "expected prediction mean to be {0}, but got {1}". \
        format(49.23438, predictions.mean())
    assert abs(-45.5720659304 - predictions.min()) < 1e-2, "expected prediction min to be {0}, but got {1}". \
        format(-45.5720659304, predictions.min())
    assert abs(207.387 - predictions.max()) < 1e-2, "expected prediction max to be {0}, but got {1}". \
        format(207.387, predictions.max())
# Entry point: let the h2o test harness parse argv and run the test.
if __name__ == "__main__":
    tests.run_test(sys.argv, offset_gaussian)
| bospetersen/h2o-3 | h2o-py/tests/testdir_algos/gbm/pyunit_offset_gaussianGBM.py | Python | apache-2.0 | 1,768 | [
"Gaussian"
] | fc3f6e0b0d1714b7e70cbed7420d1b925c0c212e1e6f5bf38f89936ec305e280 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.