text
stringlengths 12
1.05M
| repo_name
stringlengths 5
86
| path
stringlengths 4
191
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 12
1.05M
| keyword
listlengths 1
23
| text_hash
stringlengths 64
64
|
|---|---|---|---|---|---|---|---|
#!/usr/bin/env python
# ***** BEGIN LICENSE BLOCK *****
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
# ***** END LICENSE BLOCK *****
import copy
import os
import platform
import re
import urllib2
import getpass
from mozharness.base.config import ReadOnlyDict, parse_config_file
from mozharness.base.errors import BaseErrorList
from mozharness.base.log import FATAL, WARNING
from mozharness.base.python import (
ResourceMonitoringMixin,
VirtualenvMixin,
virtualenv_config_options,
)
from mozharness.mozilla.buildbot import BuildbotMixin, TBPL_WARNING
from mozharness.mozilla.proxxy import Proxxy
from mozharness.mozilla.structuredlog import StructuredOutputParser
from mozharness.mozilla.testing.unittest import DesktopUnittestOutputParser
from mozharness.mozilla.tooltool import TooltoolMixin
from mozharness.lib.python.authentication import get_credentials
# File suffixes we recognize as installer packages when deriving sibling
# URLs (jsshell, crashreporter symbols) from an installer URL.
INSTALLER_SUFFIXES = ('.tar.bz2', '.zip', '.dmg', '.exe', '.apk', '.tar.gz')

# Command-line options shared by all testing scripts; the virtualenv options
# are appended so every testing script can also create a virtualenv.
testing_config_options = [
    [["--installer-url"],
     {"action": "store",
      "dest": "installer_url",
      "default": None,
      "help": "URL to the installer to install",
      }],
    [["--installer-path"],
     {"action": "store",
      "dest": "installer_path",
      "default": None,
      "help": "Path to the installer to install. This is set automatically if run with --download-and-extract.",
      }],
    [["--binary-path"],
     {"action": "store",
      "dest": "binary_path",
      "default": None,
      "help": "Path to installed binary. This is set automatically if run with --install.",
      }],
    [["--exe-suffix"],
     {"action": "store",
      "dest": "exe_suffix",
      "default": None,
      "help": "Executable suffix for binaries on this platform",
      }],
    [["--test-url"],
     {"action": "store",
      "dest": "test_url",
      "default": None,
      "help": "URL to the zip file containing the actual tests",
      }],
    [["--jsshell-url"],
     {"action": "store",
      "dest": "jsshell_url",
      "default": None,
      "help": "URL to the jsshell to install",
      }],
    [["--download-symbols"],
     {"action": "store",
      "dest": "download_symbols",
      "type": "choice",
      "choices": ['ondemand', 'true'],
      "help": "Download and extract crash reporter symbols.",
      }],
] + copy.deepcopy(virtualenv_config_options)
# TestingMixin {{{1
class TestingMixin(VirtualenvMixin, BuildbotMixin, ResourceMonitoringMixin, TooltoolMixin):
    """
    The steps to identify + download the proper bits for [browser] unit
    tests and Talos.
    """
    # Class-level defaults; the actions below fill these in as they run.
    installer_url = None    # where to download the build from
    installer_path = None   # local path of the downloaded installer
    binary_path = None      # path of the installed binary (set by install())
    test_url = None         # where to download the tests zip from
    test_zip_path = None    # local path of the downloaded tests zip
    tree_config = ReadOnlyDict({})  # in-tree config; locked after loading
    symbols_url = None      # crashreporter symbols URL
    symbols_path = None     # local symbols dir (or the URL itself in 'ondemand' mode)
    jsshell_url = None
    minidump_stackwalk_path = None
    default_tools_repo = 'https://hg.mozilla.org/build/tools'
    proxxy = None           # lazily-created Proxxy instance (see _query_proxxy)
def _query_proxxy(self):
    """Return the shared Proxxy helper, creating it on first use."""
    existing = self.proxxy
    if not existing:
        existing = Proxxy(self.config, self.log_obj)
        self.proxxy = existing
    return existing
def download_proxied_file(self, url, file_name=None, parent_dir=None,
                          create_parent_dir=True, error_level=FATAL,
                          exit_code=3):
    """Download *url* through the Proxxy front-ends (see Proxxy)."""
    helper = self._query_proxxy()
    return helper.download_proxied_file(
        url=url,
        file_name=file_name,
        parent_dir=parent_dir,
        create_parent_dir=create_parent_dir,
        error_level=error_level,
        exit_code=exit_code,
    )
def download_file(self, *args, **kwargs):
    '''
    Download a file, bypassing the proxxy servers in developer mode:
    proxied downloads do not support authentication, which local
    (developer) runs need.  Could be re-factored in bug 1087664.
    '''
    if not self.config.get("developer_mode"):
        return self.download_proxied_file(*args, **kwargs)
    return super(TestingMixin, self).download_file(*args, **kwargs)
def query_value(self, key):
    """
    Look *key* up in the in-tree config (self.tree_config) first,
    falling back to self.config.
    """
    fallback = self.config.get(key)
    return self.tree_config.get(key, fallback)
def query_jsshell_url(self):
    """
    Derive (and cache) the jsshell zip URL from the installer URL.
    Fatals when no installer URL is known or its suffix is unrecognized.
    """
    if self.jsshell_url:
        return self.jsshell_url
    if not self.installer_url:
        self.fatal("Can't figure out jsshell without an installer_url!")
    last_slash = self.installer_url.rfind('/')
    base_url = self.installer_url[:last_slash]
    for suffix in INSTALLER_SUFFIXES:
        if not self.installer_url.endswith(suffix):
            continue
        # Strip the package suffix; the trailing dotted component is the
        # platform tag (e.g. "linux-x86_64").
        stem = self.installer_url[:-len(suffix)]
        platform_tag = stem[stem.rfind('.') + 1:]
        self.jsshell_url = '%s/jsshell-%s.zip' % (base_url, platform_tag)
        return self.jsshell_url
    self.fatal("Can't figure out jsshell from installer_url %s!" % self.installer_url)
def query_symbols_url(self):
    """
    Derive (and cache) the crashreporter-symbols zip URL from the
    installer URL; fatal when that is impossible.
    """
    if self.symbols_url:
        return self.symbols_url
    if not self.installer_url:
        self.fatal("Can't figure out symbols_url without an installer_url!")
    matches = [s for s in INSTALLER_SUFFIXES if self.installer_url.endswith(s)]
    if not matches:
        self.fatal("Can't figure out symbols_url from installer_url %s!" % self.installer_url)
    suffix = matches[0]
    self.symbols_url = self.installer_url[:-len(suffix)] + '.crashreporter-symbols.zip'
    return self.symbols_url
def _pre_config_lock(self, rw_config):
    """Hook run before the config is locked.

    When developer_config.py is among the loaded config files, apply the
    developer-mode adjustments (see _developer_mode_changes).
    """
    # The parsed dicts (and the old enumerate index) were never used;
    # only the file names matter here.
    for target_file, _ in rw_config.all_cfg_files_and_dicts:
        if 'developer_config' in target_file:
            self._developer_mode_changes(rw_config)
def _developer_mode_changes(self, rw_config):
    """ This function is called when you append the config called
        developer_config.py. This allows you to run a job
        outside of the Release Engineering infrastructure.

        What this functions accomplishes is:
        * read-buildbot-config is removed from the list of actions
        * --installer-url is set
        * --test-url is set if needed
        * every url is substituted by another external to the
          Release Engineering network
    """
    c = self.config
    # Keep a snapshot so we can tell at the end whether anything changed.
    orig_config = copy.deepcopy(c)
    self.warning("When you use developer_config.py, we drop " \
                 "'read-buildbot-config' from the list of actions.")
    if "read-buildbot-config" in rw_config.actions:
        rw_config.actions.remove("read-buildbot-config")
    self.actions = tuple(rw_config.actions)

    def _replace_url(url, changes):
        # Swap an internal releng URL prefix for its external equivalent,
        # as declared in the config's "replace_urls" (from, to) pairs.
        for from_, to_ in changes:
            if url.startswith(from_):
                new_url = url.replace(from_, to_)
                self.info("Replacing url %s -> %s" % (url, new_url))
                return new_url
        return url

    assert c["installer_url"], "You must use --installer-url with developer_config.py"
    if c.get("require_test_zip"):
        assert c["test_url"], "You must use --test-url with developer_config.py"
    c["installer_url"] = _replace_url(c["installer_url"], c["replace_urls"])
    if c.get("test_url"):
        c["test_url"] = _replace_url(c["test_url"], c["replace_urls"])
    # Also rewrite any other plain-string http(s) URL in the config.
    # (Python 2 iteritems; unicode values are deliberately(?) skipped --
    # NOTE(review): confirm that unicode config values never carry URLs.)
    for key, value in self.config.iteritems():
        if type(value) == str and value.startswith("http"):
            self.config[key] = _replace_url(value, c["replace_urls"])
    # Any changes to c means that we need credentials
    if not c == orig_config:
        get_credentials()

def _urlopen(self, url, **kwargs):
    '''
    This function helps dealing with downloading files while outside
    of the releng network.
    '''
    # Code based on http://code.activestate.com/recipes/305288-http-basic-authentication
    def _urlopen_basic_auth(url, **kwargs):
        # Prompt once for LDAP credentials (cached on self), then open
        # the url with HTTP basic auth.
        self.info("We want to download this file %s" % url)
        if not hasattr(self, "https_username"):
            self.info("NOTICE: Files downloaded from outside of "
                      "Release Engineering network require LDAP "
                      "credentials.")
            self.https_username, self.https_password = get_credentials()
        # This creates a password manager
        passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
        # Because we have put None at the start it will use this username/password combination from here on
        passman.add_password(None, url, self.https_username, self.https_password)
        authhandler = urllib2.HTTPBasicAuthHandler(passman)
        return urllib2.build_opener(authhandler).open(url, **kwargs)

    # If we have the developer_run flag enabled then we will switch
    # URLs to the right place and enable http authentication
    if "developer_config.py" in self.config["config_files"]:
        return _urlopen_basic_auth(url, **kwargs)
    else:
        return urllib2.urlopen(url, **kwargs)
# read_buildbot_config is in BuildbotMixin.
def postflight_read_buildbot_config(self):
    """
    Determine which files to download from the buildprops.json file
    created via the buildbot ScriptFactory.
    """
    if self.buildbot_config:
        c = self.config
        message = "Unable to set %s from the buildbot config"
        # Explicit command-line options win over buildbot properties.
        if c.get("installer_url"):
            self.installer_url = c['installer_url']
        if c.get("test_url"):
            self.test_url = c['test_url']
        try:
            files = self.buildbot_config['sourcestamp']['changes'][-1]['files']
            buildbot_prop_branch = self.buildbot_config['properties']['branch']

            # Bug 868490 - Only require exactly two files if require_test_zip;
            # otherwise accept either 1 or 2, since we'll be getting a
            # test_zip url that we don't need.
            expected_length = [1, 2, 3]
            if c.get("require_test_zip") and not self.test_url:
                expected_length = [2, 3]
            if buildbot_prop_branch.startswith('gaia-try'):
                # gaia-try changes may carry any number of files
                expected_length = range(1, 1000)
            actual_length = len(files)
            if actual_length not in expected_length:
                self.fatal("Unexpected number of files in buildbot config %s.\nExpected these number(s) of files: %s, but got: %d" %
                           (c['buildbot_json_path'], str(expected_length), actual_length))
            for f in files:
                if f['name'].endswith('tests.zip'):  # yuk
                    if not self.test_url:
                        # str() because of unicode issues on mac
                        self.test_url = str(f['name'])
                        self.info("Found test url %s." % self.test_url)
                elif f['name'].endswith('crashreporter-symbols.zip'):  # yuk
                    self.symbols_url = str(f['name'])
                    self.info("Found symbols url %s." % self.symbols_url)
                else:
                    if not self.installer_url:
                        self.installer_url = str(f['name'])
                        self.info("Found installer url %s." % self.installer_url)
        except IndexError, e:
            # no changes in the sourcestamp; fall through to the checks below
            self.error(str(e))
        missing = []
        if not self.installer_url:
            missing.append("installer_url")
        if c.get("require_test_zip") and not self.test_url:
            missing.append("test_url")
        if missing:
            self.fatal("%s!" % (message % ('+'.join(missing))))
    else:
        self.fatal("self.buildbot_config isn't set after running read_buildbot_config!")
def _query_binary_version(self, regex, cmd):
    """Run *cmd* and return the first text matched by *regex* in its output."""
    raw_output = self.get_output_from_command(cmd, silent=False)
    match = regex.search(raw_output)
    return match.group(0)
def preflight_download_and_extract(self):
    """Sanity-check that the required URLs are set before downloading;
    in developer mode on OS X, also verify the local unzip version
    (bug 1066700)."""
    message = ""
    if not self.installer_url:
        message += """installer_url isn't set!
You can set this by:
1. specifying --installer-url URL, or
2. running via buildbot and running the read-buildbot-config action
"""
    if self.config.get("require_test_zip") and not self.test_url:
        message += """test_url isn't set!
You can set this by:
1. specifying --test-url URL, or
2. running via buildbot and running the read-buildbot-config action
"""
    if message:
        self.fatal(message + "Can't run download-and-extract... exiting")
    if self.config.get("developer_mode") and self._is_darwin():
        # Bug 1066700 only affects Mac users that try to run mozharness locally
        version_output = self._query_binary_version(
            regex=re.compile(r"UnZip\ (\d+\.\d+)\ .*", re.MULTILINE),
            cmd=[self.query_exe('unzip'), '-v']
        )
        # _query_binary_version returns the whole match ("UnZip 6.00 ...").
        # The old code compared that string against the int 6, which is
        # always true on Python 2 and a TypeError on Python 3; extract the
        # numeric version and compare it properly.
        version_str = re.search(r"(\d+\.\d+)", version_output).group(1)
        if float(version_str) < 6.0:
            self.fatal("We require a more recent version of unzip to unpack our tests.zip files.\n" \
                       "You are currently using version %s. Please update to at least 6.0.\n" \
                       "You can visit http://www.info-zip.org/UnZip.html" % version_str)
def _download_test_zip(self):
    """Fetch the tests zip into the work dir and remember its local path."""
    dirs = self.query_abs_dirs()
    # Reuse any previously-chosen file name (None lets download_file pick).
    target_name = self.test_zip_path or None
    # try to use our proxxy servers: download_file routes through proxxy
    # unless we're in developer mode.
    fetched = self.download_file(self.test_url, file_name=target_name,
                                 parent_dir=dirs['abs_work_dir'],
                                 error_level=FATAL)
    self.test_zip_path = os.path.realpath(fetched)
def _download_unzip(self, url, parent_dir):
    """Generic download+unzip.
    This is hardcoded to halt on failure.
    We should probably change some other methods to call this."""
    dirs = self.query_abs_dirs()
    # renamed from 'zipfile': the old local shadowed the stdlib module name
    zip_path = self.download_file(url, parent_dir=dirs['abs_work_dir'],
                                  error_level=FATAL)
    command = self.query_exe('unzip', return_type='list')
    command.extend(['-q', '-o', zip_path])
    self.run_command(command, cwd=parent_dir, halt_on_failure=True,
                     fatal_exit_code=3, output_timeout=1760)
def _extract_test_zip(self, target_unzip_dirs=None):
    """Unpack the downloaded tests zip into the test install dir.

    target_unzip_dirs optionally restricts extraction to those members.
    """
    dirs = self.query_abs_dirs()
    unzip_bin = self.query_exe("unzip")
    install_dir = dirs.get('abs_test_install_dir',
                           os.path.join(dirs['abs_work_dir'], 'tests'))
    self.mkdir_p(install_dir)
    # -o (overwrite) keeps unzip from prompting on a hidden pipe when this
    # action runs twice without a clobber; subprocess.Popen would hang
    # waiting for input otherwise.
    command = [unzip_bin, '-q', '-o', self.test_zip_path]
    if target_unzip_dirs:
        command.extend(target_unzip_dirs)
    # TODO error_list
    # unzip return code 11 is 'no matching files were found' -- accepted.
    self.run_command(command, cwd=install_dir,
                     halt_on_failure=True, success_codes=[0, 11],
                     fatal_exit_code=3)
def _read_tree_config(self):
    """Reads an in-tree config file (if 'in_tree_config' is configured),
    merges it into self.tree_config, dumps it to the log dir, and locks
    the dict against further modification."""
    dirs = self.query_abs_dirs()
    test_install_dir = dirs.get('abs_test_install_dir',
                                os.path.join(dirs['abs_work_dir'], 'tests'))
    if 'in_tree_config' in self.config:
        rel_tree_config_path = self.config['in_tree_config']
        tree_config_path = os.path.join(test_install_dir, rel_tree_config_path)
        if not os.path.isfile(tree_config_path):
            # Added the missing space between the two sentences of the
            # original message ("...exist!It must...").
            self.fatal("The in-tree configuration file '%s' does not exist! "
                       "It must be added to '%s'. See bug 1035551 for more details." %
                       (tree_config_path, os.path.join('gecko', 'testing', rel_tree_config_path)))
        try:
            self.tree_config.update(parse_config_file(tree_config_path))
        except Exception:
            # Was a bare 'except:', which would also swallow SystemExit and
            # KeyboardInterrupt; self.exception(level=FATAL) logs and exits.
            msg = "There was a problem parsing the in-tree configuration file '%s'!" % \
                  os.path.join('gecko', 'testing', rel_tree_config_path)
            self.exception(message=msg, level=FATAL)
    self.dump_config(file_path=os.path.join(dirs['abs_log_dir'], 'treeconfig.json'),
                     config=self.tree_config)
    self.tree_config.lock()
def structured_output(self, suite_category):
    """Defines whether structured logging is in use in this configuration. This
    may need to be replaced with data from a different config at the resolution
    of bug 1070041 and related bugs.
    """
    if 'structured_suites' not in self.tree_config:
        return False
    return suite_category in self.tree_config['structured_suites']
def get_test_output_parser(self, suite_category, strict=False,
                           fallback_parser_class=DesktopUnittestOutputParser,
                           **kwargs):
    """Derive and return an appropriate output parser: the structured
    output parser when configured for this suite, otherwise the fallback
    parser class."""
    if self.structured_output(suite_category):
        self.info("Structured output parser in use for %s." % suite_category)
        return StructuredOutputParser(suite_category=suite_category,
                                      strict=strict, **kwargs)
    # The default fallback takes suite_category; custom fallbacks do not.
    if fallback_parser_class is DesktopUnittestOutputParser:
        return DesktopUnittestOutputParser(suite_category=suite_category, **kwargs)
    return fallback_parser_class(**kwargs)
def _download_installer(self):
    """Download the build installer into the work dir and record its URL
    as the build_url buildbot property."""
    dirs = self.query_abs_dirs()
    fetched = self.download_file(self.installer_url,
                                 file_name=self.installer_path or None,
                                 parent_dir=dirs['abs_work_dir'],
                                 error_level=FATAL)
    self.installer_path = os.path.realpath(fetched)
    self.set_buildbot_property("build_url", self.installer_url, write_to_file=True)
def _download_and_extract_symbols(self):
    """Fetch the crashreporter symbols zip and unpack it; in 'ondemand'
    mode just record the URL instead of downloading."""
    dirs = self.query_abs_dirs()
    self.symbols_url = self.query_symbols_url()
    if self.config.get('download_symbols') == 'ondemand':
        self.symbols_path = self.symbols_url
        return
    if not self.symbols_path:
        self.symbols_path = os.path.join(dirs['abs_work_dir'], 'symbols')
        self.mkdir_p(self.symbols_path)
    archive = self.download_file(self.symbols_url,
                                 parent_dir=self.symbols_path,
                                 error_level=FATAL)
    self.set_buildbot_property("symbols_url", self.symbols_url,
                               write_to_file=True)
    self.run_command(['unzip', '-q', archive], cwd=self.symbols_path,
                     halt_on_failure=True, fatal_exit_code=3)
def download_and_extract(self, target_unzip_dirs=None):
"""
download and extract test zip / download installer
"""
# Swap plain http for https when we're downloading from ftp
# See bug 957502 and friends
from_ = "http://ftp.mozilla.org"
to_ = "https://ftp-ssl.mozilla.org"
for attr in 'test_url', 'symbols_url', 'installer_url':
url = getattr(self, attr)
if url and url.startswith(from_):
new_url = url.replace(from_, to_)
self.info("Replacing url %s -> %s" % (url, new_url))
setattr(self, attr, new_url)
if self.test_url:
self._download_test_zip()
self._extract_test_zip(target_unzip_dirs=target_unzip_dirs)
self._read_tree_config()
self._download_installer()
if self.config.get('download_symbols'):
self._download_and_extract_symbols()
# create_virtualenv is in VirtualenvMixin.
def preflight_install(self):
    """Make sure we have an installer path and mozinstall before install()."""
    if not self.installer_path:
        if self.config.get('installer_path'):
            self.installer_path = self.config['installer_path']
        else:
            self.fatal("""installer_path isn't set!
You can set this by:
1. specifying --installer-path PATH, or
2. running the download-and-extract action
""")
    if not self.is_python_package_installed("mozInstall"):
        self.fatal("""Can't call install() without mozinstall!
Did you run with --create-virtualenv? Is mozinstall in virtualenv_modules?""")

def install_app(self, app=None, target_dir=None, installer_path=None):
    """ Dependent on mozinstall.

    Installs the application from installer_path (default:
    self.installer_path) into target_dir and returns mozinstall's
    output -- the path of the installed binary.
    """
    # install the application
    cmd = self.query_exe("mozinstall", default=self.query_python_path("mozinstall"), return_type="list")
    if app:
        cmd.extend(['--app', app])
    # Remove the below when we no longer need to support mozinstall 0.3
    self.info("Detecting whether we're running mozinstall >=1.0...")
    output = self.get_output_from_command(cmd + ['-h'])
    if '--source' in output:
        cmd.append('--source')
    # End remove
    dirs = self.query_abs_dirs()
    if not target_dir:
        target_dir = dirs.get('abs_app_install_dir',
                              os.path.join(dirs['abs_work_dir'],
                                           'application'))
    self.mkdir_p(target_dir)
    if not installer_path:
        installer_path = self.installer_path
    cmd.extend([installer_path,
                '--destination', target_dir])
    # TODO we'll need some error checking here
    return self.get_output_from_command(cmd, halt_on_failure=True,
                                        fatal_exit_code=3)
def install(self):
    """Install the downloaded build and remember where the binary landed."""
    application = self.config.get('application')
    self.binary_path = self.install_app(app=application)
def query_minidump_tooltool_manifest(self):
    """Return the tooltool manifest path for minidump_stackwalk, either
    from config or derived from the current platform and arch."""
    configured = self.config.get('minidump_tooltool_manifest_path')
    if configured:
        return configured
    self.info('minidump tooltool manifest unknown. determining based upon platform and arch')
    template = "config/tooltool-manifests/%s/releng.manifest"
    if self._is_windows():
        # we use the same minidump binary for 32 and 64 bit windows
        return template % 'win32'
    if self._is_darwin():
        # we only use the 64 bit binary for osx
        return template % 'macosx64'
    if self._is_linux():
        return template % ('linux64' if self._is_64_bit() else 'linux32')
    self.fatal('could not determine minidump tooltool manifest')
def query_minidump_filename(self):
    """Return the minidump_stackwalk binary filename for this platform and
    arch, unless overridden via config."""
    override = self.config.get('minidump_stackwalk_path')
    if override:
        return override
    self.info('minidump filename unknown. determining based upon platform and arch')
    template = '%s-minidump_stackwalk'
    if self._is_windows():
        # we use the same minidump binary for 32 and 64 bit windows
        return template % ('win32',) + '.exe'
    if self._is_darwin():
        # we only use the 64 bit binary for osx
        return template % ('macosx64',)
    if self._is_linux():
        return template % ('linux64' if self._is_64_bit() else 'linux32')
    self.fatal('could not determine minidump filename')
def query_minidump_stackwalk(self):
    """Return (and cache) the path of the minidump_stackwalk binary,
    downloading it via tooltool when 'download_minidump_stackwalk' is set."""
    if self.minidump_stackwalk_path:
        return self.minidump_stackwalk_path
    c = self.config
    dirs = self.query_abs_dirs()
    if c.get('download_minidump_stackwalk'):
        minidump_stackwalk_path = self.query_minidump_filename()
        tooltool_manifest_path = self.query_minidump_tooltool_manifest()
        self.info('grabbing minidump binary from tooltool')
        try:
            self.tooltool_fetch(
                manifest=os.path.join(dirs.get('abs_test_install_dir',
                                               os.path.join(dirs['abs_work_dir'], 'tests')),
                                      tooltool_manifest_path),
                output_dir=dirs['abs_work_dir'],
                cache=c.get('tooltool_cache')
            )
        except KeyError:
            self.error('missing a required key. is "tooltool_servers" in self.config?')
        abs_minidump_path = os.path.join(dirs['abs_work_dir'],
                                         minidump_stackwalk_path)
        if os.path.exists(abs_minidump_path):
            # make the fetched binary executable (Python 2 octal literal)
            self.chmod(abs_minidump_path, 0755)
            self.minidump_stackwalk_path = abs_minidump_path
        else:
            self.warning("minidump stackwalk path was given but couldn't be found. "
                         "Tried looking in '%s'" % abs_minidump_path)
            # don't burn the job but we should at least turn them orange so it is caught
            self.buildbot_status(TBPL_WARNING, WARNING)
    return self.minidump_stackwalk_path

def _run_cmd_checks(self, suites):
    """Run the given pre/post-flight command suites, honoring each suite's
    'enabled', 'architectures', 'halt_on_failure' and 'fatal_exit_code'."""
    if not suites:
        return
    dirs = self.query_abs_dirs()
    for suite in suites:
        # XXX platform.architecture() may give incorrect values for some
        # platforms like mac as excutable files may be universal
        # files containing multiple architectures
        # NOTE 'enabled' is only here while we have unconsolidated configs
        if not suite['enabled']:
            continue
        if suite.get('architectures'):
            arch = platform.architecture()[0]
            if arch not in suite['architectures']:
                continue
        cmd = suite['cmd']
        name = suite['name']
        self.info("Running pre test command %(name)s with '%(cmd)s'"
                  % {'name': name, 'cmd': ' '.join(cmd)})
        if self.buildbot_config:  # this cmd is for buildbot
            # TODO rather then checking for formatting on every string
            # in every preflight enabled cmd: find a better solution!
            # maybe I can implement WithProperties in mozharness?
            cmd = [x % (self.buildbot_config.get('properties'))
                   for x in cmd]
        self.run_command(cmd,
                        cwd=dirs['abs_work_dir'],
                        error_list=BaseErrorList,
                        halt_on_failure=suite['halt_on_failure'],
                        fatal_exit_code=suite.get('fatal_exit_code', 3))
def preflight_run_tests(self):
    """preflight commands for all tests"""
    # If the in-tree config hasn't been loaded by a previous step, load it now.
    if len(self.tree_config) == 0:
        self._read_tree_config()
    config = self.config
    if config.get('run_cmd_checks_enabled'):
        self._run_cmd_checks(config.get('preflight_run_cmd_suites', []))
    elif config.get('preflight_run_cmd_suites'):
        self.warning("Proceeding without running prerun test commands."
                     " These are often OS specific and disabling them may"
                     " result in spurious test results!")
def postflight_run_tests(self):
    """Postflight commands for all tests."""  # was a copy-pasted "preflight" docstring
    c = self.config
    if c.get('run_cmd_checks_enabled'):
        self._run_cmd_checks(c.get('postflight_run_cmd_suites', []))
|
walac/build-mozharness
|
mozharness/mozilla/testing/testbase.py
|
Python
|
mpl-2.0
| 28,711
|
[
"VisIt"
] |
50ead9908b2021d568bf778026ffff603d96d456147ebcfe5e00410df849dbce
|
#!/usr/bin/python2.6
# This file is a part of Metagam project.
#
# Metagam is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# Metagam is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Metagam. If not, see <http://www.gnu.org/licenses/>.
from mg.core.cass import CassandraObject, CassandraObjectList, ObjectNotFoundException
from mg.core.applications import Module
from uuid import uuid4
from wsgiref.handlers import format_date_time
from datetime import datetime
from PIL import Image, ImageDraw, ImageFilter, ImageEnhance
from mg.core.bezier import make_bezier
from mg.core.tools import *
from operator import itemgetter
import cStringIO
import time
import re
import random
import hashlib
import cgi
# Page size used when listing auth-log entries (presumably; verify against callers).
log_per_page = 50000
re_newline = re.compile(r'\n')
# "<hex-uuid>" optionally followed by extra args (DOTALL: args may span lines).
re_permissions_args = re.compile(r'^([a-f0-9]+)(?:(.+)|)$', re.DOTALL)
# Auth-tracking URL selectors: by user/player/cookie uuid, or by IP (v4/v6).
re_track_user = re.compile(r'^user/([a-f0-9]+)$')
re_track_player = re.compile(r'^player/([a-f0-9]+)$')
re_track_cookie = re.compile(r'^cookie/([a-f0-9]+)$')
re_track_ip = re.compile(r'^ip/([0-9a-f\.:]+)$')
# Captures the first and last 6 characters of a long token (for short display).
re_short = re.compile(r'^(.{6}).*(.{6})$')
re_nonalphanum = re.compile(r'[^a-zA-Z0-9_]')
class User(CassandraObject):
    """A registered user account record."""
    clsname = "User"
    # Secondary indexes: [equality columns], optional ordering column.
    indexes = {
        "created": [[], "created"],
        "last_login": [[], "last_login"],
        "name": [["name_lower"]],
        "inactive": [["inactive"], "created"],
        "email": [["email"]],
        "tag": [["tag"]],
        "check": [["check"], "created"],
    }

class UserList(CassandraObjectList):
    """List wrapper for User objects."""
    objcls = User

class UserPermissions(CassandraObject):
    """Per-user permission grants."""
    clsname = "UserPermissions"
    indexes = {
        "any": [["any"]],
    }
    def sync(self):
        # Flag the record so the "any" index always matches it.
        self.set("any", "1")

class UserPermissionsList(CassandraObjectList):
    objcls = UserPermissions

class Session(CassandraObject):
    """A browser session, keyed by the session cookie value."""
    clsname = "Session"
    indexes = {
        "valid_till": [[], "valid_till"],
        "user": [["user"]],
        "authorized": [["authorized"]],
        "authorized_user": [["authorized", "user"]],
    }
    def user(self):
        # uuid of the logged-in user, or None
        return self.get("user")
    def semi_user(self):
        # logged-in user if any, else the last user seen on this session
        user = self.get("user")
        if user is not None:
            return user
        return self.get("semi_user")

class SessionList(CassandraObjectList):
    objcls = Session

class Captcha(CassandraObject):
    """A generated captcha challenge with an expiry."""
    clsname = "Captcha"
    indexes = {
        "valid_till": [[], "valid_till"],
    }

class CaptchaList(CassandraObjectList):
    objcls = Captcha

class DBBanIP(CassandraObject):
    """An IP ban record with an expiry."""
    clsname = "BanIP"
    indexes = {
        "user": [["user"]],
        "till": [[], "till"],
    }

class DBBanIPList(CassandraObjectList):
    objcls = DBBanIP

class AutoLogin(CassandraObject):
    """A time-limited auto-login token."""
    clsname = "AutoLogin"
    indexes = {
        "valid_till": [[], "valid_till"],
    }

class AutoLoginList(CassandraObjectList):
    objcls = AutoLogin

class AuthLog(CassandraObject):
    """An authentication event (login/logout/change), queryable by several keys."""
    clsname = "AuthLog"
    indexes = {
        "performed": [[], "performed"],
        "user_performed": [["user"], "performed"],
        "player_performed": [["player"], "performed"],
        "session_performed": [["session"], "performed"],
        "ip_performed": [["ip"], "performed"],
    }

class AuthLogList(CassandraObjectList):
    objcls = AuthLog

class DossierRecord(CassandraObject):
    """An admin note attached to a user."""
    clsname = "DossierRecord"
    indexes = {
        "user_performed": [["user"], "performed"],
        "admin_performed": [["admin"], "performed"],
    }

class DossierRecordList(CassandraObjectList):
    objcls = DossierRecord
class Sessions(Module):
    "The mostly used authentication functions. It must load very fast"
    def register(self):
        # Expose the session helpers to the rest of the application.
        self.rhook("session.get", self.get_session)
        self.rhook("session.require_login", self.require_login)
        self.rhook("session.find_user", self.find_user)
        self.rhook("session.require_permission", self.require_permission)
        self.rhook("session.log", self.log)
def log(self, **kwargs):
    """Store an AuthLog entry built from *kwargs*; session ids are hashed
    before being written."""
    self.call("session.log-fix", kwargs)
    entry = self.obj(AuthLog)
    for key, value in kwargs.iteritems():
        if key == "session":
            # never store the raw session id in the log
            value = hashlib.md5(value).hexdigest()
        entry.set(key, value)
    entry.set("performed", self.now())
    entry.store()
def find_session(self, sid):
    """Load the Session with uuid *sid*, or None when it does not exist."""
    try:
        session = self.obj(Session, sid)
    except ObjectNotFoundException:
        session = None
    return session
def get_session(self, create=False, cache=True, domain=None):
    """Return the Session for the current request.

    Looks the session up by the per-app cookie, refreshing its expiry at
    most once per hour (under a lock).  When no valid session exists and
    *create* is set, a new session is created and its cookie is sent.
    Returns None when there is no session and create is False.
    """
    req = self.req()
    if cache:
        try:
            return req._session
        except AttributeError:
            pass
    cookie_name = "mgsess-%s" % self.app().tag
    sid = req.cookie(cookie_name)
    if sid is not None:
        session = self.find_session(sid)
        if session is not None:
            # update session every hour
            if session.get("updated") < self.now(-3600):
                with self.lock(["session.%s" % session.uuid]):
                    session.load()
                    session.set("valid_till", "%020d" % (self.time() + 90 * 86400))
                    session.set("updated", self.now())
                    if session.get("ip") != req.remote_addr():
                        session.set("ip", req.remote_addr())
                        user = session.get("user")
                        if user:
                            self.call("session.log", act="change", session=session.uuid, ip=req.remote_addr(), user=user)
                    session.store()
            if cache:
                req._session = session
            return session
    if not create:
        return None
    sid = uuid4().hex
    session = self.obj(Session, sid, {})
    if create:
        args = {}
        if domain is None:
            domain = req.environ.get("HTTP_X_REAL_HOST")
        if domain is not None:
            #domain = re.sub(r'^www\.', '', domain)
            args["domain"] = "." + domain
        args["path"] = "/"
        # BUG FIX: this module does `from datetime import datetime`, so the
        # original `datetime.datetime.now()` raised AttributeError here.
        args["expires"] = format_date_time(time.mktime(datetime.now().timetuple()) + 90 * 86400)
        req.set_cookie(cookie_name, sid, **args)
    # newly created session is stored for 24 hour only
    # this interval is increased after the next successful 'get'
    session.set("valid_till", "%020d" % (self.time() + 86400))
    session.set("ip", req.remote_addr())
    # Time in the past. This guarantees that get_session will properly update valid_till on the next get
    session.set("updated", self.now(-3601))
    session.store()
    if cache:
        req._session = session
    return session
def require_login(self):
    """Redirect to the login page unless the request has an authorized user."""
    req = self.req()
    session = req.session()
    if not session or not session.get("user"):
        # urlencode comes from mg.core.tools (star import)
        self.call("web.redirect", "/auth/login?redirect=%s" % urlencode(req.uri()))
def find_user(self, val, allow_email=False, allow_name=True, return_id=False):
    """Look a user up case-insensitively by name (and optionally email).

    Returns the uuid when return_id is set, otherwise a loaded User
    object; None when nothing matches.  Name matches take precedence.
    """
    needle = val.lower()
    lookup_indexes = []
    if allow_name:
        lookup_indexes.append("name")
    if allow_email:
        lookup_indexes.append("email")
    for index in lookup_indexes:
        matches = self.objlist(UserList, query_index=index, query_equal=needle)
        if len(matches):
            if return_id:
                return matches[0].uuid
            matches.load()
            return matches[0]
    return None
def require_permission(self, perm):
    """Abort the request with web.forbidden unless it has permission *perm*."""
    request = self.req()
    if request.has_access(perm):
        return
    self.call("web.forbidden")
class Interface(Module):
    "Functions used in special interfaces (user and admin)"
    def register(self):
        # Permission management
        self.rhook("auth.permissions", self.auth_permissions)
        self.rhook("auth.grant-permission", self.auth_grant_permission)
        self.rhook("menu-admin-root.index", self.menu_root_index)
        self.rhook("menu-admin-auth.index", self.menu_auth_index)
        self.rhook("ext-admin-auth.permissions", self.admin_permissions, priv="permissions")
        self.rhook("headmenu-admin-auth.permissions", self.headmenu_permissions)
        self.rhook("ext-admin-auth.editpermissions", self.admin_editpermissions, priv="permissions")
        self.rhook("headmenu-admin-auth.editpermissions", self.headmenu_editpermissions)
        self.rhook("ext-admin-auth.edituserpermissions", self.admin_edituserpermissions, priv="permissions")
        self.rhook("headmenu-admin-auth.edituserpermissions", self.headmenu_edituserpermissions)
        self.rhook("permissions.list", self.permissions_list)
        self.rhook("security.list-roles", self.list_roles)
        self.rhook("security.users-roles", self.users_roles)
        # Scheduled cleanup
        self.rhook("queue-gen.schedule", self.schedule)
        self.rhook("auth.cleanup", self.cleanup)
        self.rhook("auth.cleanup-inactive-users", self.cleanup_inactive_users)
        # Public auth pages (register/login/logout/captcha/activation/reminder)
        self.rhook("ext-auth.register", self.ext_register, priv="public")
        self.rhook("ext-auth.captcha", self.ext_captcha, priv="public")
        self.rhook("ext-auth.logout", self.ext_logout, priv="public")
        self.rhook("ext-auth.login", self.ext_login, priv="public")
        self.rhook("auth.messages", self.messages, priority=10)
        self.rhook("ext-auth.activate", self.ext_activate, priv="public")
        self.rhook("ext-auth.reactivate", self.ext_reactivate, priv="public")
        self.rhook("auth.message", self.auth_message)
        self.rhook("ext-auth.remind", self.ext_remind, priv="public")
        # Pages for logged-in users
        self.rhook("ext-auth.change", self.ext_change, priv="logged")
        self.rhook("ext-auth.email", self.ext_email, priv="logged")
        self.rhook("objclasses.list", self.objclasses_list)
        # Admin user-management pages
        self.rhook("ext-admin-auth.user-find", self.ext_user_find, priv="users")
        self.rhook("ext-admin-auth.user-dashboard", self.ext_user_dashboard, priv="users")
        self.rhook("ext-admin-auth.user-lastreg", self.ext_user_lastreg, priv="users")
        self.rhook("headmenu-admin-auth.user-dashboard", self.headmenu_user_dashboard)
        self.rhook("auth.autologin", self.autologin)
        self.rhook("web.robots-txt", self.robots_txt)
        self.rhook("user.email", self.user_email)
        self.rhook("ext-admin-auth.change-password", self.admin_change_password, priv="change.passwords")
        self.rhook("headmenu-admin-auth.change-password", self.headmenu_change_password)
        self.rhook("ext-admin-auth.change-name", self.admin_change_name, priv="change.names")
        self.rhook("headmenu-admin-auth.change-name", self.headmenu_change_name)
        self.rhook("ext-admin-auth.track", self.admin_auth_track, priv="auth.tracking")
        self.rhook("headmenu-admin-auth.track", self.headmenu_auth_track, priv="auth.tracking")
        self.rhook("auth.password-reminder", self.password_reminder)
def user_email(self, user_obj):
    """Hook handler "user.email": return the e-mail stored on a user object (or None)."""
    address = user_obj.get("email")
    return address
def robots_txt(self, disallow):
    """Hook handler "web.robots-txt": keep crawlers out of the /auth/ area."""
    disallow += ["/auth/"]
def schedule(self, sched):
    """Hook handler "queue-gen.schedule": register the nightly auth cleanup job."""
    # runs at 01:05 every day
    sched.add("auth.cleanup", "5 1 * * *", priority=10)
def cleanup(self):
    """Cron job "auth.cleanup": purge expired authentication objects.

    Removes sessions, captchas and autologin tokens whose "valid_till"
    is in the past, auth-log records older than a year, and IP bans
    whose "till" has passed.
    """
    # One timestamp for all "valid_till" queries so the three purges
    # use a consistent cutoff (the original recomputed self.time()
    # three times and repeated the same three-line stanza five times).
    now_packed = "%020d" % self.time()
    expired = [
        (SessionList, "valid_till", now_packed),
        (CaptchaList, "valid_till", now_packed),
        (AutoLoginList, "valid_till", now_packed),
        # auth log entries are kept for 365 days
        (AuthLogList, "performed", self.now(-365 * 86400)),
        (DBBanIPList, "till", self.now()),
    ]
    for lst_class, index, finish in expired:
        self.objlist(lst_class, query_index=index, query_finish=finish).remove()
def cleanup_inactive_users(self):
    """Cron job: drop accounts that were never activated within 3 days."""
    cutoff = "%020d" % (self.time() - 86400 * 3)
    stale = self.objlist(UserList, query_index="inactive", query_equal="1", query_finish=cutoff)
    stale.remove()
def objclasses_list(self, objclasses):
    """Hook handler "objclasses.list": register auth-related DB classes.

    Maps a class name to its (object class, list class) pair.
    """
    objclasses.update({
        "User": (User, UserList),
        "UserPermissions": (UserPermissions, UserPermissionsList),
        "Session": (Session, SessionList),
        "Captcha": (Captcha, CaptchaList),
        "AutoLogin": (AutoLogin, AutoLoginList),
        "AuthLog": (AuthLog, AuthLogList),
        "DossierRecord": (DossierRecord, DossierRecordList),
        "BanIP": (DBBanIP, DBBanIPList),
    })
def ext_register(self):
    """Web handler '/auth/register': render and process the registration form.

    On POST: validates name/password/sex/email/captcha, creates an inactive
    User with a salted MD5 password hash, attaches it to the session as
    "semi_user", e-mails an activation code and redirects to the activation
    page. On GET (or validation failure): renders the form.
    """
    req = self.req()
    # True => create a session if the visitor does not have one yet
    session = req.session(True)
    form = self.call("web.form")
    name = req.param("name").strip()
    sex = req.param("sex").strip()
    email = req.param("email").strip()
    password1 = req.param("password1")
    password2 = req.param("password2")
    captcha = req.param("captcha").strip()
    redirect = req.param("redirect")
    # defaults overridable by other modules through the hook below
    params = {
        "name_re": r'^[A-Za-z0-9_-]+$',
        "name_invalid_re": self._("Invalid characters in the name. Only latin letters, numbers, symbols '_' and '-' are allowed"),
    }
    self.call("auth.form_params", params)
    if req.ok():
        # --- field validation ---
        if not name:
            form.error("name", self._("Enter your user name"))
        elif not re.match(params["name_re"], name, re.UNICODE):
            form.error("name", params["name_invalid_re"])
        elif self.call("session.find_user", name, allow_email=True):
            form.error("name", self._("This name is taken already"))
        if not password1:
            form.error("password1", self._("Enter your password"))
        elif len(password1) < 6:
            form.error("password1", self._("Minimal password length - 6 characters"))
        elif not password2:
            form.error("password2", self._("Retype your password"))
        elif password1 != password2:
            form.error("password2", self._("Password don't match. Try again, please"))
            # do not echo mismatched passwords back into the form
            password1 = ""
            password2 = ""
        if sex != "0" and sex != "1":
            form.error("sex", self._("Select your sex"))
        if not email:
            form.error("email", self._("Enter your e-mail address"))
        elif not re.match(r'^[a-zA-Z0-9_\-+\.]+@[a-zA-Z0-9\-_\.]+\.[a-zA-Z0-9]+$', email):
            form.error("email", self._("Enter correct e-mail"))
        else:
            # e-mails must be unique (stored lowercased)
            existing_email = self.objlist(UserList, query_index="email", query_equal=email.lower())
            existing_email.load(silent=True)
            if len(existing_email):
                form.error("email", self._("There is another user with this email"))
        if not captcha:
            form.error("captcha", self._("Enter numbers from the picture"))
        else:
            try:
                # captcha is stored keyed by the session uuid (see ext_captcha)
                cap = self.obj(Captcha, session.uuid)
                if cap.get("number") != captcha:
                    form.error("captcha", self._("Incorrect number"))
            except ObjectNotFoundException:
                form.error("captcha", self._("Incorrect number"))
        # let other modules add their own validation errors
        self.call("auth.register-form", form, "validate")
        if not form.errors:
            # --- create the (inactive) user record ---
            email = email.lower()
            user = self.obj(User)
            now = "%020d" % self.time()
            user.set("created", now)
            user.set("last_login", now)
            user.set("sex", sex)
            user.set("name", name)
            user.set("name_lower", name.lower())
            user.set("email", email.lower())
            user.set("inactive", 1)
            activation_code = uuid4().hex
            user.set("activation_code", activation_code)
            user.set("activation_redirect", redirect)
            # random 10-letter salt for the MD5 password hash
            salt = ""
            letters = "abcdefghijklmnopqrstuvwxyz"
            for i in range(0, 10):
                salt += random.choice(letters)
            user.set("salt", salt)
            user.set("pass_reminder", self.call("auth.password-reminder", password1))
            m = hashlib.md5()
            m.update(salt + password1.encode("utf-8"))
            user.set("pass_hash", m.hexdigest())
            user.store()
            # attach the not-yet-activated user to the session as "semi_user"
            with self.lock(["session.%s" % session.uuid]):
                session.load()
                session.delkey("user")
                session.set("semi_user", user.uuid)
                session.set("ip", req.remote_addr())
                session.store()
            self.call("session.log", act="register", session=session.uuid, ip=req.remote_addr(), user=user.uuid)
            # send the activation e-mail (text overridable via hook)
            params = {
                "subject": self._("Account activation"),
                "content": self._("Someone possibly you requested registration on the {host}. If you really want to do this enter the following activation code on the site:\n\n{code}\n\nor simply follow the link:\n\n{protocol}://{host}/auth/activate/{user}?code={code}"),
            }
            self.call("auth.activation_email", params)
            self.call("email.send", email, name, params["subject"], params["content"].format(code=activation_code, host=req.host(), user=user.uuid, protocol=self.app().protocol))
            # NOTE(review): "web.redirect" appears to terminate the request — confirm
            self.call("web.redirect", "/auth/activate/%s" % user.uuid)
    # --- render the form (GET or validation errors) ---
    if redirect is not None:
        form.hidden("redirect", redirect)
    form.input(self._("User name"), "name", name)
    form.select(self._("Sex"), "sex", sex, [{"value": 0, "description": self._("Male")}, {"value": 1, "description": self._("Female")}])
    form.input(self._("E-mail"), "email", email)
    form.password(self._("Password"), "password1", password1)
    form.password(self._("Confirm password"), "password2", password2)
    form.input('<img id="captcha" src="/auth/captcha" alt="" /><br />' + self._('Enter a number (6 digits) from the picture'), "captcha", "")
    self.call("auth.register-form", form, "render")
    form.submit(None, None, self._("Register"))
    vars = {
        "title": self._("User registration"),
        "ret": {
            "href": "/",
            "title": self._("Cancel"),
        },
    }
    self.call("auth.form", form, vars)
    self.call("web.response_global", form.html(vars), vars)
def ext_activate(self):
    """Web handler '/auth/activate/<uuid>': confirm the e-mailed activation code.

    On success the account loses its "inactive" flag, the session is
    promoted from "semi_user" to "user", and a "Continue" form pointing
    at the stored (or default) redirect is rendered.
    """
    req = self.req()
    try:
        user = self.obj(User, req.args)
    except ObjectNotFoundException:
        self.call("web.not_found")
    session = req.session(True)
    redirects = {}
    self.call("auth.redirects", redirects)
    code = req.param("code").strip()
    # already active => nothing to do
    if not user.get("inactive"):
        self.call("web.redirect", redirects.get("register", "/"))
    vars = {
        "title": self._("User activation"),
    }
    form = self.call("web.form")
    # "okget" allows activation via the GET link in the e-mail
    if req.param("ok") or req.param("okget"):
        if not code:
            form.error("code", self._("Enter activation code from your e-mail box"))
        elif code != user.get("activation_code"):
            form.error("code", self._("Invalid activation code"))
        if not form.errors:
            redirect = user.get("activation_redirect")
            # activate the account
            with self.lock(["user.%s" % user.uuid]):
                user.load()
                user.delkey("inactive")
                user.delkey("activation_code")
                user.delkey("activation_redirect")
                user.store()
            self.call("auth.registered", user)
            self.call("auth.activated", user, redirect)
            # promote session: semi_user -> user (logged in)
            with self.lock(["session.%s" % session.uuid]):
                session.load()
                session.set("user", user.uuid)
                session.delkey("semi_user")
                session.set("ip", req.remote_addr())
                session.store()
            self.call("session.log", act="login", session=session.uuid, ip=req.remote_addr(), user=user.uuid)
            if not redirect:
                redirect = redirects.get("register", "/")
            # success page: a GET "Continue" button leading to the redirect
            form = self.call("web.form", action=redirect)
            form.method = "get"
            form.add_message_top(self._("Your account was registered successfully"))
            form.submit(None, None, self._("Continue"))
            self.call("auth.render-activated-form", user, form)
            self.call("web.response_global", form.html(), vars)
    # render the code-entry form (GET or validation errors)
    form.input(self._("Activation code"), "code", code)
    form.submit(None, None, self._("Activate"))
    form.add_message_top(self._("A message was sent to your mailbox. Enter the activation code from this message."))
    form.add_message_bottom(self._('If you have not received activation letter you can <a href="/auth/reactivate/%s">send another one or change your e-mail</a>') % user.uuid)
    self.call("auth.form", form, vars)
    self.call("web.response_global", form.html(), vars)
def ext_reactivate(self):
    """Web handler '/auth/reactivate/<uuid>': resend the activation e-mail.

    Lets a not-yet-activated user change the registered e-mail address
    (after proving the password and solving a captcha) and get a fresh
    activation code.
    """
    req = self.req()
    try:
        user = self.obj(User, req.args)
    except ObjectNotFoundException:
        self.call("web.not_found")
    # already activated => nothing to reactivate
    if not user.get("inactive"):
        redirects = {}
        self.call("auth.redirects", redirects)
        if redirects.has_key("register"):
            self.call("web.redirect", redirects["register"])
        self.call("web.redirect", "/")
    session = req.session(True)
    form = self.call("web.form")
    email = req.param("email")
    captcha = req.param("captcha").strip()
    password = req.param("password")
    if req.ok():
        msg = {}
        self.call("auth.messages", msg)
        if not captcha:
            form.error("captcha", self._("Enter numbers from the picture"))
        else:
            try:
                cap = self.obj(Captcha, session.uuid)
                if cap.get("number") != captcha:
                    form.error("captcha", self._("Incorrect number"))
            except ObjectNotFoundException:
                form.error("captcha", self._("Incorrect number"))
        if not email:
            form.error("email", self._("Enter new e-mail address"))
        elif not re.match(r'^[a-zA-Z0-9_\-+\.]+@[a-zA-Z0-9\-_\.]+\.[a-zA-Z0-9]+$', email):
            form.error("email", self._("Enter correct e-mail"))
        else:
            # the new e-mail may belong to this user only
            existing_email = self.objlist(UserList, query_index="email", query_equal=email.lower())
            existing_email.load(silent=True)
            if len(existing_email) > 1 or len(existing_email) and existing_email[0].uuid != user.uuid:
                form.error("email", self._("There is another user with this email"))
        if not password:
            form.error("password", msg["password_empty"])
        if not form.errors:
            # verify the password: MD5(salt + password)
            m = hashlib.md5()
            m.update(user.get("salt").encode("utf-8") + password.encode("utf-8"))
            if m.hexdigest() != user.get("pass_hash"):
                form.error("password", msg["password_incorrect"])
        if not form.errors:
            # store the new address and a fresh activation code, then resend
            user.set("email", email.lower())
            activation_code = uuid4().hex
            user.set("activation_code", activation_code)
            user.store()
            params = {
                "subject": self._("Account activation"),
                "content": self._("Someone possibly you requested registration on the {host}. If you really want to do this enter the following activation code on the site:\n\n{code}\n\nor simply follow the link:\n\n{protocol}://{host}/auth/activate/{user}?code={code}"),
            }
            self.call("auth.activation_email", params)
            self.call("email.send", email, user.get("name"), params["subject"], params["content"].format(code=activation_code, host=req.host(), user=user.uuid, protocol=self.app().protocol))
            self.call("web.redirect", "/auth/activate/%s" % user.uuid)
    # render the reactivation form
    form.input(self._("New e-mail"), "email", email)
    form.input('<img id="captcha" src="/auth/captcha" alt="" /><br />' + self._('Enter a number (6 digits) from the picture'), "captcha", "")
    form.password(self._("Password"), "password", password)
    form.submit(None, None, self._("Reactivate"))
    vars = {
        "title": self._("Retrying activation"),
    }
    self.call("auth.form", form, vars)
    self.call("web.response_global", form.html(), vars)
def ext_remind(self):
    """Web handler '/auth/remind': e-mail a password reminder.

    Sends one message listing the reminder hints for every account
    registered under the given e-mail address.
    """
    req = self.req()
    form = self.call("web.form")
    email = req.param("email")
    redirect = req.param("redirect")
    vars = {
        "title": self._("Password reminder"),
    }
    if req.ok():
        if not email:
            form.error("email", self._("Enter your e-mail"))
        if not form.errors:
            lst = self.objlist(UserList, query_index="email", query_equal=email.lower())
            if not len(lst):
                form.error("email", self._("No users with this e-mail"))
        if not form.errors:
            lst.load()
            name = ""
            content = ""
            # one reminder line per account sharing this e-mail;
            # modules may override the line via "auth.remind-message"
            for user in lst:
                msg = self.call("auth.remind-message", user) or self._("User '{user}' has password '{password}'\n").format(user=user.get("name"), password=user.get("pass_reminder"))
                content += msg
                name = user.get("name")
            params = {
                "subject": self._("Password reminder"),
                "content": self._("Someone possibly you requested password recovery on the {host} site. Accounts registered with your e-mail are:\n\n{content}\nIf you still can't remember your password feel free to contact our support.")
            }
            self.call("auth.remind_email", params)
            # make sure the message is deliverable even if previously bounced
            self.call("email.unblacklist", email)
            self.call("email.send", email, name, params["subject"], params["content"].format(content=content, host=req.host()))
            vars["ret"] = {
                "href": redirect if redirect else "/auth/login",
                "html": self._("Return")
            }
            # NOTE(review): auth.message renders a response page — confirm it terminates the request
            self.call("auth.message", self._("We have sent you an e-mail with your password reminder"), vars)
    form.hidden("redirect", redirect)
    form.input(self._("Your e-mail"), "email", email)
    form.submit(None, None, self._("Remind"))
    self.call("auth.form", form, vars)
    self.call("web.response_global", form.html(), vars)
def auth_message(self, message, vars):
    """Hook handler "auth.message": show *message* on the standard message page."""
    vars.update(message=message)
    self.call("web.response_template", "common/message.html", vars)
def ext_captcha(self):
    """Web handler '/auth/captcha': render a 6-digit captcha JPEG.

    Draws each random digit as hand-made Bezier splines with per-point
    jitter, stores the answer in a Captcha object keyed by the session
    uuid (checked by ext_register/ext_reactivate), and responds with
    the image.
    """
    req = self.req()
    session = req.session(True)
    if session is None:
        self.call("web.forbidden")
    # layout constants (pixels)
    field = 25      # outer margin
    char_w = 35     # glyph width
    char_h = 40     # glyph height
    step = 25       # horizontal distance between glyph origins
    digits = 6
    jitter = 0.15   # random distortion applied to each control point
    image = Image.new("RGB", (step * (digits - 1) + char_w + field * 2, char_h + field * 2), (255, 255, 255))
    draw = ImageDraw.Draw(image)
    number = ""
    # 51 curve-parameter samples in [0, 1] for each Bezier segment
    ts = [t / 50.0 for t in range(51)]
    for i in range(0, digits):
        digit = random.randint(0, 9)
        number += str(digit)
        off_x = i * step + field
        off_y = field + char_h * random.uniform(-0.1, 0.1)
        # each digit glyph is a list of Bezier control-point tuples in
        # normalized [0,1] glyph coordinates
        if digit == 0:
            splines = [
                ((0, 0.33), (0.33, -0.1), (0.67, -0.1), (1, 0.33), (1, 0.67)),
                ((1, 0.67), (0.67, 1.1), (0.33, 1.1), (0, 0.67), (0, 0.33)),
            ]
        elif digit == 1:
            splines = [
                ((0, 0.5), (0.6, 0)),
                ((0.6, 0), (0.6, 1)),
            ]
        elif digit == 2:
            splines = [
                ((0.1, 0.33), (0.33, -0.1), (0.67, -0.1), (0.9, 0.33)),
                ((0.9, 0.33), (0.9, 0.66), (0.1, 0.95)),
                ((0.1, 0.95), (1, 1)),
            ]
        elif digit == 3:
            splines = [
                ((0, 0.33), (0.33, -0.1), (0.67, -0.1), (1, 0.25), (1, 0.5), (0.33, 0.5)),
                ((0.33, 0.5), (1, 0.5), (1, 0.75), (0.66, 1.1), (0.33, 1.1), (0, 0.67)),
            ]
        elif digit == 4:
            splines = [
                ((0, 0), (0, 0.5), (0.8, 0.5)),
                ((0.8, 0), (0.8, 0.5), (0.8, 1)),
            ]
        elif digit == 5:
            splines = [
                ((0.8, 0), (0.2, 0), (0.2, 0.5)),
                ((0.2, 0.5), (0.6, 0.5), (0.8, 0.75), (0.6, 1), (0.2, 1)),
            ]
        elif digit == 6:
            splines = [
                ((1, 0), (0.67, -0.1), (0.33, -0.1), (0, 0.33), (0, 0.67)),
                ((0, 0.67), (0.33, 1.1), (0.67, 1.1), (1, 0.67)),
                ((1, 0.67), (0.67, 0.33), (0.33, 0.33), (0, 0.67))
            ]
        elif digit == 7:
            splines = [
                ((0, 0), (0.67, 0), (1, 0.33)),
                ((1, 0.33), (0.5, 0.5), (0.5, 1)),
            ]
        elif digit == 8:
            splines = [
                ((0.5, 0.5), (0.2, 0.5), (-0.2, 0.67), (0.2, 1), (0.5, 1)),
                ((0.5, 1), (0.8, 1), (1.2, 0.67), (0.8, 0.5), (0.5, 0.5)),
                ((0.5, 0.5), (0.2, 0.5), (-0.2, 0.33), (0.2, 0), (0.5, 0)),
                ((0.5, 0), (0.8, 0), (1.2, 0.33), (0.8, 0.5), (0.5, 0.5)),
            ]
        elif digit == 9:
            splines = [
                ((0, 1), (0.33, 1.1), (0.67, 1.1), (1, 0.67), (1, 0.33)),
                ((1, 0.33), (0.67, -0.1), (0.33, -0.1), (0, 0.33)),
                ((0, 0.33), (0.33, 0.67), (0.67, 0.67), (1, 0.33))
            ]
        points = []
        # "corrections" memoizes the jittered position of each control point
        # so shared endpoints of adjacent splines stay connected
        corrections = {}
        for spline in splines:
            corr = corrections.get(spline[0])
            if corr is None:
                x1 = spline[0][0] + random.uniform(-jitter, jitter)
                y1 = spline[0][1] + random.uniform(-jitter, jitter)
                corrections[spline[0]] = (x1, y1)
            else:
                x1 = corr[0]
                y1 = corr[1]
            xys = [(x1, y1)]
            # NOTE(review): this inner loop reuses "i", shadowing the
            # digit index above; harmless because the outer "for"
            # reassigns i each iteration, but fragile.
            for i in range(1, len(spline)):
                corr = corrections.get(spline[i])
                if corr is None:
                    x2 = spline[i][0] + random.uniform(-jitter, jitter)
                    y2 = spline[i][1] + random.uniform(-jitter, jitter)
                    corrections[spline[i]] = (x2, y2)
                else:
                    x2 = corr[0]
                    y2 = corr[1]
                xys.append((x2, y2))
                x1 = x2
                y1 = y2
            # scale normalized coordinates into image space
            xys = [(x * char_w + off_x, y * char_h + off_y) for x, y in xys]
            bezier = make_bezier(xys)
            points.extend(bezier(ts))
        draw.line(points, fill=(119, 119, 119), width=1)
    del draw
    # store the expected answer, valid for 24 hours, keyed by session
    captcha = self.obj(Captcha, session.uuid, silent=True)
    captcha.set("number", number)
    captcha.set("valid_till", "%020d" % (self.time() + 86400))
    captcha.store()
    data = cStringIO.StringIO()
    # MinFilter slightly erodes the strokes to make OCR harder
    image = image.filter(ImageFilter.MinFilter(3))
    image.save(data, "JPEG")
    self.call("web.response", data.getvalue(), "image/jpeg")
def ext_logout(self):
    """Web handler '/auth/logout': detach the user from the session.

    The session is demoted from "user" to "semi_user" (not destroyed),
    the logout is logged, and the browser is redirected to the
    "redirect" parameter or "/".
    """
    req = self.req()
    session = req.session()
    if session is not None:
        user = session.get("user")
        if user:
            with self.lock(["session.%s" % session.uuid, "user.%s" % user]):
                # keep a reference to the former user as "semi_user"
                session.set("semi_user", user)
                session.delkey("user")
                session.set("ip", req.remote_addr())
                session.store()
            self.call("session.log", act="logout", session=session.uuid, ip=req.remote_addr(), user=user)
    # NOTE(review): req is re-fetched here in the original; presumably
    # self.req() returns the current request object — confirm
    req = self.req()
    redirect = req.param("redirect")
    if redirect:
        self.call("web.redirect", redirect)
    self.call("web.redirect", "/")
def messages(self, msg):
    """Hook handler "auth.messages": fill *msg* with standard auth error texts."""
    msg.update({
        "name_empty": self._("Enter your name or email"),
        "name_unknown": self._("User not found"),
        "user_inactive": self._("User is not active. Check your e-mail and enter activation code"),
        "password_empty": self._("Enter your password"),
        "password_incorrect": self._("Incorrect password"),
    })
def ext_login(self):
    """Web handler '/auth/login': render and process the login form.

    Verifies the salted MD5 password hash, binds the user to the
    session under lock, logs the login and redirects (param, hook
    "auth.redirects", or "/"). Inactive users are bounced to the
    activation page.
    """
    req = self.req()
    form = self.call("web.form")
    name = req.param("name")
    password = req.param("password")
    redirect = req.param("redirect")
    msg = {}
    self.call("auth.messages", msg)
    if req.ok():
        session = req.session(True)
        if not name:
            form.error("name", msg["name_empty"])
        else:
            user = self.call("session.find_user", name)
            if user is None:
                form.error("name", msg["name_unknown"])
            elif user.get("inactive"):
                # account not activated yet: demote session and send to activation
                with self.lock(["session.%s" % session.uuid]):
                    session.load()
                    session.delkey("user")
                    session.set("semi_user", user.uuid)
                    session.set("ip", req.remote_addr())
                    session.store()
                self.call("session.log", act="logout", session=session.uuid, ip=req.remote_addr(), user=user.uuid)
                self.call("web.redirect", "/auth/activate/%s" % user.uuid)
        if not password:
            form.error("password", msg["password_empty"])
        if not form.errors:
            # verify the password: MD5(salt + password)
            m = hashlib.md5()
            m.update(user.get("salt").encode("utf-8") + password.encode("utf-8"))
            if m.hexdigest() != user.get("pass_hash"):
                form.error("password", msg["password_incorrect"])
        if not form.errors:
            # success: bind the user to the session
            with self.lock(["session.%s" % session.uuid]):
                session.load()
                session.set("user", user.uuid)
                session.delkey("semi_user")
                session.set("ip", req.remote_addr())
                session.store()
            self.call("session.log", act="login", session=session.uuid, ip=req.remote_addr(), user=user.uuid)
            if redirect is not None and redirect != "":
                self.call("web.redirect", redirect)
            redirects = {}
            self.call("auth.redirects", redirects)
            if redirects.has_key("login"):
                self.call("web.redirect", redirects["login"])
            self.call("web.redirect", "/")
    # render the login form (GET or validation errors)
    if redirect is not None:
        form.hidden("redirect", redirect)
    form.input(self._("User name"), "name", name)
    form.password(self._("Password"), "password", password)
    form.submit(None, None, self._("Log in"))
    form.add_message_bottom(self._("If this is your first visit, %s.") % ('<a href="/auth/register?redirect=%s">%s</a>' % (urlencode(redirect), self._("register please"))))
    form.add_message_bottom('<a href="/auth/remind?redirect=%s">%s</a>' % (urlencode(redirect), self._("Forgotten your password?")))
    vars = {
        "title": self._("User login"),
        "ret": {
            "href": "/",
            "title": self._("Cancel"),
        },
    }
    self.call("auth.form", form, vars)
    self.call("web.response_global", form.html(vars), vars)
def ext_change(self):
    """Web handler '/auth/change' (priv "logged"): change the own password.

    Requires the old password, re-salts and re-hashes the new one, and
    force-logs-out every other session belonging to this user. Field
    names carry a random per-form prefix (anti-autofill/anti-CSRF-ish).
    """
    ret = "/"
    redirects = {}
    self.call("auth.redirects", redirects)
    if redirects.has_key("change"):
        ret = redirects["change"]
    req = self.req()
    vars = {
        "title": self._("Password change"),
    }
    form = self.call("web.form")
    # field-name prefix: generated on GET, echoed back on POST
    if req.ok():
        prefix = req.param("prefix")
    else:
        prefix = uuid4().hex
    password = req.param(prefix + "_p")
    password1 = req.param(prefix + "_p1")
    password2 = req.param(prefix + "_p2")
    if req.ok():
        # the hook may substitute another user (e.g. admin flows)
        user_uuid = self.call("auth.password-user") or req.user()
        user = self.obj(User, user_uuid)
        if not password:
            form.error(prefix + "_p", self._("Enter your old password"))
        if not form.errors:
            if not user.get("salt"):
                form.error(prefix + "_p", self._("User has not password"))
            else:
                # verify old password: MD5(salt + password)
                m = hashlib.md5()
                m.update(user.get("salt").encode("utf-8") + password.encode("utf-8"))
                if m.hexdigest() != user.get("pass_hash"):
                    form.error(prefix + "_p", self._("Incorrect old password"))
        if not password1:
            form.error(prefix + "_p1", self._("Enter your new password"))
        elif len(password1) < 6:
            form.error(prefix + "_p1", self._("Minimal password length - 6 characters"))
        elif not password2:
            form.error(prefix + "_p2", self._("Retype your new password"))
        elif password1 != password2:
            form.error(prefix + "_p2", self._("Password don't match. Try again, please"))
            # do not echo mismatched passwords back into the form
            password1 = ""
            password2 = ""
        if not form.errors:
            # new random 10-letter salt + MD5 hash
            salt = ""
            letters = "abcdefghijklmnopqrstuvwxyz"
            for i in range(0, 10):
                salt += random.choice(letters)
            user.set("salt", salt)
            user.set("pass_reminder", self.call("auth.password-reminder", password1))
            m = hashlib.md5()
            m.update(salt + password1.encode("utf-8"))
            user.set("pass_hash", m.hexdigest())
            user.store()
            # invalidate every other session of this user
            my_session = req.session()
            sessions = self.objlist(SessionList, query_index="user", query_equal=user.uuid)
            for sess in sessions:
                if sess.uuid != my_session.uuid:
                    with self.lock(["session.%s" % sess.uuid]):
                        sess.load()
                        sess.delkey("user")
                        sess.delkey("semi_user")
                        sess.store()
            self.call("auth.password-changed", user, password1)
            vars["ret"] = {
                "href": ret,
                "html": self._("Return")
            }
            self.call("auth.message", self._("Your password was changed successfully"), vars)
    # render the change form (GET or validation errors)
    form.hidden("prefix", prefix)
    form.password(self._("Old password"), prefix + "_p", password)
    form.password(self._("New password"), prefix + "_p1", password1)
    form.password(self._("Confirm new password"), prefix + "_p2", password2)
    form.submit(None, None, self._("Change"))
    self.call("auth.form", form, vars)
    self.call("web.response_global", form.html(vars), vars)
def password_reminder(self, password):
    """Hook handler "auth.password-reminder": derive a safe hint from *password*.

    If the project config flag "auth.insecure_password_reminder" is set,
    the full plaintext password is returned (legacy behaviour); otherwise
    only the first two characters followed by "..." are revealed.

    Bug fixed: the previous regex r'^(..).*$' required at least two
    characters, so passwords shorter than two characters did not match
    and were returned completely unmasked. Plain slicing masks every
    length, including the empty string.
    """
    if self.conf("auth.insecure_password_reminder"):
        return password
    # slicing never raises and yields at most the first two characters
    return password[:2] + "..."
def ext_email(self):
    """Web handler '/auth/email' (priv "logged"): change the own e-mail.

    Two stages: '/auth/email' asks for the current password and the new
    address, stores it as "email_change" and mails a confirmation code;
    '/auth/email/confirm' checks the code and commits the change.
    """
    req = self.req()
    user = self.obj(User, req.user())
    if req.args == "confirm":
        # --- stage 2: confirm the mailed code ---
        form = self.call("web.form")
        code = req.param("code")
        redirect = req.param("redirect")
        if req.ok():
            if not code:
                form.error("code", self._("Enter your code"))
            else:
                # NOTE(review): when no "email_change" is pending, the code is
                # silently accepted and the user is just redirected — confirm intended
                if user.get("email_change"):
                    if user.get("email_confirmation_code") != code:
                        form.error("code", self._("Invalid code"))
                    else:
                        # the address may have been taken meanwhile
                        existing_email = self.objlist(UserList, query_index="email", query_equal=user.get("email_change"))
                        existing_email.load(silent=True)
                        if len(existing_email):
                            form.error("code", self._("There is another user with this email"))
                        else:
                            user.set("email", user.get("email_change"))
                            user.delkey("email_change")
                            user.delkey("email_confirmation_code")
                            user.store()
            if not form.errors:
                redirects = {}
                self.call("auth.redirects", redirects)
                if redirects.has_key("change"):
                    self.call("web.redirect", redirects["change"])
                self.call("web.redirect", "/")
        form.input(self._("Confirmation code from your post box"), "code", code)
        form.submit(None, None, self._("btn///Confirm"))
        vars = {
            "title": self._("E-mail confirmation"),
        }
        self.call("web.response_global", form.html(), vars)
    # --- stage 1: request the change ---
    form = self.call("web.form")
    # random per-form prefix for the password field name
    if req.ok():
        prefix = req.param("prefix")
    else:
        prefix = uuid4().hex
    password = req.param(prefix + "_p")
    email = req.param("email")
    if req.ok():
        if not password:
            form.error(prefix + "_p", self._("Enter your old password"))
        if not form.errors:
            # verify the password: MD5(salt + password)
            m = hashlib.md5()
            m.update(user.get("salt").encode("utf-8") + password.encode("utf-8"))
            if m.hexdigest() != user.get("pass_hash"):
                form.error(prefix + "_p", self._("Incorrect old password"))
        if not email:
            form.error("email", self._("Enter new e-mail address"))
        elif not re.match(r'^[a-zA-Z0-9_\-+\.]+@[a-zA-Z0-9\-_\.]+\.[a-zA-Z0-9]+$', email):
            form.error("email", self._("Enter correct e-mail"))
        else:
            existing_email = self.objlist(UserList, query_index="email", query_equal=email.lower())
            existing_email.load(silent=True)
            if len(existing_email):
                form.error("email", self._("There is another user with this email"))
        if not form.errors:
            # stash the pending address and mail the confirmation code
            user.set("email_change", email.lower())
            code = uuid4().hex
            user.set("email_confirmation_code", code)
            user.store()
            params = {
                "subject": self._("E-mail confirmation"),
                "content": self._("Someone possibly you requested e-mail change on the {host}. If you really want to do this enter the following confirmation code on the site:\n\n{code}\n\nor simply follow the link:\n\n{protocol}://{host}/auth/email/confirm?code={code}"),
            }
            self.call("auth.email_change_email", params)
            self.call("email.send", email, user.get("name"), params["subject"], params["content"].format(code=code, host=req.host(), protocol=self.app().protocol))
            self.call("web.redirect", "/auth/email/confirm")
    # render the request form
    form.hidden("prefix", prefix)
    form.input(self._("New e-mail address"), "email", email)
    form.password(self._("Your current password"), prefix + "_p", password)
    form.submit(None, None, self._("Change"))
    ret = "/"
    redirects = {}
    self.call("auth.redirects", redirects)
    if redirects.has_key("change"):
        ret = redirects["change"]
    vars = {
        "title": self._("E-mail change"),
    }
    self.call("auth.form", form, vars)
    self.call("web.response_global", form.html(vars), vars)
def permissions_list(self, perms):
    """Hook handler "permissions.list": declare this module's admin permissions.

    Consistency fix: the name-change handler is registered with
    priv="change.names" and the dashboard checks
    has_access("change.names"), but this list previously declared the id
    "change.usernames", which nothing ever checked — granting it through
    the admin UI had no effect. The declaration now matches the checks.
    """
    perms.append({"id": "permissions", "name": self._("Giving permissions to users")})
    perms.append({"id": "users", "name": self._("User profiles")})
    perms.append({"id": "change.passwords", "name": self._("Change passwords for other users")})
    perms.append({"id": "change.names", "name": self._("Change names for other users")})
    perms.append({"id": "auth.tracking", "name": self._("Multicharing tracker")})
def auth_permissions(self, user_id):
    """Return a dict of permission-flag -> True for *user_id*.

    The configured global admin additionally receives the "admin",
    "global.admin" and "global_admin" markers. Every stored permission
    key is also published under a sanitized alias with non-alphanumeric
    characters replaced by "_".
    """
    granted = {}
    if user_id:
        if user_id == self.clconf("admin_user"):
            granted["admin"] = True
            granted["global.admin"] = True
            granted["global_admin"] = True
        try:
            stored = self.obj(UserPermissions, user_id)
            for key in stored.get("perms").keys():
                granted[key] = True
                granted[re_nonalphanum.sub('_', key)] = True
        except ObjectNotFoundException:
            # no explicit permissions recorded for this user
            pass
    return granted
def auth_grant_permission(self, user_id, perm):
    """Grant permission id *perm* to user *user_id*, creating the
    UserPermissions record if it does not exist yet.

    Robustness fix: the original only initialised the "perms" dict on
    the freshly-created object; a pre-existing record without a "perms"
    key made `p.get("perms")` return None and the subsequent item
    assignment raise TypeError. The dict is now ensured in both paths.
    """
    try:
        p = self.obj(UserPermissions, user_id)
    except ObjectNotFoundException:
        p = self.obj(UserPermissions, user_id, data={})
    perms = p.get("perms")
    if perms is None:
        perms = {}
        p.set("perms", perms)
    perms[perm] = True
    # touch() marks the object dirty since the dict was mutated in place
    p.touch()
    p.store()
def menu_root_index(self, menu):
    """Hook handler "menu-admin-root.index": contribute auth entries to the admin root menu."""
    menu.append({"id": "auth.index", "text": self._("Authentication"), "order": 500})
    req = self.req()
    if req.has_access("users"):
        # dossier / user-search leaves, visible to staff with "users" access
        menu.extend([
            {"id": "auth/user-dashboard/%s" % req.user(), "text": self._("My dossier"), "leaf": True, "order": 2, "icon": "/st-mg/menu/myform.png"},
            {"id": "auth/user-find", "text": self._("Find user"), "leaf": True, "order": 3, "icon": "/st-mg/menu/find.png"},
        ])
def menu_auth_index(self, menu):
    """Hook handler: contribute leaves to the 'Authentication' admin submenu."""
    req = self.req()
    allowed = req.has_access
    if allowed("permissions") or allowed("admin"):
        menu.append({"id": "auth/permissions", "text": self._("Permissions"), "leaf": True, "order": 10})
    if allowed("users"):
        menu.append({"id": "auth/user-lastreg", "text": self._("Last registered users"), "leaf": True, "order": 20})
def admin_permissions(self):
    """Admin handler 'auth/permissions': list users with explicit permissions.

    Builds one row per user holding any permission, showing the
    human-readable names of the granted permissions.
    """
    req = self.req()
    if not req.has_access("permissions") and not req.has_access("admin"):
        self.call("web.forbidden")
    # all permissions declared by any module
    permissions_list = []
    self.call("permissions.list", permissions_list)
    users = []
    user_permissions = self.objlist(UserPermissionsList, query_index="any", query_equal="1")
    if len(user_permissions):
        user_permissions.load()
        # uuid -> stored perms dict
        perms = dict([(obj.uuid, obj.get("perms")) for obj in user_permissions])
        usr = self.objlist(UserList, perms.keys())
        usr.load()
        for u in usr:
            grant_list = []
            p = perms[u.uuid]
            # translate granted ids to declared display names
            for perm in permissions_list:
                if p.get(perm["id"]):
                    grant_list.append(perm["name"])
            users.append({"id": u.uuid, "name": htmlescape(u.get("name")), "permissions": "<br />".join(grant_list)})
    vars = {
        "editpermissions": self._("Edit permissions of a user"),
        "user_name": self._("User name"),
        "permissions": self._("Permissions"),
        "edit": self._("edit"),
        "editing": self._("Editing"),
        "users": users,
    }
    self.call("admin.response_template", "admin/auth/permissions.html", vars)
def headmenu_permissions(self, args):
    """Head menu title for the permissions list page."""
    title = self._("User permissions")
    return title
def admin_editpermissions(self):
    """Admin handler 'auth/editpermissions': look up a user by name and
    jump to the per-user permission editor.

    On POST: resolves the name and redirects to
    'auth/edituserpermissions/<uuid>', or returns a JSON error map.
    On GET: renders the search form.

    Cleanup: the original fetched self.req() twice in a row; the
    redundant second call is removed.
    """
    req = self.req()
    if not req.has_access("permissions") and not req.has_access("admin"):
        self.call("web.forbidden")
    name = req.param("name")
    if req.ok():
        errors = {}
        if not name:
            errors["name"] = self._("Enter user name")
        else:
            user = self.call("session.find_user", name)
            if not user:
                errors["name"] = self._("User not found")
            else:
                # found: jump straight to the permission editor
                self.call("admin.redirect", "auth/edituserpermissions/%s" % user.uuid)
        self.call("web.response_json", {"success": False, "errors": errors})
    fields = [
        {"name": "name", "label": self._("User name"), "value": name},
    ]
    buttons = [{"text": self._("Search")}]
    self.call("admin.form", fields=fields, buttons=buttons)
def headmenu_editpermissions(self, args):
    """Head menu: page title plus a back-link to the permissions list."""
    title = self._("Edit permissions of a user")
    return [title, "auth/permissions"]
def admin_edituserpermissions(self):
    """Admin handler 'auth/edituserpermissions/<uuid>': edit one user's grants.

    On POST: collects checked "perm<id>" checkboxes; an empty selection
    removes the UserPermissions record entirely. On GET: renders a
    checkbox per declared permission.
    """
    req = self.req()
    if not req.has_access("permissions") and not req.has_access("admin"):
        self.call("web.forbidden")
    try:
        user = self.obj(User, req.args)
    except ObjectNotFoundException:
        self.call("web.not_found")
    perms = []
    self.call("permissions.list", perms)
    try:
        user_permissions = self.obj(UserPermissions, req.args)
    except ObjectNotFoundException:
        user_permissions = self.obj(UserPermissions, req.args, {})
    if req.ok():
        perm_values = {}
        for perm in perms:
            if req.param("perm%s" % perm["id"]):
                perm_values[perm["id"]] = True
        if perm_values:
            user_permissions.set("perms", perm_values)
            user_permissions.sync()
            user_permissions.store()
        else:
            # nothing granted => drop the record
            user_permissions.remove()
        if req.args == req.user():
            # editing own permissions: drop the request's cached
            # permission set and rebuild the admin menu
            del req._permissions
            self.call("admin.update_menu")
        self.call("auth.permissions-changed", user)
        self.call("admin.redirect", "auth/permissions")
    else:
        perm_values = user_permissions.get("perms")
        if not perm_values:
            perm_values = {}
    # render the checkbox form (GET path; the POST path redirects above)
    fields = []
    for perm in perms:
        fields.append({"name": "perm%s" % perm["id"], "label": u'%s (char.perm_%s)' % (perm["name"], re_nonalphanum.sub('_', perm["id"])), "type": "checkbox", "checked": perm_values.get(perm["id"])})
    self.call("admin.form", fields=fields)
def headmenu_edituserpermissions(self, args):
    """Head menu: escaped user name plus back-link to the user search form."""
    user = self.obj(User, args)
    title = htmlescape(user.get("name"))
    return [title, "auth/editpermissions"]
def list_roles(self, roles):
    """Hook handler "security.list-roles": enumerate all selectable roles.

    Adds the three builtin audience roles, then one "perm:<id>" role per
    declared admin permission.
    """
    for role in (("all", self._("Everybody")),
                 ("logged", self._("Logged in")),
                 ("notlogged", self._("Not logged in"))):
        roles.append(role)
    declared = []
    self.call("permissions.list", declared)
    caption = self._("Privilege: %s")
    for perm in declared:
        roles.append(("perm:%s" % perm["id"], caption % perm["name"]))
def users_roles(self, users, roles):
    """Hook handler "security.users-roles": extend *roles* (uuid -> list) for *users*.

    Every listed user gets "all" and "logged"; users holding
    "project.admin"/"global.admin" get every declared "perm:<id>" role,
    others get one "perm:<key>" role per stored permission key.
    """
    records = self.objlist(UserPermissionsList, users)
    records.load(silent=True)
    # implicit roles for every known user
    for uuid in users:
        roles.setdefault(uuid, []).extend(["all", "logged"])
    for rec in records:
        stored = rec.get("perms")
        if stored is None:
            continue
        if "project.admin" in stored or "global.admin" in stored:
            # admins implicitly carry every declared permission
            declared = []
            self.call("permissions.list", declared)
            granted = ["perm:%s" % p["id"] for p in declared]
        else:
            granted = ["perm:%s" % key for key in stored.keys()]
        roles.setdefault(rec.uuid, []).extend(granted)
def headmenu_user_dashboard(self, args):
    """Head menu: 'User <name>' for the dossier page; None when the user is missing."""
    try:
        user = self.obj(User, args)
    except ObjectNotFoundException:
        return
    caption = self._("User %s") % htmlescape(user.get("name", user.uuid))
    return [caption]
def ext_user_find(self):
    """Admin handler 'auth/user-find' (priv "users"): find a user by name.

    On POST: resolves the name and redirects to the user's dashboard, or
    returns a JSON error map. On GET: renders the search form.
    """
    req = self.req()
    name = req.param("name")
    if req.ok():
        errors = {}
        if not name:
            errors["name"] = self._("Enter user name")
        else:
            user = self.call("session.find_user", name)
            if not user:
                errors["name"] = self._("User not found")
            else:
                # found: jump straight to the dossier page
                self.call("admin.redirect", "auth/user-dashboard/%s" % user.uuid)
        self.call("web.response_json", {"success": False, "errors": errors})
    fields = [
        {"name": "name", "label": self._("User name"), "value": name},
    ]
    buttons = [{"text": self._("Search")}]
    self.call("admin.form", fields=fields, buttons=buttons)
def ext_user_dashboard(self):
    """Render the administrative dashboard page for one user (req.args = uuid).

    The page is assembled from "tables" contributed by this method and by
    other modules via the "auth.user-tables" hook, sorted by their
    optional "order" key, then rendered with user-dashboard.html.
    """
    req = self.req()
    try:
        user = self.obj(User, req.args)
    except ObjectNotFoundException:
        self.call("web.not_found")
    vars = {
        "user": {
            "uuid": user.uuid,
        },
        "Update": self._("Update"),
    }
    tables = []
    # Base "Authentication" table; order=-10 places it before contributed tables
    tbl = {
        "type": "auth",
        "title": self._("Authentication"),
        "order": -10,
        "links": [],
        "rows": [],
    }
    # Action links, each gated by the corresponding admin privilege
    if req.has_access("change.passwords"):
        tbl["links"].append({"id": "chpass", "hook": "auth/change-password/%s" % user.uuid, "text": self._("Change password")})
    if req.has_access("change.names"):
        tbl["links"].append({"id": "chname", "hook": "auth/change-name/%s" % user.uuid, "text": self._("Change name")})
    if req.has_access("auth.tracking"):
        tbl["links"].append({"id": "tracking", "hook": "auth/track/user/%s" % user.uuid, "text": self._("Track user")})
    if req.has_access("permissions"):
        tbl["links"].append({"id": "perms", "hook": "auth/edituserpermissions/%s" % user.uuid, "text": self._("Permissions")})
    # Let other modules extend the authentication table
    self.call("auth.user-auth-table", user, tbl)
    if not tbl["rows"]:
        del tbl["rows"]
    # Keep the table only if anything remains to show
    if tbl.get("links") or tbl.get("rows"):
        tables.append(tbl)
    # Collect additional per-user tables from other modules
    self.call("auth.user-tables", user, tables)
    if len(tables):
        # Python 2 cmp-style sort by the optional "order" key
        tables.sort(cmp=lambda a, b: cmp(a.get("order", 0), b.get("order", 0)))
        for tbl in tables:
            if tbl.get("links") is not None:
                if tbl["links"]:
                    # Flag the last link so the template can suppress its separator
                    tbl["links"][-1]["lst"] = True
                else:
                    del tbl["links"]
        # Active tab: numeric request param, possibly overridden by a
        # table whose "type" matches the param verbatim
        active_tab = intz(req.param("active_tab"))
        for i in xrange(0, len(tables)):
            tbl = tables[i]
            if tbl.get("type"):
                if req.param("active_tab") == tbl.get("type"):
                    active_tab = i
            else:
                # Tables without an explicit type get their index as the type
                tbl["type"] = str(i)
        tables[-1]["lst"] = True
        vars["tables"] = tables
        vars["active_tab"] = active_tab
    self.call("admin.response_template", "admin/auth/user-dashboard.html", vars)
def ext_user_lastreg(self):
    "Show the 30 most recently registered users as an admin table"
    recent = self.objlist(UserList, query_index="created", query_reversed=True, query_limit=30)
    recent.load()
    rows = []
    for u in recent:
        registered = datetime_to_human(from_unixtime(u.get("created")))
        link = '<hook:admin.link href="auth/user-dashboard/{0}" title="{0}" />'.format(u.uuid)
        active = self._("no") if u.get("inactive") else self._("yes")
        rows.append((registered, link, htmlescape(u.get("name")), active))
    vars = {
        "tables": [{
            "header": [self._("Registration"), self._("ID"), self._("Name"), self._("Active")],
            "rows": rows,
        }]
    }
    self.call("admin.response_template", "admin/common/tables.html", vars)
def autologin(self, user_uuid, interval=60):
    "Create an autologin token for user_uuid valid for 'interval' seconds; return its uuid"
    token = self.obj(AutoLogin, data={})
    token.set("user", user_uuid)
    # Zero-padded expiry timestamp keeps lexicographic ordering
    token.set("valid_till", "%020d" % (self.time() + interval))
    token.store()
    return token.uuid
def admin_change_name(self):
    """Admin form to rename a user (req.args = user uuid).

    The whole validate-and-store sequence runs under a per-user lock so
    concurrent renames cannot interleave; validation uses the project's
    name regexp supplied via the "auth.form_params" hook.
    """
    req = self.req()
    try:
        user = self.obj(User, req.args)
    except ObjectNotFoundException:
        self.call("web.not_found")
    if req.ok():
        with self.lock(["User.%s" % user.uuid]):
            # Reload inside the lock to validate against fresh data
            user.load()
            # auth params
            params = {}
            self.call("auth.form_params", params)
            # checking form
            errors = {}
            name = req.param("name")
            if not name:
                errors["name"] = self._("Specify new name")
            elif not user.get("name"):
                errors["name"] = self._("This user can't have a name")
            elif not re.match(params["name_re"], name, re.UNICODE):
                errors["name"] = params["name_invalid_re"]
            else:
                # Name uniqueness check (matching self is allowed)
                existing = self.call("session.find_user", name, return_id=True)
                if existing and existing != user.uuid:
                    errors["name"] = self._("This name is taken already")
            if len(errors):
                self.call("web.response_json", {"success": False, "errors": errors})
            # storing
            old_name = user.get("name")
            if old_name != name:
                user.set("name", name)
                # name_lower supports case-insensitive lookups
                user.set("name_lower", name.lower())
                user.store()
                self.call("auth.name-changed", user, old_name, name)
            self.call("admin.redirect", "auth/user-dashboard/%s" % user.uuid, {"active_tab": "auth"})
    fields = []
    fields.append({"name": "name", "label": self._("New name"), "value": user.get("name")})
    buttons = []
    buttons.append({"text": self._("Change name")})
    self.call("admin.form", fields=fields, buttons=buttons)
def headmenu_change_name(self, args):
    # Title plus a backlink to the user's dashboard, auth tab preselected
    backlink = "auth/user-dashboard/%s?active_tab=auth" % args
    return [self._("Name changing"), backlink]
def admin_change_password(self):
    """Admin form to set a new password for a user (req.args = user uuid).

    After storing the new hash, every other session of the user is
    logged out; the administrator's own session is kept.
    """
    req = self.req()
    try:
        user = self.obj(User, req.args)
    except ObjectNotFoundException:
        self.call("web.not_found")
    if req.ok():
        errors = {}
        password = req.param("password")
        if not password:
            errors["password"] = self._("Specify new password")
        elif not user.get("pass_hash"):
            errors["password"] = self._("This user can't have a password")
        if len(errors):
            self.call("web.response_json", {"success": False, "errors": errors})
        # Fresh 10-letter random salt
        salt = ""
        letters = "abcdefghijklmnopqrstuvwxyz"
        for i in range(0, 10):
            salt += random.choice(letters)
        user.set("salt", salt)
        user.set("pass_reminder", self.call("auth.password-reminder", password))
        # NOTE(review): salted MD5 is a weak password hash by modern
        # standards; the scheme must match the login-side check, so any
        # upgrade has to change both ends together.
        m = hashlib.md5()
        m.update(salt + password.encode("utf-8"))
        user.set("pass_hash", m.hexdigest())
        user.store()
        # Invalidate all other sessions of this user (keep the current one)
        my_session = req.session()
        sessions = self.objlist(SessionList, query_index="user", query_equal=user.uuid)
        for sess in sessions:
            if sess.uuid != my_session.uuid:
                with self.lock(["session.%s" % sess.uuid]):
                    sess.load()
                    sess.delkey("user")
                    sess.delkey("semi_user")
                    sess.store()
        self.call("auth.password-changed", user, password)
        self.call("admin.redirect", "auth/user-dashboard/%s" % user.uuid, {"active_tab": "auth"})
    fields = []
    fields.append({"name": "password", "label": self._("New password")})
    buttons = []
    buttons.append({"text": self._("Change password")})
    self.call("admin.form", fields=fields, buttons=buttons)
def headmenu_change_password(self, args):
    # Title plus a backlink to the user's dashboard, auth tab preselected
    backlink = "auth/user-dashboard/%s?active_tab=auth" % args
    return [self._("Password changing"), backlink]
def headmenu_auth_track(self, args):
    "Head menu for the tracking screen; title depends on what is tracked"
    # (regex, title, whether a dashboard backlink is shown)
    dispatch = (
        (re_track_user, self._("Tracking"), True),
        (re_track_player, self._("Tracking player"), True),
        (re_track_ip, self._("Tracking IP %s"), False),
        (re_track_cookie, self._("Tracking Cookie %s"), False),
    )
    for regex, title, backlink in dispatch:
        m = regex.match(args)
        if m:
            if backlink:
                return [title, "auth/user-dashboard/%s?active_tab=auth" % m.group(1)]
            return [title % m.group(1)]
def admin_auth_track(self):
    """Show the authentication log filtered by user, player, cookie or IP.

    The URL form (req.args) selects which AuthLogList index to query;
    user names are batch-loaded so each row can link to a dashboard.
    """
    req = self.req()
    # Map the URL form to a query index and its key value
    m = re_track_user.match(req.args)
    if m:
        index = "user_performed"
        equal = m.group(1)
    else:
        m = re_track_player.match(req.args)
        if m:
            index = "player_performed"
            equal = m.group(1)
        else:
            m = re_track_cookie.match(req.args)
            if m:
                index = "session_performed"
                equal = m.group(1)
            else:
                m = re_track_ip.match(req.args)
                if m:
                    index = "ip_performed"
                    equal = m.group(1)
                else:
                    self.call("web.not_found")
    rows = []
    lst = self.objlist(AuthLogList, query_index=index, query_equal=equal, query_reversed=True, query_limit=log_per_page)
    lst.load(silent=True)
    # Batch-load the users referenced by the log entries (uuid => obj)
    users = {}
    for ent in lst:
        if ent.get("user"):
            users[ent.get("user")] = None
    if len(users):
        lst2 = self.objlist(UserList, users.keys())
        lst2.load(silent=True)
        for ent in lst2:
            users[ent.uuid] = ent
    for ent in lst:
        user = ent.get("user")
        if user:
            uinfo = users.get(user)
            # Link by name when the user object loaded, else by raw uuid
            user = '<hook:admin.link href="auth/track/user/%s" title="%s" />' % (user, htmlescape(uinfo.get("name", user)) if uinfo else user)
        player = ent.get("player")
        if player:
            # Player uuids are abbreviated via re_short for display
            player = '<hook:admin.link href="auth/track/player/%s" title="%s" />' % (player, re_short.sub(r'\1...', player))
        cookie = ent.get("session")
        cookie_short = re_short.sub(r'\1...', cookie)
        rows.append([
            self.call("l10n.time_local", ent.get("performed")),
            '<hook:admin.link href="auth/track/ip/{ip}" title="{ip}" />'.format(ip=ent.get("ip")) if ent.get("ip") else None,
            '<hook:admin.link href="auth/track/cookie/{cookie}" title="{cookie_short}" />'.format(cookie=cookie, cookie_short=cookie_short),
            user,
            player,
            ent.get("act"),
        ])
    vars = {
        "tables": [
            {
                "header": [
                    self._("Performed"),
                    self._("IP address"),
                    self._("Cookie"),
                    self._("User"),
                    self._("Player"),
                    self._("Action"),
                ],
                "rows": rows,
            }
        ],
    }
    self.call("admin.response_template", "admin/common/tables.html", vars)
class PermissionsEditor(Module):
    """Interface to grant and revoke permissions and to view actual permissions.

    Rules are stored on an object of 'objclass' as a list of
    (role, permission, error_message) tuples, evaluated in order.
    """
    def __init__(self, app, objclass, permissions, default_rules=None):
        # objclass: storage class holding the "rules" attribute
        # permissions: list of (id, name) pairs selectable in the editor
        # default_rules: optional hook name producing initial rules
        Module.__init__(self, app, "mg.core.PermissionsEditor")
        self.objclass = objclass
        self.permissions = permissions
        self.default_rules = default_rules
    def request(self, args=None):
        """Dispatch an editor request: '<uuid>' shows the form, '<uuid>/del/<n>' deletes rule n."""
        if args is None:
            args = self.req().args
        m = re_permissions_args.match(args)
        if not m:
            self.call("web.not_found")
        uuid, args = m.group(1, 2)
        self.uuid = uuid
        try:
            self.perms = self.obj(self.objclass, uuid)
        except ObjectNotFoundException:
            # No stored object yet: start from the default rules, if any
            rules = []
            if self.default_rules:
                self.call(self.default_rules, rules)
            self.perms = self.obj(self.objclass, uuid, {"rules": rules})
        if args == "" or args is None:
            self.index()
        m = re.match(r'^/del/(\d+)$', args)
        if m:
            self.delete(intz(m.groups(1)[0]))
        self.call("web.not_found")
    def index(self):
        """Render the rule list form and process its submission."""
        roles = []
        self.call("security.list-roles", roles)
        fields = []
        req = self.req()
        if req.param("ok"):
            roles_dict = dict(roles)
            permissions_dict = dict(self.permissions)
            errors = {}
            # "rules" carries the number of existing rule rows; capped at 1000
            rules_cnt = intz(req.param("rules"))
            if rules_cnt > 1000:
                rules_cnt = 1000
            new_rules = []
            # The unnumbered row is the "add new rule" row; it is only
            # validated when the admin filled in either field
            ord = intz(req.param("ord"))
            role = req.param("v_role")
            perm = req.param("v_perm")
            error = req.param("error").strip()
            if role or perm:
                if not role or not roles_dict.get(role):
                    errors["role"] = self._("Select valid role")
                if not perm or not permissions_dict.get(perm):
                    errors["perm"] = self._("Select valid permission")
                new_rules.append((ord, role, perm, error))
            # Numbered rows are the existing rules and are always validated
            for n in range(0, rules_cnt):
                ord = intz(req.param("ord%d" % n))
                role = req.param("v_role%d" % n)
                perm = req.param("v_perm%d" % n)
                error = req.param("error%d" % n).strip()
                if not role or not roles_dict.get(role):
                    errors["role%d" % n] = self._("Select valid role")
                if not perm or not permissions_dict.get(perm):
                    errors["perm%d" % n] = self._("Select valid permission")
                new_rules.append((ord, role, perm, error))
            if len(errors):
                self.call("web.response_json", {"success": False, "errors": errors})
            # Sort by the admin-supplied order value, then drop it
            new_rules.sort(key=itemgetter(0))
            new_rules = [(role, perm, error) for ord, role, perm, error in new_rules]
            self.perms.set("rules", new_rules)
            self.perms.store()
            self.call("admin.redirect", "forum/access/%s" % self.uuid)
        # Build the form: one row per existing rule plus one empty "add" row
        rules = self.perms.get("rules")
        for n in range(0, len(rules)):
            rule = rules[n]
            # Older rules may lack the error-message element
            error = rule[2] if len(rule) >= 3 else None
            fields.append({"name": "ord%d" % n, "value": n + 1, "width": 100})
            fields.append({"name": "role%d" % n, "type": "combo", "values": roles, "value": rule[0], "inline": True})
            fields.append({"name": "perm%d" % n, "type": "combo", "values": self.permissions, "value": rule[1], "inline": True})
            fields.append({"name": "error%d" % n, "value": error, "inline": True})
            fields.append({"type": "button", "width": 100, "text": self._("Delete"), "action": "forum/access/%s/del/%d" % (self.uuid, n), "inline": True})
        fields.append({"name": "ord", "value": len(rules) + 1, "label": self._("Add") if rules else None, "width": 100})
        fields.append({"name": "role", "type": "combo", "values": roles, "label": " " if rules else None, "inline": True})
        fields.append({"name": "perm", "type": "combo", "values": self.permissions, "label": " " if rules else None, "inline": True})
        fields.append({"name": "error", "inline": True, "label": " "})
        fields.append({"type": "empty", "width": 100, "inline": True})
        # Column headers go onto the first five fields of the form
        fields[0]["label"] = self._("Order")
        fields[1]["label"] = self._("Role")
        fields[2]["label"] = self._("Permission")
        fields[3]["label"] = self._("Error on match")
        fields[4]["label"] = " "
        fields.append({"type": "hidden", "name": "rules", "value": len(rules)})
        self.call("admin.form", fields=fields)
    def delete(self, index):
        """Delete rule number 'index' and redirect back to the editor."""
        rules = self.perms.get("rules")
        try:
            del rules[index]
            # touch() marks the in-place mutated list as dirty before store()
            self.perms.touch()
            self.perms.store()
        except IndexError:
            pass
        self.call("admin.redirect", "forum/access/%s" % self.uuid)
class Dossiers(Module):
    """Per-user dossier: free-form administrative notes attached to users."""
    def register(self):
        # Hooks: permission declaration, dashboard table, write form, write API
        self.rhook("permissions.list", self.permissions_list)
        self.rhook("auth.user-tables", self.user_tables)
        self.rhook("ext-admin-auth.write-dossier", self.admin_write_dossier, priv="users.dossiers")
        self.rhook("dossier.write", self.dossier_write)
    def permissions_list(self, perms):
        """Declare the permission guarding dossier access."""
        perms.append({"id": "users.dossiers", "name": self._("Viewing users dossiers")})
    def user_tables(self, user, tables):
        """Append the dossier table to the user dashboard (if permitted)."""
        req = self.req()
        if req.has_access("users.dossiers"):
            dossier_info = {
                "user": user.uuid
            }
            vars = {
                "Write": self._("Write a message to the dossier"),
                "user": user.uuid,
            }
            # Other modules may adjust dossier_info / vars before rendering
            self.call("dossier.before-display", dossier_info, vars)
            dossier_entries = []
            records = self.objlist(DossierRecordList, query_index="user_performed", query_equal=dossier_info["user"], query_reversed=True)
            records.load(silent=True)
            # Batch-load the admin users who authored the records
            users = {}
            for ent in records:
                if ent.get("admin"):
                    users[ent.get("admin")] = None
            if users:
                ulst = self.objlist(UserList, uuids=users.keys())
                ulst.load(silent=True)
                for ent in ulst:
                    users[ent.uuid] = ent
            for ent in records:
                admin = users.get(ent.get("admin")) if ent.get("admin") else None
                # Escape first, then turn newlines into <br /> for display
                content = re_newline.sub('<br />', htmlescape(ent.get("content")))
                dossier_entries.append([self.call("l10n.time_local", ent.get("performed")), u'<hook:admin.link href="auth/user-dashboard/{0}" title="{1}" />'.format(admin.uuid, htmlescape(admin.get("name"))) if admin else None, content])
            table = {
                "type": "dossier",
                "title": self._("Dossier"),
                "order": 100,
                "header": [self._("dossier///Performed"), self._("Administrator"), self._("Event")],
                "rows": dossier_entries,
                # The write-message form is rendered above the table
                "before": self.call("web.parse_template", "admin/auth/write-dossier.html", vars),
            }
            self.call("dossier.after-display", records, users, table)
            tables.append(table)
    def dossier_write(self, **kwargs):
        """Create and store a dossier record from keyword arguments; return it."""
        rec = self.obj(DossierRecord)
        for key, value in kwargs.iteritems():
            rec.set(key, value)
        rec.set("performed", self.now())
        # Give other modules a chance to amend the record before storing
        self.call("dossier.record", rec)
        rec.store()
        return rec
    def admin_write_dossier(self):
        """Handle the dashboard's 'write to dossier' form submission."""
        req = self.req()
        try:
            user = self.obj(User, req.args)
        except ObjectNotFoundException:
            self.call("web.not_found")
        content = req.param("content").strip()
        errors = {}
        if content == "":
            errors["content"] = self._("Content must not be empty")
        if len(errors):
            self.call("web.response_json", {"success": False, "errors": errors})
        rec = self.call("dossier.write", user=user.uuid, admin=req.user(), content=content)
        self.call("admin.redirect", "auth/user-dashboard/%s" % req.args, {"active_tab": "dossier"})
|
JoyTeam/metagam
|
mg/core/auth.py
|
Python
|
gpl-3.0
| 73,211
|
[
"VisIt"
] |
55063824a9ad810322c17ce8850720f9a8d9b094a9e9d5b6688ff6ee7d2b3f58
|
""" DIRAC FileCatalog component representing a flat directory tree
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=protected-access
__RCSID__ = "$Id$"
import six
import os
import stat
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Utilities.List import stringListToString, intListToString
from DIRAC.DataManagementSystem.DB.FileCatalogComponents.DirectoryManager.DirectoryTreeBase import DirectoryTreeBase
class DirectoryFlatTree(DirectoryTreeBase):
    """DIRAC FileCatalog directory manager with a flat namespace.

    Every directory is a single row in the DirectoryInfo table keyed by
    its full path (DirName), so path lookups need no tree traversal.

    NOTE(review): SQL statements are built by string interpolation; the
    values originate from the catalog/service layer, but parameterized
    queries would be safer — confirm against the DB API conventions.
    """

    def __init__(self, database=None):
        DirectoryTreeBase.__init__(self, database)
        # Table used by the generic DirectoryTreeBase machinery
        self.treeTable = 'DirectoryInfo'

    def getDirectoryCounters(self):
        """Return S_OK({'DirectoryInfo': <total number of directories>})."""
        req = "SELECT COUNT(*) FROM DirectoryInfo"
        res = self.db._query(req)
        if not res['OK']:
            return res
        return S_OK({'DirectoryInfo': res['Value'][0][0]})

    def _findDirectories(self, paths, metadata=None):
        """Find directory IDs (plus optional metadata columns) for the given paths.

        :param list paths: directory paths (LFNs)
        :param list metadata: optional DirectoryInfo column names to also return
        :return: S_OK({'Successful': {path: {'DirID': .., <col>: ..}},
                       'Failed': {path: error string}})
        """
        # Fixed: 'metadata=[]' was a shared mutable default argument
        if metadata is None:
            metadata = []
        successful = {}
        failed = {}
        req = "SELECT DirName,DirID"
        if metadata:
            req = "%s,%s" % (req, intListToString(metadata))
        req = "%s FROM DirectoryInfo WHERE DirName IN (%s)" % (req, stringListToString(paths))
        res = self.db._query(req)
        if not res['OK']:
            return res
        for tup in res['Value']:
            dirName = tup[0]
            dirID = tup[1]
            metaDict = {'DirID': dirID}
            # Columns after DirName,DirID line up with the requested metadata
            metaDict.update(dict(zip(metadata, tup[2:])))
            successful[dirName] = metaDict
        for path in paths:
            if path not in successful:
                failed[path] = 'No such file or directory'
        return S_OK({"Successful": successful, "Failed": failed})

    def __findDirs(self, paths, metadata=None):
        """Return S_OK({dirID: {column: value}}) for the given directory paths."""
        # Fixed: "metadata=['DirName']" was a shared mutable default argument
        if metadata is None:
            metadata = ['DirName']
        dirs = {}
        req = "SELECT DirID,%s FROM DirectoryInfo WHERE DirName IN (%s)" % (
            intListToString(metadata), stringListToString(paths))
        res = self.db._query(req)
        if not res['OK']:
            return res
        if not res['Value']:
            return S_OK(dirs)
        for tup in res['Value']:
            dirID = tup[0]
            dirs[dirID] = dict(zip(metadata, tup[1:]))
        return S_OK(dirs)

    def getPathPermissions(self, paths, credDict):
        """Get Read/Write/Execute permission bits of 'paths' for the caller.

        Owner, group or 'other' mode bits are chosen by comparing the
        caller's uid/gid with the directory's UID/GID columns.
        """
        res = self.db.ugManager.getUserAndGroupID(credDict)
        if not res['OK']:
            return res
        uid, gid = res['Value']
        res = self._findDirectories(paths, metadata=['Mode', 'UID', 'GID'])
        if not res['OK']:
            return res
        successful = {}
        for dirName, dirDict in res['Value']['Successful'].items():
            mode = dirDict['Mode']
            p_uid = dirDict['UID']
            p_gid = dirDict['GID']
            # Pick the relevant (read, write, execute) bit masks
            if p_uid == uid:
                rBit, wBit, xBit = stat.S_IRUSR, stat.S_IWUSR, stat.S_IXUSR
            elif p_gid == gid:
                rBit, wBit, xBit = stat.S_IRGRP, stat.S_IWGRP, stat.S_IXGRP
            else:
                rBit, wBit, xBit = stat.S_IROTH, stat.S_IWOTH, stat.S_IXOTH
            successful[dirName] = {
                'Read': mode & rBit,
                'Write': mode & wBit,
                'Execute': mode & xBit,
            }
        return S_OK({'Successful': successful, 'Failed': res['Value']['Failed']})

    def findDir(self, path):
        """Return S_OK(dirID) for 'path', or S_OK(0) if it does not exist."""
        res = self.__findDirs([path])
        if not res['OK']:
            return res
        if not res['Value']:
            return S_OK(0)
        # __findDirs keys its result dict by DirID
        return S_OK(list(res['Value'])[0])

    def removeDir(self, path):
        """Remove the directory at 'path'; succeeds silently if absent."""
        res = self.findDir(path)
        if not res['OK']:
            return res
        if not res['Value']:
            return S_OK()
        dirID = res['Value']
        req = "DELETE FROM DirectoryInfo WHERE DirID=%d" % dirID
        return self.db._update(req)

    def makeDirectory(self, path, credDict, status=0):
        """Create a new directory owned by the requesting user.

        :param str path: absolute directory path
        :param dict credDict: credentials of the requesting user
        :param int status: initial Status column value
        :return: S_OK(dirID) of the new (or already existing) directory
        """
        if path[0] != '/':
            return S_ERROR('Not an absolute path')
        result = self.findDir(path)
        if not result['OK']:
            return result
        if result['Value']:
            # Already exists: return its ID
            return S_OK(result['Value'])
        result = self.db.ugManager.getUserAndGroupID(credDict)
        if not result['OK']:
            return result
        uid, gid = result['Value']
        res = self.getParent(path)
        if not res['OK']:
            return res
        parentID = res['Value']
        req = "INSERT INTO DirectoryInfo (Parent,Status,DirName,UID,GID,Mode,CreationDate,ModificationDate)" \
              " VALUES (%d,%d,'%s',%d,%d,%d,UTC_TIMESTAMP(),UTC_TIMESTAMP());" % (
                  parentID, status, path, uid, gid, self.db.umask)
        result = self.db._update(req)
        if not result['OK']:
            # Best-effort cleanup of a possibly half-created row
            self.removeDir(path)
            return S_ERROR('Failed to create directory %s' % path)
        return S_OK(result['lastRowId'])

    def makeDir(self, path):
        """Insert a bare DirectoryInfo row for 'path' (no ownership data)."""
        result = self.findDir(path)
        if not result['OK']:
            return result
        dirID = result['Value']
        if dirID:
            return S_OK(dirID)
        names = ['DirName']
        values = [path]
        result = self.db.insertFields('DirectoryInfo', names, values)
        if not result['OK']:
            return result
        return S_OK(result['lastRowId'])

    def existsDir(self, path):
        """Check the existence of a directory at the specified path."""
        result = self.findDir(path)
        if not result['OK']:
            return result
        if not result['Value']:
            return S_OK({"Exists": False})
        return S_OK({"Exists": True, "DirID": result['Value']})

    def getParent(self, path):
        """Get the directory ID of the parent of the given directory."""
        return self.findDir(os.path.dirname(path))

    def getParentID(self, dirID):
        """Get the ID of the parent of a directory specified by ID."""
        if dirID == 0:
            return S_ERROR('Root directory ID given')
        req = "SELECT Parent FROM DirectoryInfo WHERE DirID=%d" % dirID
        result = self.db._query(req)
        if not result['OK']:
            return result
        if not result['Value']:
            return S_ERROR('No parent found')
        return S_OK(result['Value'][0][0])

    def getDirectoryPath(self, dirID):
        """Get the full directory path by directory ID."""
        req = "SELECT DirName FROM DirectoryInfo WHERE DirID=%d" % int(dirID)
        result = self.db._query(req)
        if not result['OK']:
            return result
        if not result['Value']:
            return S_ERROR('Directory with id %d not found' % int(dirID))
        return S_OK(result['Value'][0][0])

    def getDirectoryName(self, dirID):
        """Get the base directory name by directory ID."""
        result = self.getDirectoryPath(dirID)
        if not result['OK']:
            return result
        return S_OK(os.path.basename(result['Value']))

    def getPathIDs(self, path):
        """Get IDs of all the directories in the parent hierarchy of 'path'."""
        elements = path.split('/')
        pelements = []
        dPath = ''
        # Build the chain of ancestor paths: /a, /a/b, /a/b/c, ...
        for el in elements[1:]:
            dPath += '/' + el
            pelements.append(dPath)
        pathString = ["'" + p + "'" for p in pelements]
        req = "SELECT DirID FROM DirectoryInfo WHERE DirName in (%s) ORDER BY DirID" % ','.join(pathString)
        result = self.db._query(req)
        if not result['OK']:
            return result
        if not result['Value']:
            return S_ERROR('Directory %s not found' % path)
        return S_OK([x[0] for x in result['Value']])

    def getChildren(self, path):
        """Get child directory IDs for the given directory (path or ID)."""
        if isinstance(path, six.string_types):
            result = self.findDir(path)
            if not result['OK']:
                return result
            dirID = result['Value']
        else:
            dirID = path
        req = "SELECT DirID FROM DirectoryInfo WHERE Parent=%d" % dirID
        result = self.db._query(req)
        if not result['OK']:
            return result
        if not result['Value']:
            return S_OK([])
        return S_OK([x[0] for x in result['Value']])
|
yujikato/DIRAC
|
src/DIRAC/DataManagementSystem/DB/FileCatalogComponents/DirectoryManager/DirectoryFlatTree.py
|
Python
|
gpl-3.0
| 7,780
|
[
"DIRAC"
] |
c9c6ef8643b38c45374f60875ffc28d880d813f1ddac16a36fffc63b4a6c7ba3
|
from __future__ import division
import collections
import re
import sector
import vector3
# "Imagine the galaxy is a giant slice of Battenberg
# which for reasons beyond our ken has had small chunks
# of carrot cake pushed into it all over the place..."
# - CMDR Jackie Silver
# This does not validate sector names, just ensures that it matches the 'Something AB-C d1' or 'Something AB-C d1-23' format
# Procedurally-generated system name parser.
# This does not validate sector names, just ensures that it matches the 'Something AB-C d1' or 'Something AB-C d1-23' format
pg_system_regex = re.compile("^(?P<sector>[\\w\\s'.()/-]+) (?P<l1>[A-Za-z])(?P<l2>[A-Za-z])-(?P<l3>[A-Za-z]) (?P<mcode>[A-Za-z])(?:(?P<n1>\\d+)-)?(?P<n2>\\d+)$")
# Hopefully-complete list of valid name fragments / phonemes
cx_raw_fragments = [
  "Th", "Eo", "Oo", "Eu", "Tr", "Sly", "Dry", "Ou",
  "Tz", "Phl", "Ae", "Sch", "Hyp", "Syst", "Ai", "Kyl",
  "Phr", "Eae", "Ph", "Fl", "Ao", "Scr", "Shr", "Fly",
  "Pl", "Fr", "Au", "Pry", "Pr", "Hyph", "Py", "Chr",
  "Phyl", "Tyr", "Bl", "Cry", "Gl", "Br", "Gr", "By",
  "Aae", "Myc", "Gyr", "Ly", "Myl", "Lych", "Myn", "Ch",
  "Myr", "Cl", "Rh", "Wh", "Pyr", "Cr", "Syn", "Str",
  "Syr", "Cy", "Wr", "Hy", "My", "Sty", "Sc", "Sph",
  "Spl", "A", "Sh", "B", "C", "D", "Sk", "Io",
  "Dr", "E", "Sl", "F", "Sm", "G", "H", "I",
  "Sp", "J", "Sq", "K", "L", "Pyth", "M", "St",
  "N", "O", "Ny", "Lyr", "P", "Sw", "Thr", "Lys",
  "Q", "R", "S", "T", "Ea", "U", "V", "W",
  "Schr", "X", "Ee", "Y", "Z", "Ei", "Oe",
  "ll", "ss", "b", "c", "d", "f", "dg", "g", "ng", "h", "j", "k", "l", "m", "n",
  "mb", "p", "q", "gn", "th", "r", "s", "t", "ch", "tch", "v", "w", "wh",
  "ck", "x", "y", "z", "ph", "sh", "ct", "wr", "o", "ai", "a", "oi", "ea",
  "ie", "u", "e", "ee", "oo", "ue", "i", "oa", "au", "ae", "oe", "scs",
  "wsy", "vsky", "sms", "dst", "rb", "nts", "rd", "rld", "lls", "rgh",
  "rg", "hm", "hn", "rk", "rl", "rm", "cs", "wyg", "rn", "hs", "rbs", "rp",
  "tts", "wn", "ms", "rr", "mt", "rs", "cy", "rt", "ws", "lch", "my", "ry",
  "nks", "nd", "sc", "nk", "sk", "nn", "ds", "sm", "sp", "ns", "nt", "dy",
  "st", "rrs", "xt", "nz", "sy", "xy", "rsch", "rphs", "sts", "sys", "sty",
  "tl", "tls", "rds", "nch", "rns", "ts", "wls", "rnt", "tt", "rdy", "rst",
  "pps", "tz", "sks", "ppy", "ff", "sps", "kh", "sky", "lts", "wnst", "rth",
  "ths", "fs", "pp", "ft", "ks", "pr", "ps", "pt", "fy", "rts", "ky",
  "rshch", "mly", "py", "bb", "nds", "wry", "zz", "nns", "ld", "lf",
  "gh", "lks", "sly", "lk", "rph", "ln", "bs", "rsts", "gs", "ls", "vvy",
  "lt", "rks", "qs", "rps", "gy", "wns", "lz", "nth", "phs", "io", "oea",
  "aa", "ua", "eia", "ooe", "iae", "oae", "ou", "uae", "ao", "eae", "aea",
  "ia", "eou", "aei", "uia", "aae", "eau" ]
# Sort fragments by length to ensure we check the longest ones first
cx_fragments = sorted(cx_raw_fragments, key=len, reverse=True)
# Order here is relevant, keep it
# The first 111 raw fragments are the capitalised name prefixes
cx_prefixes = cx_raw_fragments[0:111]
#
# Sequences used in runs
#
# Vowel-ish infixes
c1_infixes_s1 = [
  "o", "ai", "a", "oi", "ea", "ie", "u", "e",
  "ee", "oo", "ue", "i", "oa", "au", "ae", "oe"
]
# Consonant-ish infixes
c1_infixes_s2 = [
  "ll", "ss", "b", "c", "d", "f", "dg", "g",
  "ng", "h", "j", "k", "l", "m", "n", "mb",
  "p", "q", "gn", "th", "r", "s", "t", "ch",
  "tch", "v", "w", "wh", "ck", "x", "y", "z",
  "ph", "sh", "ct", "wr"
]
# Index 0 is a placeholder so sequence numbers 1/2 index directly
c1_infixes = [
  [],
  c1_infixes_s1,
  c1_infixes_s2
]
# Sequence 1
cx_suffixes_s1 = [
  "oe", "io", "oea", "oi", "aa", "ua", "eia", "ae",
  "ooe", "oo", "a", "ue", "ai", "e", "iae", "oae",
  "ou", "uae", "i", "ao", "au", "o", "eae", "u",
  "aea", "ia", "ie", "eou", "aei", "ea", "uia", "oa",
  "aae", "eau", "ee"
]
# Sequence 2
c1_suffixes_s2 = [
  "b", "scs", "wsy", "c", "d", "vsky", "f", "sms",
  "dst", "g", "rb", "h", "nts", "ch", "rd", "rld",
  "k", "lls", "ck", "rgh", "l", "rg", "m", "n",
  # Formerly sequence 4/5...
  "hm", "p", "hn", "rk", "q", "rl", "r", "rm",
  "s", "cs", "wyg", "rn", "ct", "t", "hs", "rbs",
  "rp", "tts", "v", "wn", "ms", "w", "rr", "mt",
  "x", "rs", "cy", "y", "rt", "z", "ws", "lch", # "y" is speculation
  "my", "ry", "nks", "nd", "sc", "ng", "sh", "nk",
  "sk", "nn", "ds", "sm", "sp", "ns", "nt", "dy",
  "ss", "st", "rrs", "xt", "nz", "sy", "xy", "rsch",
  "rphs", "sts", "sys", "sty", "th", "tl", "tls", "rds",
  "nch", "rns", "ts", "wls", "rnt", "tt", "rdy", "rst",
  "pps", "tz", "tch", "sks", "ppy", "ff", "sps", "kh",
  "sky", "ph", "lts", "wnst", "rth", "ths", "fs", "pp",
  "ft", "ks", "pr", "ps", "pt", "fy", "rts", "ky",
  "rshch", "mly", "py", "bb", "nds", "wry", "zz", "nns",
  "ld", "lf", "gh", "lks", "sly", "lk", "ll", "rph",
  "ln", "bs", "rsts", "gs", "ls", "vvy", "lt", "rks",
  "qs", "rps", "gy", "wns", "lz", "nth", "phs"
]
# Class 2 appears to use a subset of sequence 2
c2_suffixes_s2 = c1_suffixes_s2[0:len(cx_suffixes_s1)]
# Index 0 is a placeholder so sequence numbers 1/2 index directly
c1_suffixes = [
  [],
  cx_suffixes_s1,
  c1_suffixes_s2
]
c2_suffixes = [
  [],
  cx_suffixes_s1,
  c2_suffixes_s2
]
# These prefixes use the specified index into the c2_suffixes list
c2_prefix_suffix_override_map = {
  "Eo": 2, "Oo": 2, "Eu": 2,
  "Ou": 2, "Ae": 2, "Ai": 2,
  "Eae": 2, "Ao": 2, "Au": 2,
  "Aae": 2
}
# These prefixes use the specified index into the c1_infixes list
c1_prefix_infix_override_map = {
  "Eo": 2, "Oo": 2, "Eu": 2, "Ou": 2,
  "Ae": 2, "Ai": 2, "Eae": 2, "Ao": 2,
  "Au": 2, "Aae": 2, "A": 2, "Io": 2,
  "E": 2, "I": 2, "O": 2, "Ea": 2,
  "U": 2, "Ee": 2, "Ei": 2, "Oe": 2
}
# The default run length for most prefixes
cx_prefix_length_default = 35
# Some prefixes use short run lengths; specify them here
cx_prefix_length_overrides = {
  'Eu': 31, 'Sly': 4, 'Tz': 1, 'Phl': 13,
  'Ae': 12, 'Hyp': 25, 'Kyl': 30, 'Phr': 10,
  'Eae': 4, 'Ao': 5, 'Scr': 24, 'Shr': 11,
  'Fly': 20, 'Pry': 3, 'Hyph': 14, 'Py': 12,
  'Phyl': 8, 'Tyr': 25, 'Cry': 5, 'Aae': 5,
  'Myc': 2, 'Gyr': 10, 'Myl': 12, 'Lych': 3,
  'Myn': 10, 'Myr': 4, 'Rh': 15, 'Wr': 31,
  'Sty': 4, 'Spl': 16, 'Sk': 27, 'Sq': 7,
  'Pyth': 1, 'Lyr': 10, 'Sw': 24, 'Thr': 32,
  'Lys': 10, 'Schr': 3, 'Z': 34,
}
# Get the total length of one run over all prefixes
cx_prefix_total_run_length = sum([cx_prefix_length_overrides.get(p, cx_prefix_length_default) for p in cx_prefixes])
# Default infix run lengths
# NOTE(review): s1 default uses len of the *s2* suffix table and vice
# versa; this cross-reference appears deliberate — confirm against the
# naming-scheme documentation before "fixing" it.
c1_infix_s1_length_default = len(c1_suffixes_s2)
c1_infix_s2_length_default = len(cx_suffixes_s1)
# Some infixes use short runs too
c1_infix_length_overrides = {
  # Sequence 1
  'oi': 88, 'ue': 147, 'oa': 57,
  'au': 119, 'ae': 12, 'oe': 39,
  # Sequence 2
  'dg': 31, 'tch': 20, 'wr': 31,
}
# Total lengths of runs over all infixes, for each sequence
c1_infix_s1_total_run_length = sum([c1_infix_length_overrides.get(p, c1_infix_s1_length_default) for p in c1_infixes_s1])
c1_infix_s2_total_run_length = sum([c1_infix_length_overrides.get(p, c1_infix_s2_length_default) for p in c1_infixes_s2])
ha_sectors = collections.OrderedDict([
("trianguli sector", sector.HASector(vector3.Vector3(60.85156, -47.94922, -81.32031), 50.0, "Trianguli Sector")),
("crucis sector", sector.HASector(vector3.Vector3(75.91016, 8.32812, 44.83984), 60.0, "Crucis Sector")),
("tascheter sector", sector.HASector(vector3.Vector3(1.46094, -22.39844, -62.74023), 50.0, "Tascheter Sector")),
("hydrae sector", sector.HASector(vector3.Vector3(77.57031, 84.07031, 69.47070), 60.0, "Hydrae Sector")),
("col 285 sector", sector.HASector(vector3.Vector3(-53.46875, 56.27344, -19.35547), 326.0, "Col 285 Sector")),
("scorpii sector", sector.HASector(vector3.Vector3(37.69141, 0.51953, 126.83008), 60.0, "Scorpii Sector")),
("shui wei sector", sector.HASector(vector3.Vector3(67.51172, -119.44922, 24.85938), 80.0, "Shui Wei Sector")),
("shudun sector", sector.HASector(vector3.Vector3(-3.51953, 34.16016, 12.98047), 30.0, "Shudun Sector")),
("yin sector", sector.HASector(vector3.Vector3(6.42969, 20.21094, -46.98047), 50.0, "Yin Sector")),
("jastreb sector", sector.HASector(vector3.Vector3(-12.51953, 3.82031, -40.75000), 50.0, "Jastreb Sector")),
("pegasi sector", sector.HASector(vector3.Vector3(-170.26953, -95.17188, -19.18945), 100.0, "Pegasi Sector")),
("cephei sector", sector.HASector(vector3.Vector3(-107.98047, 30.05078, -42.23047), 50.0, "Cephei Sector")),
("bei dou sector", sector.HASector(vector3.Vector3(-33.64844, 72.48828, -20.64062), 40.0, "Bei Dou Sector")),
("puppis sector", sector.HASector(vector3.Vector3(56.69141, 5.23828, -28.21094), 50.0, "Puppis Sector")),
("sharru sector", sector.HASector(vector3.Vector3(37.87891, 60.19922, -34.04297), 50.0, "Sharru Sector")),
("alrai sector", sector.HASector(vector3.Vector3(-38.60156, 23.42188, 68.25977), 70.0, "Alrai Sector")),
("lyncis sector", sector.HASector(vector3.Vector3(-68.51953, 65.10156, -141.03906), 70.0, "Lyncis Sector")),
("tucanae sector", sector.HASector(vector3.Vector3(105.60938, -218.21875, 159.47070), 100.0, "Tucanae Sector")),
("piscium sector", sector.HASector(vector3.Vector3(-44.83984, -54.75000, -29.10938), 60.0, "Piscium Sector")),
("herculis sector", sector.HASector(vector3.Vector3(-73.00000, 70.64844, 38.49023), 50.0, "Herculis Sector")),
("antliae sector", sector.HASector(vector3.Vector3(175.87109, 65.89062, 29.18945), 70.0, "Antliae Sector")),
("arietis sector", sector.HASector(vector3.Vector3(-72.16016, -76.82812, -135.36914), 80.0, "Arietis Sector")),
("capricorni sector", sector.HASector(vector3.Vector3(-58.37891, -119.78906, 107.34961), 60.0, "Capricorni Sector")),
("ceti sector", sector.HASector(vector3.Vector3(-14.10156, -116.94922, -32.50000), 70.0, "Ceti Sector")),
("core sys sector", sector.HASector(vector3.Vector3(0.00000, 0.00000, 0.00000), 50.0, "Core Sys Sector")),
("blanco 1 sector", sector.HASector(vector3.Vector3(-42.28906, -864.69922, 157.82031), 231.0, "Blanco 1 Sector")),
("ngc 129 sector", sector.HASector(vector3.Vector3(-4571.64062, -231.18359, -2671.45117), 309.0, "NGC 129 Sector")),
("ngc 225 sector", sector.HASector(vector3.Vector3(-1814.48828, -41.08203, -1133.81836), 100.0, "NGC 225 Sector")),
("ngc 188 sector", sector.HASector(vector3.Vector3(-5187.57031, 2556.32422, -3343.16016), 331.0, "NGC 188 Sector")),
("ic 1590 sector", sector.HASector(vector3.Vector3(-7985.20703, -1052.35156, -5205.49023), 558.0, "IC 1590 Sector")),
("ngc 457 sector", sector.HASector(vector3.Vector3(-6340.41797, -593.83203, -4708.80859), 461.0, "NGC 457 Sector")),
("m103 sector", sector.HASector(vector3.Vector3(-5639.37109, -224.90234, -4405.96094), 105.0, "M103 Sector")),
("ngc 654 sector", sector.HASector(vector3.Vector3(-5168.34375, -46.49609, -4200.19922), 97.0, "NGC 654 Sector")),
("ngc 659 sector", sector.HASector(vector3.Vector3(-4882.00391, -165.43750, -4010.12305), 92.0, "NGC 659 Sector")),
("ngc 663 sector", sector.HASector(vector3.Vector3(-4914.64062, -100.05469, -4051.31836), 260.0, "NGC 663 Sector")),
("col 463 sector", sector.HASector(vector3.Vector3(-1793.73438, 381.90234, -1371.41211), 200.0, "Col 463 Sector")),
("ngc 752 sector", sector.HASector(vector3.Vector3(-929.80469, -589.36328, -1004.09766), 326.0, "NGC 752 Sector")),
("ngc 744 sector", sector.HASector(vector3.Vector3(-2892.49609, -425.51562, -2641.21289), 115.0, "NGC 744 Sector")),
("stock 2 sector", sector.HASector(vector3.Vector3(-718.91406, -32.82422, -679.84180), 130.0, "Stock 2 Sector")),
("h persei sector", sector.HASector(vector3.Vector3(-4817.47266, -437.52734, -4750.67383), 355.0, "h Persei Sector")),
("chi persei sector", sector.HASector(vector3.Vector3(-5389.26172, -480.34766, -5408.10742), 401.0, "Chi Persei Sector")),
("ic 1805 sector", sector.HASector(vector3.Vector3(-4370.87891, 96.60156, -4325.34375), 358.0, "IC 1805 Sector")),
("ngc 957 sector", sector.HASector(vector3.Vector3(-4085.48438, -278.87109, -4275.21484), 190.0, "NGC 957 Sector")),
("tr 2 sector", sector.HASector(vector3.Vector3(-1431.65234, -144.19141, -1556.91211), 112.0, "Tr 2 Sector")),
("m34 sector", sector.HASector(vector3.Vector3(-931.64062, -438.33984, -1263.64648), 171.0, "M34 Sector")),
("ngc 1027 sector", sector.HASector(vector3.Vector3(-1756.25391, 65.96484, -1805.99609), 147.0, "NGC 1027 Sector")),
("ic 1848 sector", sector.HASector(vector3.Vector3(-4436.20312, 102.57031, -4790.66406), 342.0, "IC 1848 Sector")),
("ngc 1245 sector", sector.HASector(vector3.Vector3(-5101.33984, -1451.18359, -7736.58789), 246.0, "NGC 1245 Sector")),
("ngc 1342 sector", sector.HASector(vector3.Vector3(-884.15234, -576.25781, -1896.07422), 95.0, "NGC 1342 Sector")),
("ic 348 sector", sector.HASector(vector3.Vector3(-402.66016, -383.08203, -1130.80273), 26.0, "IC 348 Sector")),
("mel 22 sector", sector.HASector(vector3.Vector3(-104.13672, -195.38672, -437.12695), 172.0, "Mel 22 Sector")),
("ngc 1444 sector", sector.HASector(vector3.Vector3(-2065.66016, -88.70703, -3318.62500), 46.0, "NGC 1444 Sector")),
("ngc 1502 sector", sector.HASector(vector3.Vector3(-1572.28906, 359.08203, -2140.41211), 63.0, "NGC 1502 Sector")),
("ngc 1528 sector", sector.HASector(vector3.Vector3(-1183.84766, 13.24609, -2235.89648), 118.0, "NGC 1528 Sector")),
("ngc 1545 sector", sector.HASector(vector3.Vector3(-1038.79297, 8.09766, -2074.42578), 122.0, "NGC 1545 Sector")),
("hyades sector", sector.HASector(vector3.Vector3(0.00000, -56.67578, -138.88086), 144.0, "Hyades Sector")),
("ngc 1647 sector", sector.HASector(vector3.Vector3(11.76172, -508.69531, -1684.84180), 205.0, "NGC 1647 Sector")),
("ngc 1662 sector", sector.HASector(vector3.Vector3(178.12891, -512.99609, -1317.47070), 83.0, "NGC 1662 Sector")),
("ngc 1664 sector", sector.HASector(vector3.Vector3(-1227.67969, -27.29688, -3712.16406), 171.0, "NGC 1664 Sector")),
("ngc 1746 sector", sector.HASector(vector3.Vector3(-35.15625, -380.61719, -2014.04883), 251.0, "NGC 1746 Sector")),
("ngc 1778 sector", sector.HASector(vector3.Vector3(-921.61719, -167.16797, -4697.52930), 98.0, "NGC 1778 Sector")),
("ngc 1817 sector", sector.HASector(vector3.Vector3(665.49609, -1457.36719, -6227.20508), 281.0, "NGC 1817 Sector")),
("ngc 1857 sector", sector.HASector(vector3.Vector3(-1246.36328, 140.66016, -6071.80273), 109.0, "NGC 1857 Sector")),
("ngc 1893 sector", sector.HASector(vector3.Vector3(-1192.19141, -317.42969, -10628.63672), 343.0, "NGC 1893 Sector")),
("m38 sector", sector.HASector(vector3.Vector3(-466.23828, 42.51562, -3448.36328), 203.0, "M38 Sector")),
("col 69 sector", sector.HASector(vector3.Vector3(366.92969, -299.39453, -1359.90039), 300.0, "Col 69 Sector")),
("ngc 1981 sector", sector.HASector(vector3.Vector3(578.95703, -423.23828, -1084.28711), 106.0, "NGC 1981 Sector")),
("trapezium sector", sector.HASector(vector3.Vector3(594.46875, -431.80859, -1072.44922), 182.0, "Trapezium Sector")),
("col 70 sector", sector.HASector(vector3.Vector3(508.68359, -372.59375, -1090.87891), 514.0, "Col 70 Sector")),
("m36 sector", sector.HASector(vector3.Vector3(-412.07422, 75.04688, -4279.55078), 126.0, "M36 Sector")),
("m37 sector", sector.HASector(vector3.Vector3(-180.73047, 243.89453, -4499.77148), 184.0, "M37 Sector")),
("ngc 2129 sector", sector.HASector(vector3.Vector3(567.78906, 8.62109, -4907.25391), 72.0, "NGC 2129 Sector")),
("ngc 2169 sector", sector.HASector(vector3.Vector3(921.21484, -173.53516, -3299.41602), 50.0, "NGC 2169 Sector")),
("m35 sector", sector.HASector(vector3.Vector3(305.50781, 102.11328, -2640.42383), 194.0, "M35 Sector")),
("ngc 2175 sector", sector.HASector(vector3.Vector3(940.29688, 37.07031, -5225.95117), 78.0, "NGC 2175 Sector")),
("col 89 sector", sector.HASector(vector3.Vector3(603.48438, 273.61719, -4187.90430), 593.0, "Col 89 Sector")),
("ngc 2232 sector", sector.HASector(vector3.Vector3(655.20312, -154.73828, -956.90234), 154.0, "NGC 2232 Sector")),
("col 97 sector", sector.HASector(vector3.Vector3(878.88281, -64.39062, -1850.92383), 250.0, "Col 97 Sector")),
("ngc 2244 sector", sector.HASector(vector3.Vector3(2092.95703, -164.37500, -4216.23242), 412.0, "NGC 2244 Sector")),
("ngc 2251 sector", sector.HASector(vector3.Vector3(1733.50781, 7.55859, -3967.84375), 126.0, "NGC 2251 Sector")),
("col 107 sector", sector.HASector(vector3.Vector3(2591.42578, -89.05859, -5042.36914), 578.0, "Col 107 Sector")),
("ngc 2264 sector", sector.HASector(vector3.Vector3(851.16406, 83.68359, -2005.22070), 510.0, "NGC 2264 Sector")),
("m41 sector", sector.HASector(vector3.Vector3(1731.03125, -400.21094, -1396.76758), 350.0, "M41 Sector")),
("ngc 2286 sector", sector.HASector(vector3.Vector3(5456.35547, -379.24609, -7706.28711), 385.0, "NGC 2286 Sector")),
("ngc 2281 sector", sector.HASector(vector3.Vector3(-151.60938, 535.15234, -1732.92383), 133.0, "NGC 2281 Sector")),
("ngc 2301 sector", sector.HASector(vector3.Vector3(1530.08984, 14.87109, -2392.53125), 116.0, "NGC 2301 Sector")),
("col 121 sector", sector.HASector(vector3.Vector3(1246.80469, -278.00000, -860.11328), 459.0, "Col 121 Sector")),
("m50 sector", sector.HASector(vector3.Vector3(2015.20703, -63.45703, -2261.81836), 124.0, "M50 Sector")),
("ngc 2324 sector", sector.HASector(vector3.Vector3(2088.35938, 218.74219, -3167.16211), 78.0, "NGC 2324 Sector")),
("ngc 2335 sector", sector.HASector(vector3.Vector3(3185.22266, -104.81641, -3344.81250), 135.0, "NGC 2335 Sector")),
("ngc 2345 sector", sector.HASector(vector3.Vector3(5319.95703, -294.56641, -5048.45312), 257.0, "NGC 2345 Sector")),
("ngc 2343 sector", sector.HASector(vector3.Vector3(2402.10547, -66.03906, -2461.52930), 51.0, "NGC 2343 Sector")),
("ngc 2354 sector", sector.HASector(vector3.Vector3(11248.28125, -1574.77344, -6919.98828), 500.0, "NGC 2354 Sector")),
("ngc 2353 sector", sector.HASector(vector3.Vector3(2567.32812, 25.48047, -2594.35547), 192.0, "NGC 2353 Sector")),
("col 132 sector", sector.HASector(vector3.Vector3(1355.99609, -235.59766, -690.91602), 426.0, "Col 132 Sector")),
("col 135 sector", sector.HASector(vector3.Vector3(942.32812, -198.29688, -365.50586), 150.0, "Col 135 Sector")),
("ngc 2360 sector", sector.HASector(vector3.Vector3(4695.94141, -150.25781, -3968.37891), 233.0, "NGC 2360 Sector")),
("ngc 2362 sector", sector.HASector(vector3.Vector3(3826.82812, -449.91797, -2381.99023), 66.0, "NGC 2362 Sector")),
("ngc 2367 sector", sector.HASector(vector3.Vector3(5384.37891, -433.42969, -3686.76172), 77.0, "NGC 2367 Sector")),
("col 140 sector", sector.HASector(vector3.Vector3(1186.89453, -181.42578, -548.42188), 162.0, "Col 140 Sector")),
("ngc 2374 sector", sector.HASector(vector3.Vector3(3581.40625, 83.59766, -3179.72266), 210.0, "NGC 2374 Sector")),
("ngc 2384 sector", sector.HASector(vector3.Vector3(5674.66016, -288.94141, -3914.68555), 101.0, "NGC 2384 Sector")),
("ngc 2395 sector", sector.HASector(vector3.Vector3(674.53906, 404.00781, -1473.32031), 64.0, "NGC 2395 Sector")),
("ngc 2414 sector", sector.HASector(vector3.Vector3(8802.37109, 393.31641, -7026.83984), 164.0, "NGC 2414 Sector")),
("m47 sector", sector.HASector(vector3.Vector3(1241.61328, 86.52734, -1005.43945), 117.0, "M47 Sector")),
("ngc 2423 sector", sector.HASector(vector3.Vector3(1925.25391, 156.97656, -1587.05859), 88.0, "NGC 2423 Sector")),
("mel 71 sector", sector.HASector(vector3.Vector3(7730.26562, 807.34375, -6743.53906), 240.0, "Mel 71 Sector")),
("ngc 2439 sector", sector.HASector(vector3.Vector3(11484.73047, -964.35938, -5017.55664), 330.0, "NGC 2439 Sector")),
("m46 sector", sector.HASector(vector3.Vector3(3516.44531, 320.30859, -2757.24609), 261.0, "M46 Sector")),
("m93 sector", sector.HASector(vector3.Vector3(2930.09375, 11.79688, -1684.87891), 99.0, "M93 Sector")),
("ngc 2451a sector", sector.HASector(vector3.Vector3(757.34375, -93.33594, -240.24414), 105.0, "NGC 2451A Sector")),
("ngc 2477 sector", sector.HASector(vector3.Vector3(3808.06641, -403.21484, -1120.77539), 175.0, "NGC 2477 Sector")),
("ngc 2467 sector", sector.HASector(vector3.Vector3(3941.64844, 30.85547, -1999.71289), 193.0, "NGC 2467 Sector")),
("ngc 2482 sector", sector.HASector(vector3.Vector3(3850.51562, 152.85938, -2081.96484), 153.0, "NGC 2482 Sector")),
("ngc 2483 sector", sector.HASector(vector3.Vector3(4895.04688, 28.32812, -2303.43359), 142.0, "NGC 2483 Sector")),
("ngc 2489 sector", sector.HASector(vector3.Vector3(11855.98828, -180.25000, -5105.99414), 263.0, "NGC 2489 Sector")),
("ngc 2516 sector", sector.HASector(vector3.Vector3(1276.15234, -364.36719, 87.00000), 117.0, "NGC 2516 Sector")),
("ngc 2506 sector", sector.HASector(vector3.Vector3(8599.23047, 1962.22266, -7063.48828), 395.0, "NGC 2506 Sector")),
("col 173 sector", sector.HASector(vector3.Vector3(1341.08203, -193.03516, -202.82031), 500.0, "Col 173 Sector")),
("ngc 2527 sector", sector.HASector(vector3.Vector3(1790.95312, 64.98438, -793.64062), 58.0, "NGC 2527 Sector")),
("ngc 2533 sector", sector.HASector(vector3.Vector3(10181.95312, 249.56250, -4155.17969), 160.0, "NGC 2533 Sector")),
("ngc 2539 sector", sector.HASector(vector3.Vector3(3519.28906, 856.72266, -2585.17578), 117.0, "NGC 2539 Sector")),
("ngc 2547 sector", sector.HASector(vector3.Vector3(1457.24609, -218.75781, -137.75000), 108.0, "NGC 2547 Sector")),
("ngc 2546 sector", sector.HASector(vector3.Vector3(2894.65234, -104.69922, -781.03711), 611.0, "NGC 2546 Sector")),
("m48 sector", sector.HASector(vector3.Vector3(1795.49219, 666.54688, -1622.35156), 220.0, "M48 Sector")),
("ngc 2567 sector", sector.HASector(vector3.Vector3(5126.51953, 286.27734, -1886.19336), 144.0, "NGC 2567 Sector")),
("ngc 2571 sector", sector.HASector(vector3.Vector3(4083.74219, -275.02344, -1559.42969), 102.0, "NGC 2571 Sector")),
("ngc 2579 sector", sector.HASector(vector3.Vector3(3250.51562, 17.64453, -889.24023), 89.0, "NGC 2579 Sector")),
("pismis 4 sector", sector.HASector(vector3.Vector3(1912.67578, -80.82031, -245.01953), 102.0, "Pismis 4 Sector")),
("ngc 2627 sector", sector.HASector(vector3.Vector3(6248.08594, 773.52734, -2078.46094), 193.0, "NGC 2627 Sector")),
("ngc 2645 sector", sector.HASector(vector3.Vector3(5410.67188, -275.22656, -492.41016), 48.0, "NGC 2645 Sector")),
("ngc 2632 sector", sector.HASector(vector3.Vector3(221.48438, 327.75391, -464.35156), 125.0, "NGC 2632 Sector")),
("ic 2391 sector", sector.HASector(vector3.Vector3(565.85938, -68.47656, 3.95117), 100.0, "IC 2391 Sector")),
("ic 2395 sector", sector.HASector(vector3.Vector3(2290.90234, -152.42969, -136.10547), 114.0, "IC 2395 Sector")),
("ngc 2669 sector", sector.HASector(vector3.Vector3(3389.15234, -374.19531, 41.40820), 199.0, "NGC 2669 Sector")),
("ngc 2670 sector", sector.HASector(vector3.Vector3(3858.68750, -243.00000, -168.47461), 91.0, "NGC 2670 Sector")),
("tr 10 sector", sector.HASector(vector3.Vector3(1369.04297, 14.44922, -172.95117), 57.0, "Tr 10 Sector")),
("m67 sector", sector.HASector(vector3.Vector3(1466.01953, 1555.39453, -2047.71289), 216.0, "M67 Sector")),
("ic 2488 sector", sector.HASector(vector3.Vector3(3654.96484, -283.85938, 500.66797), 194.0, "IC 2488 Sector")),
("ngc 2910 sector", sector.HASector(vector3.Vector3(8461.80469, -178.01172, 784.97852), 99.0, "NGC 2910 Sector")),
("ngc 2925 sector", sector.HASector(vector3.Vector3(2505.64453, -52.77344, 263.35352), 74.0, "NGC 2925 Sector")),
("ngc 3114 sector", sector.HASector(vector3.Vector3(2883.98828, -196.83203, 681.74609), 312.0, "NGC 3114 Sector")),
("ngc 3228 sector", sector.HASector(vector3.Vector3(1733.04688, 141.95312, 330.59570), 26.0, "NGC 3228 Sector")),
("ngc 3247 sector", sector.HASector(vector3.Vector3(4886.86328, -26.44141, 1272.93359), 74.0, "NGC 3247 Sector")),
("ic 2581 sector", sector.HASector(vector3.Vector3(7722.32031, 0.00000, 2011.51367), 117.0, "IC 2581 Sector")),
("ngc 3293 sector", sector.HASector(vector3.Vector3(7299.60547, 13.24609, 2079.34766), 133.0, "NGC 3293 Sector")),
("ngc 3324 sector", sector.HASector(vector3.Vector3(7259.77734, -26.39062, 2109.16016), 264.0, "NGC 3324 Sector")),
("ngc 3330 sector", sector.HASector(vector3.Vector3(2824.55859, 193.51953, 714.72266), 43.0, "NGC 3330 Sector")),
("col 228 sector", sector.HASector(vector3.Vector3(6846.64453, -125.30859, 2158.73828), 293.0, "Col 228 Sector")),
("ic 2602 sector", sector.HASector(vector3.Vector3(497.46484, -45.26953, 177.13867), 155.0, "IC 2602 Sector")),
("tr 14 sector", sector.HASector(vector3.Vector3(8501.81641, -93.30469, 2664.30664), 130.0, "Tr 14 Sector")),
("tr 16 sector", sector.HASector(vector3.Vector3(8311.20312, -106.53125, 2636.46875), 254.0, "Tr 16 Sector")),
("ngc 3519 sector", sector.HASector(vector3.Vector3(4392.18359, -90.03516, 1642.16992), 82.0, "NGC 3519 Sector")),
("fe 1 sector", sector.HASector(vector3.Vector3(3551.95312, 26.39062, 1292.80469), 275.0, "Fe 1 Sector")),
("ngc 3532 sector", sector.HASector(vector3.Vector3(1497.35938, 41.62109, 533.18555), 232.0, "NGC 3532 Sector")),
("ngc 3572 sector", sector.HASector(vector3.Vector3(6089.70312, 22.72266, 2301.10742), 95.0, "NGC 3572 Sector")),
("col 240 sector", sector.HASector(vector3.Vector3(4804.97656, 17.94141, 1825.23828), 374.0, "Col 240 Sector")),
("ngc 3590 sector", sector.HASector(vector3.Vector3(5015.87109, -18.78125, 1945.52734), 47.0, "NGC 3590 Sector")),
("ngc 3680 sector", sector.HASector(vector3.Vector3(2802.88672, 889.54688, 846.24219), 107.0, "NGC 3680 Sector")),
("ngc 3766 sector", sector.HASector(vector3.Vector3(5194.02734, 0.00000, 2323.40039), 83.0, "NGC 3766 Sector")),
("ic 2944 sector", sector.HASector(vector3.Vector3(5317.44531, -142.92969, 2434.51562), 613.0, "IC 2944 Sector")),
("stock 14 sector", sector.HASector(vector3.Vector3(6333.31641, -85.51953, 2980.23242), 102.0, "Stock 14 Sector")),
("ngc 4103 sector", sector.HASector(vector3.Vector3(4713.57031, 111.41406, 2464.19336), 93.0, "NGC 4103 Sector")),
("ngc 4349 sector", sector.HASector(vector3.Vector3(6160.53516, 99.13281, 3528.17188), 207.0, "NGC 4349 Sector")),
("mel 111 sector", sector.HASector(vector3.Vector3(21.80859, 308.30078, -23.96680), 109.0, "Mel 111 Sector")),
("ngc 4463 sector", sector.HASector(vector3.Vector3(2938.90234, -119.35547, 1744.99219), 512.0, "NGC 4463 Sector")),
("ngc 5281 sector", sector.HASector(vector3.Vector3(2797.33984, -44.10156, 2281.45508), 512.0, "NGC 5281 Sector")),
("ngc 4609 sector", sector.HASector(vector3.Vector3(3387.39062, -6.96484, 2108.46484), 512.0, "NGC 4609 Sector")),
("jewel box sector", sector.HASector(vector3.Vector3(5383.63281, 280.91016, 3522.95117), 188.0, "Jewel Box Sector")),
("ngc 5138 sector", sector.HASector(vector3.Vector3(5131.33984, 395.59375, 3937.41602), 132.0, "NGC 5138 Sector")),
("ngc 5316 sector", sector.HASector(vector3.Vector3(3024.62891, 6.91016, 2556.00781), 250.0, "NGC 5316 Sector")),
("ngc 5460 sector", sector.HASector(vector3.Vector3(1503.62891, 482.09766, 1546.21484), 232.0, "NGC 5460 Sector")),
("ngc 5606 sector", sector.HASector(vector3.Vector3(4178.73438, 102.79297, 4149.66406), 52.0, "NGC 5606 Sector")),
("ngc 5617 sector", sector.HASector(vector3.Vector3(3553.99219, -8.72656, 3516.96875), 146.0, "NGC 5617 Sector")),
("ngc 5662 sector", sector.HASector(vector3.Vector3(1479.93750, 132.47656, 1581.49609), 190.0, "NGC 5662 Sector")),
("ngc 5822 sector", sector.HASector(vector3.Vector3(1849.48438, 187.74219, 2341.85156), 314.0, "NGC 5822 Sector")),
("ngc 5823 sector", sector.HASector(vector3.Vector3(2435.16797, 169.67969, 3028.73828), 136.0, "NGC 5823 Sector")),
("ngc 6025 sector", sector.HASector(vector3.Vector3(1426.48047, -258.18359, 1999.84961), 101.0, "NGC 6025 Sector")),
("ngc 6067 sector", sector.HASector(vector3.Vector3(2322.23828, -177.35156, 3990.00586), 189.0, "NGC 6067 Sector")),
("ngc 6087 sector", sector.HASector(vector3.Vector3(1543.78906, -273.85547, 2451.49414), 119.0, "NGC 6087 Sector")),
("ngc 6124 sector", sector.HASector(vector3.Vector3(546.19922, 174.56250, 1568.46875), 195.0, "NGC 6124 Sector")),
("ngc 6134 sector", sector.HASector(vector3.Vector3(1264.10547, -10.40234, 2698.57812), 53.0, "NGC 6134 Sector")),
("ngc 6152 sector", sector.HASector(vector3.Vector3(1528.39062, -181.70312, 2986.73828), 245.0, "NGC 6152 Sector")),
("ngc 6169 sector", sector.HASector(vector3.Vector3(1261.91016, 156.59375, 3357.25586), 105.0, "NGC 6169 Sector")),
("ngc 6167 sector", sector.HASector(vector3.Vector3(1508.11328, -81.90234, 3278.87109), 74.0, "NGC 6167 Sector")),
("ngc 6178 sector", sector.HASector(vector3.Vector3(1218.22656, 69.32031, 3076.88477), 49.0, "NGC 6178 Sector")),
("ngc 6193 sector", sector.HASector(vector3.Vector3(1490.62500, -105.26562, 3461.19336), 154.0, "NGC 6193 Sector")),
("ngc 6200 sector", sector.HASector(vector3.Vector3(2509.40234, -128.62109, 6210.98633), 234.0, "NGC 6200 Sector")),
("ngc 6208 sector", sector.HASector(vector3.Vector3(1056.18750, -309.23047, 2855.24805), 161.0, "NGC 6208 Sector")),
("ngc 6231 sector", sector.HASector(vector3.Vector3(1150.01172, 84.81641, 3882.36914), 165.0, "NGC 6231 Sector")),
("ngc 6242 sector", sector.HASector(vector3.Vector3(923.09375, 154.51953, 3569.33203), 97.0, "NGC 6242 Sector")),
("tr 24 sector", sector.HASector(vector3.Vector3(978.63281, 97.11719, 3577.28125), 500.0, "Tr 24 Sector")),
("ngc 6250 sector", sector.HASector(vector3.Vector3(926.94531, -88.57812, 2661.82812), 83.0, "NGC 6250 Sector")),
("ngc 6259 sector", sector.HASector(vector3.Vector3(1037.94141, -87.95312, 3194.45508), 118.0, "NGC 6259 Sector")),
("ngc 6281 sector", sector.HASector(vector3.Vector3(329.46484, 54.44141, 1523.83984), 37.0, "NGC 6281 Sector")),
("ngc 6322 sector", sector.HASector(vector3.Vector3(823.50781, -175.75781, 3139.01953), 48.0, "NGC 6322 Sector")),
("ic 4651 sector", sector.HASector(vector3.Vector3(977.73438, -398.58984, 2700.95703), 85.0, "IC 4651 Sector")),
("ngc 6383 sector", sector.HASector(vector3.Vector3(235.09375, 5.60156, 3201.37500), 187.0, "NGC 6383 Sector")),
("m6 sector", sector.HASector(vector3.Vector3(94.28906, -19.42578, 1587.08203), 93.0, "M6 Sector")),
("ngc 6416 sector", sector.HASector(vector3.Vector3(126.60547, -67.57031, 2415.74219), 99.0, "NGC 6416 Sector")),
("ic 4665 sector", sector.HASector(vector3.Vector3(-559.51953, 338.14453, 946.09570), 235.0, "IC 4665 Sector")),
("ngc 6425 sector", sector.HASector(vector3.Vector3(96.70312, -73.71484, 2637.19922), 77.0, "NGC 6425 Sector")),
("m7 sector", sector.HASector(vector3.Vector3(69.85156, -76.89062, 974.47852), 229.0, "M7 Sector")),
("m23 sector", sector.HASector(vector3.Vector3(-348.48438, 103.71484, 2017.50000), 179.0, "M23 Sector")),
("m20 sector", sector.HASector(vector3.Vector3(-324.17188, -9.28516, 2640.15625), 217.0, "M20 Sector")),
("ngc 6520 sector", sector.HASector(vector3.Vector3(-259.73828, -251.08594, 5127.28906), 90.0, "NGC 6520 Sector")),
("m21 sector", sector.HASector(vector3.Vector3(-526.55469, -27.43750, 3894.46875), 161.0, "M21 Sector")),
("ngc 6530 sector", sector.HASector(vector3.Vector3(-461.04688, -106.03516, 4314.13867), 177.0, "NGC 6530 Sector")),
("ngc 6546 sector", sector.HASector(vector3.Vector3(-388.70312, -74.76172, 3034.29102), 125.0, "NGC 6546 Sector")),
("ngc 6604 sector", sector.HASector(vector3.Vector3(-1735.61328, 164.05469, 5248.01172), 81.0, "NGC 6604 Sector")),
("m16 sector", sector.HASector(vector3.Vector3(-1666.35547, 79.58594, 5450.40625), 100.0, "M16 Sector")),
("m18 sector", sector.HASector(vector3.Vector3(-1037.49219, -73.82422, 4100.12891), 62.0, "M18 Sector")),
("m17 sector", sector.HASector(vector3.Vector3(-1104.42969, -59.19922, 4093.20508), 309.0, "M17 Sector")),
("ngc 6633 sector", sector.HASector(vector3.Vector3(-717.30078, 175.43359, 983.66602), 72.0, "NGC 6633 Sector")),
("m25 sector", sector.HASector(vector3.Vector3(-473.52344, -158.48828, 1957.30859), 177.0, "M25 Sector")),
("ngc 6664 sector", sector.HASector(vector3.Vector3(-1545.53906, -33.16016, 3471.33984), 166.0, "NGC 6664 Sector")),
("ic 4756 sector", sector.HASector(vector3.Vector3(-933.74219, 143.19922, 1266.49805), 184.0, "IC 4756 Sector")),
("m26 sector", sector.HASector(vector3.Vector3(-2112.12891, -264.09375, 4766.29297), 107.0, "M26 Sector")),
("ngc 6705 sector", sector.HASector(vector3.Vector3(-2803.58594, -298.96094, 5431.84570), 232.0, "NGC 6705 Sector")),
("ngc 6709 sector", sector.HASector(vector3.Vector3(-2349.81250, 287.60547, 2591.48047), 143.0, "NGC 6709 Sector")),
("col 394 sector", sector.HASector(vector3.Vector3(-566.87109, -371.35547, 2145.51953), 144.0, "Col 394 Sector")),
("steph 1 sector", sector.HASector(vector3.Vector3(-1125.68750, 339.39453, 480.14648), 74.0, "Steph 1 Sector")),
("ngc 6716 sector", sector.HASector(vector3.Vector3(-672.92188, -428.59375, 2443.02734), 100.0, "NGC 6716 Sector")),
("ngc 6755 sector", sector.HASector(vector3.Vector3(-2887.29297, -137.35547, 3616.84766), 189.0, "NGC 6755 Sector")),
("stock 1 sector", sector.HASector(vector3.Vector3(-902.64844, 41.73828, 514.86133), 243.0, "Stock 1 Sector")),
("ngc 6811 sector", sector.HASector(vector3.Vector3(-3810.01172, 816.57031, 706.14453), 162.0, "NGC 6811 Sector")),
("ngc 6819 sector", sector.HASector(vector3.Vector3(-7320.41406, 1138.13281, 2099.09570), 112.0, "NGC 6819 Sector")),
("ngc 6823 sector", sector.HASector(vector3.Vector3(-5310.76953, -10.76953, 3140.78125), 108.0, "NGC 6823 Sector")),
("ngc 6830 sector", sector.HASector(vector3.Vector3(-4635.60938, -168.04688, 2665.59375), 187.0, "NGC 6830 Sector")),
("ngc 6834 sector", sector.HASector(vector3.Vector3(-6141.51172, 141.15234, 2772.99805), 99.0, "NGC 6834 Sector")),
("ngc 6866 sector", sector.HASector(vector3.Vector3(-4616.57812, 560.05078, 863.96875), 138.0, "NGC 6866 Sector")),
("ngc 6871 sector", sector.HASector(vector3.Vector3(-4891.96484, 187.98047, 1533.04883), 448.0, "NGC 6871 Sector")),
("ngc 6885 sector", sector.HASector(vector3.Vector3(-1769.88281, -139.42188, 806.58203), 57.0, "NGC 6885 Sector")),
("ic 4996 sector", sector.HASector(vector3.Vector3(-5466.14844, 128.18359, 1423.82617), 83.0, "IC 4996 Sector")),
("mel 227 sector", sector.HASector(vector3.Vector3(238.19531, -198.52734, 236.53906), 57.0, "Mel 227 Sector")),
("ngc 6910 sector", sector.HASector(vector3.Vector3(-3635.86328, 129.47656, 726.51758), 108.0, "NGC 6910 Sector")),
("m29 sector", sector.HASector(vector3.Vector3(-3642.46875, 39.16406, 847.62891), 109.0, "M29 Sector")),
("ngc 6939 sector", sector.HASector(vector3.Vector3(-3751.41797, 822.29688, -387.67188), 113.0, "NGC 6939 Sector")),
("ngc 6940 sector", sector.HASector(vector3.Vector3(-2338.53906, -314.58594, 855.78320), 183.0, "NGC 6940 Sector")),
("ngc 7039 sector", sector.HASector(vector3.Vector3(-3096.74609, -91.96484, 108.14062), 127.0, "NGC 7039 Sector")),
("ngc 7063 sector", sector.HASector(vector3.Vector3(-2200.44141, -386.83984, 266.28320), 59.0, "NGC 7063 Sector")),
("ngc 7082 sector", sector.HASector(vector3.Vector3(-4692.53125, -245.98047, -98.29492), 342.0, "NGC 7082 Sector")),
("m39 sector", sector.HASector(vector3.Vector3(-1058.13672, -42.53906, -46.19922), 93.0, "M39 Sector")),
("ic 1396 sector", sector.HASector(vector3.Vector3(-2678.65234, 175.52734, -438.64648), 500.0, "IC 1396 Sector")),
("ic 5146 sector", sector.HASector(vector3.Vector3(-2759.04688, -266.45312, -212.29688), 73.0, "IC 5146 Sector")),
("ngc 7160 sector", sector.HASector(vector3.Vector3(-2478.12109, 286.47656, -617.86523), 38.0, "NGC 7160 Sector")),
("ngc 7209 sector", sector.HASector(vector3.Vector3(-3761.71875, -484.11719, -362.21289), 200.0, "NGC 7209 Sector")),
("ngc 7235 sector", sector.HASector(vector3.Vector3(-8983.79688, 128.58984, -2024.58594), 134.0, "NGC 7235 Sector")),
("ngc 7243 sector", sector.HASector(vector3.Vector3(-2595.76562, -257.61719, -406.48633), 223.0, "NGC 7243 Sector")),
("ngc 7380 sector", sector.HASector(vector3.Vector3(-6928.64453, -113.87891, -2131.52930), 422.0, "NGC 7380 Sector")),
("ngc 7510 sector", sector.HASector(vector3.Vector3(-6320.33984, 0.00000, -2426.15039), 99.0, "NGC 7510 Sector")),
("m52 sector", sector.HASector(vector3.Vector3(-4268.12109, 32.32422, -1794.15430), 203.0, "M52 Sector")),
("ngc 7686 sector", sector.HASector(vector3.Vector3(-3010.24609, -655.51562, -1065.98438), 133.0, "NGC 7686 Sector")),
("ngc 7789 sector", sector.HASector(vector3.Vector3(-6847.17578, -717.10547, -3265.93555), 555.0, "NGC 7789 Sector")),
("ngc 7790 sector", sector.HASector(vector3.Vector3(-8582.57422, -167.54297, -4297.83203), 336.0, "NGC 7790 Sector")),
("ic 410 sector", sector.HASector(vector3.Vector3(-1225.55469, -345.51953, -10926.05273), 150.0, "IC 410 Sector")),
("ngc 3603 sector", sector.HASector(vector3.Vector3(18594.82031, -174.53125, 7362.21094), 150.0, "NGC 3603 Sector")),
("ngc 7822 sector", sector.HASector(vector3.Vector3(-2443.97266, 302.39844, -1332.49805), 100.0, "NGC 7822 Sector")),
("ngc 281 sector", sector.HASector(vector3.Vector3(-6661.27734, -877.87500, -4342.43164), 100.0, "NGC 281 Sector")),
("lbn 623 sector", sector.HASector(vector3.Vector3(-499.50781, -18.84766, -331.87109), 100.0, "LBN 623 Sector")),
("heart sector", sector.HASector(vector3.Vector3(-5321.12500, 117.80469, -5284.10547), 100.0, "Heart Sector")),
("soul sector", sector.HASector(vector3.Vector3(-5095.17969, 117.80469, -5502.29492), 100.0, "Soul Sector")),
("pleiades sector", sector.HASector(vector3.Vector3(-81.75391, -149.41406, -343.34766), 100.0, "Pleiades Sector")),
("perseus dark region", sector.HASector(vector3.Vector3(-359.89844, -316.98438, -1045.22461), 100.0, "Perseus Dark Region")),
("ngc 1333 sector", sector.HASector(vector3.Vector3(-381.21094, -383.42969, -957.94531), 100.0, "NGC 1333 Sector")),
("california sector", sector.HASector(vector3.Vector3(-332.56641, -213.03125, -918.70508), 100.0, "California Sector")),
("ngc 1491 sector", sector.HASector(vector3.Vector3(-4908.28906, -174.52344, -8710.81152), 100.0, "NGC 1491 Sector")),
("hind sector", sector.HASector(vector3.Vector3(-32.95312, -206.39062, -557.28516), 100.0, "Hind Sector")),
("trifid of the north sector", sector.HASector(vector3.Vector3(-643.14844, -402.24609, -2486.87695), 100.0, "Trifid of the North Sector")),
("flaming star sector", sector.HASector(vector3.Vector3(-233.46875, -68.22266, -1682.50977), 100.0, "Flaming Star Sector")),
("ngc 1931 sector", sector.HASector(vector3.Vector3(-743.83984, 36.65234, -6960.26953), 100.0, "NGC 1931 Sector")),
("crab sector", sector.HASector(vector3.Vector3(558.51953, -707.39453, -6941.73242), 100.0, "Crab Sector")),
("running man sector", sector.HASector(vector3.Vector3(586.15625, -425.38281, -1079.56836), 100.0, "Running Man Sector")),
("orion sector", sector.HASector(vector3.Vector3(616.52344, -446.42578, -1107.67383), 100.0, "Orion Sector")),
("col 359 sector", sector.HASector(vector3.Vector3(-393.00781, 175.31641, 686.22852), 566.0, "Col 359 Sector")),
("spirograph sector", sector.HASector(vector3.Vector3(577.89844, -452.66406, -819.22266), 100.0, "Spirograph Sector")),
("ngc 1999 sector", sector.HASector(vector3.Vector3(549.36719, -374.51172, -926.56445), 100.0, "NGC 1999 Sector")),
("flame sector", sector.HASector(vector3.Vector3(428.26172, -280.66797, -858.96289), 100.0, "Flame Sector")),
("horsehead sector", sector.HASector(vector3.Vector3(411.68359, -272.99219, -811.47461), 100.0, "Horsehead Sector")),
("witch head sector", sector.HASector(vector3.Vector3(369.41406, -401.57812, -715.72852), 100.0, "Witch Head Sector")),
("monkey head sector", sector.HASector(vector3.Vector3(1133.31641, 44.67969, -6298.69922), 100.0, "Monkey Head Sector")),
("jellyfish sector", sector.HASector(vector3.Vector3(789.77734, 252.96484, -4930.74609), 100.0, "Jellyfish Sector")),
("rosette sector", sector.HASector(vector3.Vector3(2346.98438, -175.72266, -4748.76562), 100.0, "Rosette Sector")),
("hubble's variable sector", sector.HASector(vector3.Vector3(1210.32422, 68.06250, -2744.17188), 100.0, "Hubble's Variable Sector")),
("cone sector", sector.HASector(vector3.Vector3(855.44141, 84.45312, -2025.11328), 100.0, "Cone Sector")),
("seagull sector", sector.HASector(vector3.Vector3(2656.38672, -159.12891, -2712.61523), 100.0, "Seagull Sector")),
("thor's helmet sector", sector.HASector(vector3.Vector3(2704.18750, -19.17578, -2469.26172), 100.0, "Thor's Helmet Sector")),
("skull and crossbones neb. sector", sector.HASector(vector3.Vector3(13388.46094, 104.71875, -6762.99805), 100.0, "Skull and Crossbones Neb. Sector")),
("pencil sector", sector.HASector(vector3.Vector3(813.80078, 2.84375, -44.07422), 100.0, "Pencil Sector")),
("ngc 3199 sector", sector.HASector(vector3.Vector3(14577.19531, -261.78516, 3526.59375), 100.0, "NGC 3199 Sector")),
("eta carina sector", sector.HASector(vector3.Vector3(8582.39453, -141.36719, 2706.01758), 100.0, "Eta Carina Sector")),
("statue of liberty sector", sector.HASector(vector3.Vector3(5589.73047, -73.30078, 2179.34375), 100.0, "Statue of Liberty Sector")),
("ngc 5367 sector", sector.HASector(vector3.Vector3(1348.62500, 755.99219, 1421.15430), 100.0, "NGC 5367 Sector")),
("ngc 6188 sector", sector.HASector(vector3.Vector3(1704.75391, -84.46875, 4055.45117), 100.0, "NGC 6188 Sector")),
("cat's paw sector", sector.HASector(vector3.Vector3(850.85938, 57.59375, 5433.48047), 100.0, "Cat's Paw Sector")),
("ngc 6357 sector", sector.HASector(vector3.Vector3(964.84375, 142.23828, 8091.43555), 100.0, "NGC 6357 Sector")),
("trifid sector", sector.HASector(vector3.Vector3(-633.71094, -27.22656, 5161.16992), 100.0, "Trifid Sector")),
("lagoon sector", sector.HASector(vector3.Vector3(-470.27344, -94.24219, 4474.36719), 100.0, "Lagoon Sector")),
("eagle sector", sector.HASector(vector3.Vector3(-2046.40234, 97.73438, 6693.48047), 100.0, "Eagle Sector")),
("omega sector", sector.HASector(vector3.Vector3(-1432.63672, -76.79297, 5309.58203), 100.0, "Omega Sector")),
("b133 sector", sector.HASector(vector3.Vector3(-474.18359, -111.46875, 873.33984), 100.0, "B133 Sector")),
("ic 1287 sector", sector.HASector(vector3.Vector3(-358.35547, -8.72656, 933.54492), 100.0, "IC 1287 Sector")),
("r cra sector", sector.HASector(vector3.Vector3(0.00000, -128.39062, 399.89453), 100.0, "R CrA Sector")),
("ngc 6820 sector", sector.HASector(vector3.Vector3(-5577.41406, -11.34375, 3338.01367), 100.0, "NGC 6820 Sector")),
("crescent sector", sector.HASector(vector3.Vector3(-4836.49219, 209.37891, 1250.80273), 100.0, "Crescent Sector")),
("sadr region sector", sector.HASector(vector3.Vector3(-1794.68359, 53.71094, 365.84961), 100.0, "Sadr Region Sector")),
("veil west sector", sector.HASector(vector3.Vector3(-1395.62891, -194.41797, 418.70898), 100.0, "Veil West Sector")),
("north america sector", sector.HASector(vector3.Vector3(-1893.85547, -33.16016, 149.04883), 100.0, "North America Sector")),
("b352 sector", sector.HASector(vector3.Vector3(-1896.42969, 9.94922, 115.99023), 100.0, "B352 Sector")),
("pelican sector", sector.HASector(vector3.Vector3(-1891.56641, 3.31641, 178.80469), 100.0, "Pelican Sector")),
("veil east sector", sector.HASector(vector3.Vector3(-1914.36328, -305.97266, 491.52539), 100.0, "Veil East Sector")),
("iris sector", sector.HASector(vector3.Vector3(-1410.35547, 367.96094, -354.25781), 100.0, "Iris Sector")),
("elephant's trunk sector", sector.HASector(vector3.Vector3(-2658.95703, 174.23828, -435.41992), 100.0, "Elephant's Trunk Sector")),
("cocoon sector", sector.HASector(vector3.Vector3(-3175.87891, -306.70703, -244.37109), 100.0, "Cocoon Sector")),
("cave sector", sector.HASector(vector3.Vector3(-2250.06641, 108.87109, -827.86328), 100.0, "Cave Sector")),
("ngc 7538 sector", sector.HASector(vector3.Vector3(-8372.94141, 125.66016, -3298.18945), 100.0, "NGC 7538 Sector")),
("bubble sector", sector.HASector(vector3.Vector3(-6573.64062, 24.78516, -2682.65234), 100.0, "Bubble Sector")),
("aries dark region", sector.HASector(vector3.Vector3(-93.57031, -184.53516, -257.08398), 100.0, "Aries Dark Region")),
("taurus dark region", sector.HASector(vector3.Vector3(-62.37891, -103.47656, -443.84766), 100.0, "Taurus Dark Region")),
("orion dark region", sector.HASector(vector3.Vector3(596.77344, -311.86719, -1340.37305), 100.0, "Orion Dark Region")),
("messier 78 sector", sector.HASector(vector3.Vector3(665.03125, -395.19922, -1400.55469), 100.0, "Messier 78 Sector")),
("barnard's loop sector", sector.HASector(vector3.Vector3(726.50391, -365.36328, -1377.93555), 100.0, "Barnard's Loop Sector")),
("puppis dark region", sector.HASector(vector3.Vector3(1440.26562, -286.21484, -306.13672), 100.0, "Puppis Dark Region")),
("puppis dark region b sector", sector.HASector(vector3.Vector3(1352.29688, 0.00000, -362.34570), 100.0, "Puppis Dark Region B Sector")),
("vela dark region", sector.HASector(vector3.Vector3(991.18750, -121.87109, -51.94531), 100.0, "Vela Dark Region")),
("musca dark region", sector.HASector(vector3.Vector3(415.92578, -68.19531, 249.91211), 100.0, "Musca Dark Region")),
("coalsack sector", sector.HASector(vector3.Vector3(418.85938, -0.87109, 273.05078), 100.0, "Coalsack Sector")),
("chamaeleon sector", sector.HASector(vector3.Vector3(483.30078, -152.70312, 301.99805), 100.0, "Chamaeleon Sector")),
("coalsack dark region", sector.HASector(vector3.Vector3(450.26562, -9.07422, 259.96094), 100.0, "Coalsack Dark Region")),
("lupus dark region b sector", sector.HASector(vector3.Vector3(173.39062, 81.61328, 429.15625), 100.0, "Lupus Dark Region B Sector")),
("lupus dark region", sector.HASector(vector3.Vector3(158.46484, 126.79297, 412.81055), 100.0, "Lupus Dark Region")),
("scorpius dark region", sector.HASector(vector3.Vector3(110.22656, 0.00000, 477.44141), 100.0, "Scorpius Dark Region")),
("ic 4604 sector", sector.HASector(vector3.Vector3(62.72266, 182.41797, 568.14453), 100.0, "IC 4604 Sector")),
("pipe (stem) sector", sector.HASector(vector3.Vector3(12.15234, 51.39453, 497.20312), 100.0, "Pipe (stem) Sector")),
("ophiuchus dark region b sector", sector.HASector(vector3.Vector3(-42.85156, 169.29688, 489.79883), 100.0, "Ophiuchus Dark Region B Sector")),
("scutum dark region", sector.HASector(vector3.Vector3(-274.66016, 11.34375, 589.00977), 100.0, "Scutum Dark Region")),
("b92 sector", sector.HASector(vector3.Vector3(-142.89062, -6.80859, 634.06250), 100.0, "B92 Sector")),
("snake sector", sector.HASector(vector3.Vector3(-18.70703, 73.12109, 595.23438), 100.0, "Snake Sector")),
("pipe (bowl) sector", sector.HASector(vector3.Vector3(-11.31250, 36.61719, 498.52930), 100.0, "Pipe (bowl) Sector")),
("ophiuchus dark region c sector", sector.HASector(vector3.Vector3(-9.00781, 63.37109, 516.04492), 100.0, "Ophiuchus Dark Region C Sector")),
("rho ophiuchi sector", sector.HASector(vector3.Vector3(52.26953, 152.01562, 473.45508), 100.0, "Rho Ophiuchi Sector")),
("ophiuchus dark region", sector.HASector(vector3.Vector3(43.33984, 152.03516, 495.38672), 100.0, "Ophiuchus Dark Region")),
("corona austr. dark region", sector.HASector(vector3.Vector3(-8.52734, -177.85156, 488.56641), 100.0, "Corona Austr. Dark Region")),
("aquila dark region", sector.HASector(vector3.Vector3(-719.23047, -17.45312, 694.55273), 100.0, "Aquila Dark Region")),
("vulpecula dark region", sector.HASector(vector3.Vector3(-543.80859, 45.33984, 353.15234), 100.0, "Vulpecula Dark Region")),
("cepheus dark region", sector.HASector(vector3.Vector3(-1373.48438, 243.10938, -120.16406), 100.0, "Cepheus Dark Region")),
("cepheus dark region b sector", sector.HASector(vector3.Vector3(-945.42578, 241.92188, -218.26953), 100.0, "Cepheus Dark Region B Sector")),
("horsehead dark region", sector.HASector(vector3.Vector3(608.46094, -404.64453, -1194.16992), 200.0, "Horsehead Dark Region")),
("parrot's head sector", sector.HASector(vector3.Vector3(19.11719, -90.63281, 995.70117), 100.0, "Parrot's Head Sector")),
("struve's lost sector", sector.HASector(vector3.Vector3(-30.95703, -178.36719, -466.07617), 100.0, "Struve's Lost Sector")),
("bow-tie sector", sector.HASector(vector3.Vector3(-2985.95312, 601.75000, -1723.94141), 100.0, "Bow-Tie Sector")),
("skull sector", sector.HASector(vector3.Vector3(-369.61719, -1543.29297, -204.04102), 100.0, "Skull Sector")),
("little dumbbell sector", sector.HASector(vector3.Vector3(-1560.71484, -382.69531, -1351.93164), 100.0, "Little Dumbbell Sector")),
("ic 289 sector", sector.HASector(vector3.Vector3(-1118.43359, 83.04297, -1277.57812), 100.0, "IC 289 Sector")),
("ngc 1360 sector", sector.HASector(vector3.Vector3(437.24219, -925.14844, -513.75586), 100.0, "NGC 1360 Sector")),
("ngc 1501 sector", sector.HASector(vector3.Vector3(-2071.58984, 413.77344, -2915.01367), 100.0, "NGC 1501 Sector")),
("ngc 1514 sector", sector.HASector(vector3.Vector3(-202.23438, -218.68750, -807.39844), 100.0, "NGC 1514 Sector")),
("ngc 1535 sector", sector.HASector(vector3.Vector3(1422.89844, -2733.25000, -2853.89062), 100.0, "NGC 1535 Sector")),
("ngc 2022 sector", sector.HASector(vector3.Vector3(2934.63281, -1966.59375, -9781.63867), 100.0, "NGC 2022 Sector")),
("ic 2149 sector", sector.HASector(vector3.Vector3(-1688.68359, 1312.09766, -6875.08203), 100.0, "IC 2149 Sector")),
("ic 2165 sector", sector.HASector(vector3.Vector3(9024.47656, -3006.29297, -10272.34375), 100.0, "IC 2165 Sector")),
("butterfly sector", sector.HASector(vector3.Vector3(1747.16797, 188.37109, -2431.44336), 100.0, "Butterfly Sector")),
("ngc 2371/2 sector", sector.HASector(vector3.Vector3(661.47266, 1497.67188, -4084.04688), 100.0, "NGC 2371/2 Sector")),
("eskimo sector", sector.HASector(vector3.Vector3(234.63281, 239.23438, -726.43945), 100.0, "Eskimo Sector")),
("ngc 2438 sector", sector.HASector(vector3.Vector3(2508.30469, 228.79297, -1973.84180), 100.0, "NGC 2438 Sector")),
("ngc 2440 sector", sector.HASector(vector3.Vector3(4653.64062, 238.69141, -3282.78125), 100.0, "NGC 2440 Sector")),
("ngc 2452 sector", sector.HASector(vector3.Vector3(9387.19141, -183.25000, -4700.75391), 100.0, "NGC 2452 Sector")),
("ic 2448 sector", sector.HASector(vector3.Vector3(8457.82422, -2355.25391, 2393.32227), 100.0, "IC 2448 Sector")),
("ngc 2792 sector", sector.HASector(vector3.Vector3(8157.05078, 586.27734, -599.01562), 100.0, "NGC 2792 Sector")),
("ngc 2818 sector", sector.HASector(vector3.Vector3(8322.63672, 1271.05078, -1169.66992), 100.0, "NGC 2818 Sector")),
("ngc 2867 sector", sector.HASector(vector3.Vector3(12208.21094, -1274.62891, 1759.23047), 100.0, "NGC 2867 Sector")),
("ngc 2899 sector", sector.HASector(vector3.Vector3(6434.56641, -430.78125, 812.87500), 100.0, "NGC 2899 Sector")),
("ic 2501 sector", sector.HASector(vector3.Vector3(18754.05469, -1906.93750, 3645.41797), 100.0, "IC 2501 Sector")),
("eight burst sector", sector.HASector(vector3.Vector3(2049.63281, 450.94531, 75.15625), 100.0, "Eight Burst Sector")),
("ic 2553 sector", sector.HASector(vector3.Vector3(12855.33984, -1261.05078, 3565.10156), 100.0, "IC 2553 Sector")),
("ngc 3195 sector", sector.HASector(vector3.Vector3(4656.55469, -1895.47656, 2331.83008), 100.0, "NGC 3195 Sector")),
("ngc 3211 sector", sector.HASector(vector3.Vector3(8797.93750, -785.83594, 2572.69727), 100.0, "NGC 3211 Sector")),
("ghost of jupiter sector", sector.HASector(vector3.Vector3(1171.69141, 743.95703, -183.48242), 100.0, "Ghost of Jupiter Sector")),
("ic 2621 sector", sector.HASector(vector3.Vector3(14360.99219, -1297.00781, 5685.91992), 100.0, "IC 2621 Sector")),
("owl sector", sector.HASector(vector3.Vector3(-624.37891, 1847.16406, -1018.89062), 100.0, "Owl Sector")),
("ngc 3699 sector", sector.HASector(vector3.Vector3(4150.35156, 102.09375, 1736.13086), 100.0, "NGC 3699 Sector")),
("blue planetary sector", sector.HASector(vector3.Vector3(4527.26562, 409.69141, 2082.31055), 100.0, "Blue planetary Sector")),
("ngc 4361 sector", sector.HASector(vector3.Vector3(3106.92969, 3241.21094, 1389.79688), 100.0, "NGC 4361 Sector")),
("lemon slice sector", sector.HASector(vector3.Vector3(-3085.35938, 2548.82812, -2057.67773), 100.0, "Lemon Slice Sector")),
("ic 4191 sector", sector.HASector(vector3.Vector3(11811.59375, -1204.96094, 8148.27148), 100.0, "IC 4191 Sector")),
("spiral planetary sector", sector.HASector(vector3.Vector3(1415.32812, -105.56641, 1074.29297), 100.0, "Spiral Planetary Sector")),
("ngc 5307 sector", sector.HASector(vector3.Vector3(5879.41797, 1490.00781, 5368.64453), 100.0, "NGC 5307 Sector")),
("ngc 5315 sector", sector.HASector(vector3.Vector3(6499.57812, -644.44141, 5282.06250), 100.0, "NGC 5315 Sector")),
("retina sector", sector.HASector(vector3.Vector3(1867.97656, 811.80078, 2202.64258), 100.0, "Retina Sector")),
("ngc 5873 sector", sector.HASector(vector3.Vector3(13791.82031, 8670.95312, 25191.27344), 100.0, "NGC 5873 Sector")),
("ngc 5882 sector", sector.HASector(vector3.Vector3(4616.64062, 1543.22656, 7331.10352), 100.0, "NGC 5882 Sector")),
("ngc 5979 sector", sector.HASector(vector3.Vector3(5443.01172, -831.33594, 7119.16406), 100.0, "NGC 5979 Sector")),
("fine ring sector", sector.HASector(vector3.Vector3(513.22656, 34.89844, 857.54297), 100.0, "Fine Ring Sector")),
("ngc 6058 sector", sector.HASector(vector3.Vector3(-5472.94922, 6794.40625, 2587.05273), 100.0, "NGC 6058 Sector")),
("white eyed pea sector", sector.HASector(vector3.Vector3(-3882.09375, 7841.04688, 8212.63281), 100.0, "White Eyed Pea Sector")),
("ngc 6153 sector", sector.HASector(vector3.Vector3(1670.20703, 508.18359, 5110.00586), 100.0, "NGC 6153 Sector")),
("ngc 6210 sector", sector.HASector(vector3.Vector3(-2861.42969, 3248.40625, 3057.78906), 100.0, "NGC 6210 Sector")),
("ic 4634 sector", sector.HASector(vector3.Vector3(-51.17578, 1584.93750, 7330.44141), 100.0, "IC 4634 Sector")),
("bug sector", sector.HASector(vector3.Vector3(619.48828, 65.26953, 3342.45117), 100.0, "Bug Sector")),
("box sector", sector.HASector(vector3.Vector3(-1759.31250, 2758.81250, 10292.41406), 100.0, "Box Sector")),
("ngc 6326 sector", sector.HASector(vector3.Vector3(4041.22266, -1606.91406, 10103.77734), 100.0, "NGC 6326 Sector")),
("ngc 6337 sector", sector.HASector(vector3.Vector3(901.19531, -94.06641, 4815.49609), 100.0, "NGC 6337 Sector")),
("little ghost sector", sector.HASector(vector3.Vector3(-204.10547, 503.68359, 4869.76758), 100.0, "Little Ghost Sector")),
("ic 4663 sector", sector.HASector(vector3.Vector3(1523.71094, -927.08984, 6250.50586), 100.0, "IC 4663 Sector")),
("ngc 6445 sector", sector.HASector(vector3.Vector3(-632.58594, 306.07031, 4444.78906), 100.0, "NGC 6445 Sector")),
("cat's eye sector", sector.HASector(vector3.Vector3(-2809.64062, 1626.06641, -320.11719), 100.0, "Cat's Eye Sector")),
("ic 4673 sector", sector.HASector(vector3.Vector3(-840.65625, -561.13281, 13361.82812), 100.0, "IC 4673 Sector")),
("red spider sector", sector.HASector(vector3.Vector3(-526.06250, 36.65234, 2953.28906), 100.0, "Red Spider Sector")),
("ngc 6565 sector", sector.HASector(vector3.Vector3(-359.02734, -473.17188, 5870.02539), 100.0, "NGC 6565 Sector")),
("ngc 6563 sector", sector.HASector(vector3.Vector3(80.49219, -393.89844, 3073.81836), 100.0, "NGC 6563 Sector")),
("ngc 6572 sector", sector.HASector(vector3.Vector3(-4333.99219, 1608.39453, 6282.48047), 100.0, "NGC 6572 Sector")),
("ngc 6567 sector", sector.HASector(vector3.Vector3(-851.64453, -51.31250, 4112.42969), 100.0, "NGC 6567 Sector")),
("ic 4699 sector", sector.HASector(vector3.Vector3(4137.37891, -4924.67578, 19464.83203), 100.0, "IC 4699 Sector")),
("ngc 6629 sector", sector.HASector(vector3.Vector3(-1041.14844, -568.92188, 6289.06445), 100.0, "NGC 6629 Sector")),
("ngc 6644 sector", sector.HASector(vector3.Vector3(-1420.00781, -1245.23438, 9616.28516), 100.0, "NGC 6644 Sector")),
("ic 4776 sector", sector.HASector(vector3.Vector3(-855.50781, -5561.94922, 23330.94141), 100.0, "IC 4776 Sector")),
("ring sector", sector.HASector(vector3.Vector3(-1977.24219, 552.30859, 998.77734), 100.0, "Ring Sector")),
("phantom streak sector", sector.HASector(vector3.Vector3(-3611.90625, -306.19141, 5395.40234), 100.0, "Phantom Streak Sector")),
("ngc 6751 sector", sector.HASector(vector3.Vector3(-3105.76172, -657.87109, 5557.10742), 100.0, "NGC 6751 Sector")),
("ic 4846 sector", sector.HASector(vector3.Vector3(-11325.47656, -4178.53516, 21663.64062), 100.0, "IC 4846 Sector")),
("ic 1297 sector", sector.HASector(vector3.Vector3(215.14844, -2871.37109, 7249.06445), 100.0, "IC 1297 Sector")),
("ngc 6781 sector", sector.HASector(vector3.Vector3(-3394.65625, -266.91406, 3796.71680), 100.0, "NGC 6781 Sector")),
("ngc 6790 sector", sector.HASector(vector3.Vector3(-2014.89844, -362.12500, 2588.25195), 100.0, "NGC 6790 Sector")),
("ngc 6803 sector", sector.HASector(vector3.Vector3(-4117.21484, -407.53516, 3920.77148), 100.0, "NGC 6803 Sector")),
("ngc 6804 sector", sector.HASector(vector3.Vector3(-3573.00781, -400.99609, 3474.59766), 100.0, "NGC 6804 Sector")),
("little gem sector", sector.HASector(vector3.Vector3(-2493.94922, -1844.14062, 5136.08398), 100.0, "Little Gem Sector")),
("blinking sector", sector.HASector(vector3.Vector3(-1938.14453, 443.09766, 217.39844), 100.0, "Blinking Sector")),
("ngc 6842 sector", sector.HASector(vector3.Vector3(-5476.70312, 62.83203, 2449.84766), 100.0, "NGC 6842 Sector")),
("dumbbell sector", sector.HASector(vector3.Vector3(-958.21094, -70.98438, 535.52734), 100.0, "Dumbbell Sector")),
("ngc 6852 sector", sector.HASector(vector3.Vector3(-3276.57812, -1251.89844, 3563.25391), 100.0, "NGC 6852 Sector")),
("ngc 6884 sector", sector.HASector(vector3.Vector3(-2457.28516, 309.00391, 340.97656), 100.0, "NGC 6884 Sector")),
("ngc 6879 sector", sector.HASector(vector3.Vector3(-17024.14453, -3171.56250, 10971.31250), 100.0, "NGC 6879 Sector")),
("ngc 6886 sector", sector.HASector(vector3.Vector3(-7731.72266, -1205.87500, 4445.93750), 100.0, "NGC 6886 Sector")),
("ngc 6891 sector", sector.HASector(vector3.Vector3(-6740.87891, -1781.75781, 4861.67578), 100.0, "NGC 6891 Sector")),
("ic 4997 sector", sector.HASector(vector3.Vector3(-6681.43359, -1526.47266, 4126.53711), 100.0, "IC 4997 Sector")),
("blue flash sector", sector.HASector(vector3.Vector3(-2599.53125, 500.30469, 1411.42969), 100.0, "Blue Flash Sector")),
("fetus sector", sector.HASector(vector3.Vector3(-2881.56641, 277.95312, -171.19727), 100.0, "Fetus Sector")),
("saturn sector", sector.HASector(vector3.Vector3(-2623.43359, -2952.78906, 3382.10742), 100.0, "Saturn Sector")),
("ngc 7026 sector", sector.HASector(vector3.Vector3(-5998.94141, 41.88672, 104.71094), 100.0, "NGC 7026 Sector")),
("ngc 7027 sector", sector.HASector(vector3.Vector3(-3380.22266, -207.56641, 301.67773), 100.0, "NGC 7027 Sector")),
("ngc 7048 sector", sector.HASector(vector3.Vector3(-5596.30859, -166.13281, 117.22656), 100.0, "NGC 7048 Sector")),
("ic 5117 sector", sector.HASector(vector3.Vector3(-2988.11719, -266.68359, 5.21484), 100.0, "IC 5117 Sector")),
("ic 5148 sector", sector.HASector(vector3.Vector3(-86.22656, -2376.86719, 1828.40430), 100.0, "IC 5148 Sector")),
("ic 5217 sector", sector.HASector(vector3.Vector3(-9198.58594, -884.61719, -1721.46875), 100.0, "IC 5217 Sector")),
("helix sector", sector.HASector(vector3.Vector3(-222.85938, -583.28516, 304.50195), 100.0, "Helix Sector")),
("ngc 7354 sector", sector.HASector(vector3.Vector3(-3995.72266, 168.55469, -1282.88672), 100.0, "NGC 7354 Sector")),
("blue snowball sector", sector.HASector(vector3.Vector3(-5024.05469, -1663.03516, -1497.73438), 100.0, "Blue Snowball Sector")),
("g2 dust cloud sector", sector.HASector(vector3.Vector3(27.12500, -22.49609, 27899.97656), 100.0, "G2 Dust Cloud Sector")),
("regor sector", sector.HASector(vector3.Vector3(1099.23828, -146.67188, -133.58008), 100.0, "Regor Sector")),
# These cluster coords are fake, and are a fudge to give the right origins for generating ICZ's PG names
("icz", sector.HASectorCluster(vector3.Vector3(60, -120, 55), 100, 40, "ICZ", [
# The following coords/radii are the real spheres that make up ICZ
sector.HASector(vector3.Vector3(11, -118, 56), 40, "ICZ"),
sector.HASector(vector3.Vector3(17, -122, 32), 40, "ICZ"),
sector.HASector(vector3.Vector3(32, -170, 13), 40, "ICZ"),
sector.HASector(vector3.Vector3(34, -115, 100), 40, "ICZ"),
sector.HASector(vector3.Vector3(45, -118, 85), 40, "ICZ"),
sector.HASector(vector3.Vector3(53, -130, 14), 40, "ICZ"),
sector.HASector(vector3.Vector3(62, -105, 22), 40, "ICZ"),
sector.HASector(vector3.Vector3(65, -117, 47), 40, "ICZ"),
sector.HASector(vector3.Vector3(67, -119, 24), 40, "ICZ"),
sector.HASector(vector3.Vector3(75, -135, 19), 40, "ICZ"),
sector.HASector(vector3.Vector3(78, -100, 16), 40, "ICZ"),
sector.HASector(vector3.Vector3(79, -167, 25), 40, "ICZ"),
sector.HASector(vector3.Vector3(81, -150, 96), 40, "ICZ"),
sector.HASector(vector3.Vector3(82, -131, 0), 40, "ICZ"),
sector.HASector(vector3.Vector3(92, -95, 11), 40, "ICZ"),
sector.HASector(vector3.Vector3(106, -95, 0), 40, "ICZ"),
])),
# Permit regions
("bleia1", sector.HASector(vector3.Vector3(-43, 155, 37500), 512, "Bleia1")),
("bleia2", sector.HASector(vector3.Vector3(-43, 155, 37000), 512, "Bleia2")),
("bleia3", sector.HASector(vector3.Vector3(-43, 155, 36500), 512, "Bleia3")),
("bleia4", sector.HASector(vector3.Vector3(450, 155, 37000), 512, "Bleia4")),
("bleia5", sector.HASector(vector3.Vector3(-450, 155, 37000), 512, "Bleia5")),
("bovomit", sector.HASector(vector3.Vector3(-20070, 90, -6930), 512, "Bovomit")),
("dryman", sector.HASector(vector3.Vector3(19100, 20, 21160), 512, "Dryman")),
("froadik", sector.HASector(vector3.Vector3(-18860, -200, 14300), 512, "Froadik")),
("hyponia", sector.HASector(vector3.Vector3(-23020, -10, 24080), 512, "Hyponia")),
("praei1", sector.HASector(vector3.Vector3(-1000, -155, 54000), 512, "Praei1")),
("praei2", sector.HASector(vector3.Vector3(-1000, -155, 54400), 512, "Praei2")),
("praei3", sector.HASector(vector3.Vector3(-1000, -155, 53600), 512, "Praei3")),
("praei4", sector.HASector(vector3.Vector3(-1000, -555, 54000), 512, "Praei4")),
("praei5", sector.HASector(vector3.Vector3(-1000, 455, 54000), 512, "Praei5")),
("praei6", sector.HASector(vector3.Vector3(-500, -100, 53500), 512, "Praei6")),
("sidgoir", sector.HASector(vector3.Vector3(-24120, 10, -1220), 100, "Sidgoir")),
])
# Sort by increasing size for checks, so smaller sectors are checked first.
# NOTE: `sorted` is guaranteed stable, so sectors of identical size keep
# their existing relative order, and OrderedDict then preserves that order.
ha_sectors = collections.OrderedDict(sorted(ha_sectors.items(), key=lambda t: t[1].size))
|
KayJohnston/jackies-map
|
pgdata.py
|
Python
|
bsd-3-clause
| 62,783
|
[
"Galaxy"
] |
0458542664aee1256fd9e675071105f3f5c2df0dab17a3c3011167b4c5196706
|
# Original code: Copyright 2014 The University of Melbourne
# Copyright 2015 VPAC
#
# This file is part of Karaage.
#
# Karaage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Karaage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Karaage If not, see <http://www.gnu.org/licenses/>.
"""Test all pages render (without exceptions)."""
from __future__ import print_function, unicode_literals
import six
import re
import unittest
from django.conf import settings
from django.contrib.admindocs.views import simplify_regex
from django.test import TestCase
from django.utils.text import slugify
from django.utils.encoding import smart_text
from django.core.exceptions import ViewDoesNotExist
from django.core.urlresolvers import RegexURLPattern, RegexURLResolver, \
LocaleRegexURLResolver
from django.utils import translation
from karaage.middleware.threadlocals import reset
urlconf = __import__(settings.ROOT_URLCONF, {}, {}, [''])
def extract_views_from_urlpatterns(urlpatterns, base='', namespace=None):
    """
    Walk a urlpatterns list (recursively) and collect the views it exposes.

    Returns a list of three-tuples: (view_func, full_regex, qualified_name),
    where qualified_name is the pattern name prefixed with its namespace
    when one applies.
    """
    languages = getattr(settings, 'LANGUAGES', ((None, None), ))
    collected = []
    for entry in urlpatterns:
        if isinstance(entry, RegexURLPattern):
            try:
                if not entry.name:
                    qualified_name = entry.name
                elif namespace:
                    qualified_name = '{0}:{1}'.format(namespace, entry.name)
                else:
                    qualified_name = entry.name
                collected.append(
                    (entry.callback, base + entry.regex.pattern,
                     qualified_name))
            except ViewDoesNotExist:
                continue
        elif isinstance(entry, RegexURLResolver):
            try:
                sub_patterns = entry.url_patterns
            except ImportError:
                # Broken included URLconf: skip it rather than fail.
                continue
            if namespace and entry.namespace:
                sub_namespace = '{0}:{1}'.format(namespace, entry.namespace)
            else:
                sub_namespace = (entry.namespace or namespace)
            prefix = base + entry.regex.pattern
            if isinstance(entry, LocaleRegexURLResolver):
                # i18n patterns: visit the sub-patterns once per language.
                for language in languages:
                    with translation.override(language[0]):
                        collected.extend(
                            extract_views_from_urlpatterns(
                                sub_patterns, prefix,
                                namespace=sub_namespace))
            else:
                collected.extend(extract_views_from_urlpatterns(
                    sub_patterns, prefix, namespace=sub_namespace))
        elif hasattr(entry, '_get_callback'):
            try:
                collected.append(
                    (entry._get_callback(), base + entry.regex.pattern,
                     entry.name))
            except ViewDoesNotExist:
                continue
        elif hasattr(entry, 'url_patterns') or hasattr(entry, '_get_url_patterns'):
            try:
                sub_patterns = entry.url_patterns
            except ImportError:
                continue
            collected.extend(
                extract_views_from_urlpatterns(
                    sub_patterns, base + entry.regex.pattern,
                    namespace=namespace))
        else:
            raise TypeError(
                "%s does not appear to be a urlpattern object" % (entry,))
    return collected
def make_test_get_function(name, url, url_pattern):
    """Build a test method that logs in, GETs *url* (following redirects),
    and accepts status codes 200, 400 or 403.

    The returned function is renamed to *name* so test runners report it
    under a meaningful identifier.
    """
    def test_get(self):
        logged_in = self.client.login(username='kgsuper', password='aq12ws')
        self.assertEqual(logged_in, True, 'Login failed.')
        response = self.client.get(url, follow=True)
        message = 'HTTP Error {}: {} > {}'.format(
            response.status_code,
            url_pattern,
            url,
        )
        self.assertIn(response.status_code, [200, 400, 403], message)
    test_get.__name__ = str(name)
    return test_get
class TestAllPagesMeta(type):
    """Metaclass that auto-generates one ``test_*`` GET method per URL.

    Concrete subclasses are expected to define ``module`` (dotted module
    prefix whose views should be tested) and ``variables`` (mapping of URL
    placeholder name -> sample value) in their class body; a test method
    is then added for every matching URL pattern.
    """
    @classmethod
    def _add_test_methods(mcs, attrs, urlpatterns):
        # loop through every URL pattern
        for index, (func, regex, url_name) in enumerate(
                extract_views_from_urlpatterns(urlpatterns)):
            # Keep only views defined in attrs['module'] or a submodule.
            if func.__module__.startswith("%s." % attrs['module']):
                pass
            elif func.__module__ == attrs['module']:
                pass
            else:
                continue
            # Best-effort human-readable name for the view callable
            # (function, class-based view instance, or opaque object).
            if hasattr(func, '__name__'):
                func_name = func.__name__
            elif hasattr(func, '__class__'):
                func_name = '%s()' % func.__class__.__name__
            else:
                func_name = re.sub(r' at 0x[0-9a-f]+', '', repr(func))
            url_pattern = smart_text(simplify_regex(regex))
            # Build a unique, slug-safe test-method name out of the module
            # path, the view name and the URL pattern.
            name = '_'.join(
                [
                    'test',
                    func.__module__.replace('.', '_'),
                    slugify('%s' % func_name),
                ] + slugify(
                    url_pattern.replace('/', '_') or 'root'
                ).replace('_', ' ').split(),
            )
            url = url_pattern
            # Substitute sample values for the URL's <placeholders>.
            for key, value in attrs['variables'].items():
                url = url.replace('<%s>' % key, value)
            # bail out if we don't know how to visit this URL properly
            testfunc = unittest.skipIf(
                any(
                    re.search(stop_pattern, url)
                    for stop_pattern
                    in [
                        r'<.*>',
                    ]
                ),
                'URL pattern %r contains stop pattern.' % url,
            )(
                make_test_get_function(name, url, url_pattern),
            )
            attrs[name] = testfunc

    def __new__(mcs, name, parents, attrs):
        # The base class declared directly on (TestCase,) has no
        # `module`/`variables` attributes, so test generation is skipped
        # for it and only runs for concrete subclasses.
        if parents != (TestCase,):
            mcs._add_test_methods(attrs, urlconf.urlpatterns)
        return super(TestAllPagesMeta, mcs).__new__(mcs, name, parents, attrs)
@six.add_metaclass(TestAllPagesMeta)
class TestAllPagesCase(TestCase):
    """Base case for the auto-generated page tests.

    Declared directly on (TestCase,), so TestAllPagesMeta generates no
    tests here; project-specific subclasses supply `module`/`variables`
    and receive the generated test methods.
    """
    def setUp(self):
        super(TestAllPagesCase, self).setUp()
        def cleanup():
            # Clear karaage's thread-local state between tests.
            reset()
        self.addCleanup(cleanup)
|
Karaage-Cluster/karaage-debian
|
karaage/tests/client.py
|
Python
|
gpl-3.0
| 6,504
|
[
"VisIt"
] |
bc8b8ed27f47ccdec0a00bd3a118ed7329f7bd330296e8e4eb8cf1313a1a6561
|
# -*- coding: utf-8 -*-
import itertools
import functools
import os
import re
import urllib
import logging
import pymongo
import datetime
import urlparse
from collections import OrderedDict
import warnings
import pytz
from flask import request
from django.core.urlresolvers import reverse
from modularodm import Q
from modularodm import fields
from modularodm.validators import MaxLengthValidator
from modularodm.exceptions import NoResultsFound
from modularodm.exceptions import ValidationTypeError
from modularodm.exceptions import ValidationValueError
from api.base.utils import absolute_reverse
from framework import status
from framework.mongo import ObjectId
from framework.mongo import StoredObject
from framework.addons import AddonModelMixin
from framework.auth import get_user, User, Auth
from framework.auth import signals as auth_signals
from framework.exceptions import PermissionsError
from framework.guid.model import GuidStoredObject
from framework.auth.utils import privacy_info_handle
from framework.analytics import tasks as piwik_tasks
from framework.mongo.utils import to_mongo, to_mongo_key, unique_on
from framework.analytics import (
get_basic_counters, increment_user_activity_counters
)
from framework.sentry import log_exception
from framework.transactions.context import TokuTransaction
from framework.utils import iso8601format
from website import language, mails, settings, tokens
from website.util import web_url_for
from website.util import api_url_for
from website.util import sanitize
from website.exceptions import (
NodeStateError,
InvalidSanctionApprovalToken, InvalidSanctionRejectionToken,
)
from website.citations.utils import datetime_to_csl
from website.identifiers.model import IdentifierMixin
from website.util.permissions import expand_permissions
from website.util.permissions import CREATOR_PERMISSIONS, DEFAULT_CONTRIBUTOR_PERMISSIONS, ADMIN
from website.project.metadata.schemas import OSF_META_SCHEMAS
from website.project import signals as project_signals
# Module-level logger named after this module.
logger = logging.getLogger(__name__)
# Template for a node's absolute project URL: configured domain + node id.
VIEW_PROJECT_URL_TEMPLATE = settings.DOMAIN + '{node_id}/'
def has_anonymous_link(node, auth):
    """Return whether *node* is anonymous to the current viewer.

    The view-only key is taken from the auth context if present, otherwise
    from the ``view_only`` query parameter of the current request. A public
    node is never anonymous.

    :param Node node: Node which the user wants to visit
    :param Auth auth: current auth context (may carry a private key)
    :return bool: whether the node is anonymous to the user or not
    """
    link_key = auth.private_key or request.args.get('view_only', '').strip('/')
    if not link_key:
        return False
    if node.is_public:
        return False
    matching_links = (
        link for link in node.private_links_active if link.key == link_key
    )
    return any(link.anonymous for link in matching_links)
class MetaSchema(StoredObject):
    """A named, versioned metadata schema stored as a raw dictionary."""
    # Random Mongo ObjectId string used as the primary key.
    _id = fields.StringField(default=lambda: str(ObjectId()))
    name = fields.StringField()
    # The schema definition itself, stored verbatim as a dictionary.
    schema = fields.DictionaryField()
    category = fields.StringField()
    # Version of the Knockout metadata renderer to use (e.g. if data binds
    # change)
    metadata_version = fields.IntegerField()
    # Version of the schema to use (e.g. if questions, responses change)
    schema_version = fields.IntegerField()
def ensure_schemas(clear=True):
    """Import meta-data schemas from JSON to database, optionally clearing
    database first.

    :param clear: Clear schema database before import
    """
    if clear:
        try:
            MetaSchema.remove()
        except AttributeError:
            # Tolerate a missing/uninitialized collection only in debug
            # mode; in production this is a real error.
            if not settings.DEBUG_MODE:
                raise
    for schema in OSF_META_SCHEMAS:
        try:
            MetaSchema.find_one(
                Q('name', 'eq', schema['name']) &
                Q('schema_version', 'eq', schema['schema_version'])
            )
        except Exception:
            # Was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt. Any lookup failure is treated as "schema
            # not present yet": normalize the name and insert it.
            schema['name'] = schema['name'].replace(' ', '_')
            schema_obj = MetaSchema(**schema)
            schema_obj.save()
class MetaData(GuidStoredObject):
    """Free-form metadata (a dictionary) attached to some other object."""
    _id = fields.StringField(primary=True)
    # The object this metadata describes; reverse lookup via 'metadata'.
    target = fields.AbstractForeignField(backref='metadata')
    data = fields.DictionaryField()
    # Creation/modification timestamps, set automatically in UTC.
    date_created = fields.DateTimeField(auto_now_add=datetime.datetime.utcnow)
    date_modified = fields.DateTimeField(auto_now=datetime.datetime.utcnow)
def validate_comment_reports(value, *args, **kwargs):
    """Validate the `Comment.reports` dictionary.

    :param dict value: mapping of reporting-user ID -> report details
    :raises ValidationValueError: if a key is not a loadable user ID, or a
        report lacks its `category`/`text` entries
    :raises ValidationTypeError: if a report value is not a dictionary
    """
    # `items()` instead of the Python-2-only `iteritems()`: behaviour is
    # identical under Python 2 and keeps the validator portable.
    for key, val in value.items():
        if not User.load(key):
            raise ValidationValueError('Keys must be user IDs')
        if not isinstance(val, dict):
            raise ValidationTypeError('Values must be dictionaries')
        if 'category' not in val or 'text' not in val:
            raise ValidationValueError(
                'Values must include `category` and `text` keys'
            )
class Comment(GuidStoredObject):
    """A user comment attached to a node (or to another commentable target)."""

    _id = fields.StringField(primary=True)

    user = fields.ForeignField('user', required=True, backref='commented')
    node = fields.ForeignField('node', required=True, backref='comment_owner')
    target = fields.AbstractForeignField(required=True, backref='commented')

    date_created = fields.DateTimeField(auto_now_add=datetime.datetime.utcnow)
    date_modified = fields.DateTimeField(auto_now=datetime.datetime.utcnow)
    # True once the comment has been edited at least once.
    modified = fields.BooleanField()

    is_deleted = fields.BooleanField(default=False)
    content = fields.StringField()

    # Dictionary field mapping user IDs to dictionaries of report details:
    # {
    #   'icpnw': {'category': 'hate', 'message': 'offensive'},
    #   'cdi38': {'category': 'spam', 'message': 'godwins law'},
    # }
    reports = fields.DictionaryField(validate=validate_comment_reports)

    def _add_log(self, action, auth):
        """Record `action` on the parent node's log without saving the node.

        Shared by create/edit/delete/undelete, which previously duplicated
        this add_log boilerplate verbatim. Callers are responsible for
        saving the node or comment afterwards.
        """
        self.node.add_log(
            action,
            {
                'project': self.node.parent_id,
                'node': self.node._id,
                'user': self.user._id,
                'comment': self._id,
            },
            auth=auth,
            save=False,
        )

    @classmethod
    def create(cls, auth, **kwargs):
        """Create, save and log a new comment; returns the comment."""
        comment = cls(**kwargs)
        comment.save()
        comment._add_log(NodeLog.COMMENT_ADDED, auth)
        comment.node.save()
        return comment

    def edit(self, content, auth, save=False):
        """Replace the comment's content, mark it modified, and log it."""
        self.content = content
        self.modified = True
        self._add_log(NodeLog.COMMENT_UPDATED, auth)
        if save:
            self.save()

    def delete(self, auth, save=False):
        """Soft-delete the comment and log the removal."""
        self.is_deleted = True
        self._add_log(NodeLog.COMMENT_REMOVED, auth)
        if save:
            self.save()

    def undelete(self, auth, save=False):
        """Restore a soft-deleted comment and log it as added again."""
        self.is_deleted = False
        self._add_log(NodeLog.COMMENT_ADDED, auth)
        if save:
            self.save()

    def report_abuse(self, user, save=False, **kwargs):
        """Report that a comment is abuse.

        :param User user: User submitting the report
        :param bool save: Save changes
        :param dict kwargs: Report details
        :raises: ValueError if the user submitting abuse is the same as the
            user who posted the comment
        """
        if user == self.user:
            raise ValueError('User cannot report their own comment')
        self.reports[user._id] = kwargs
        if save:
            self.save()

    def unreport_abuse(self, user, save=False):
        """Revoke report of abuse.

        :param User user: User who submitted the report
        :param bool save: Save changes
        :raises: ValueError if user has not reported comment as abuse
        """
        try:
            self.reports.pop(user._id)
        except KeyError:
            raise ValueError('User has not reported comment as abuse')
        if save:
            self.save()
@unique_on(['params.node', '_id'])
class NodeLog(StoredObject):
_id = fields.StringField(primary=True, default=lambda: str(ObjectId()))
date = fields.DateTimeField(default=datetime.datetime.utcnow, index=True)
action = fields.StringField(index=True)
params = fields.DictionaryField()
should_hide = fields.BooleanField(default=False)
was_connected_to = fields.ForeignField('node', list=True)
user = fields.ForeignField('user', backref='created')
foreign_user = fields.StringField()
DATE_FORMAT = '%m/%d/%Y %H:%M UTC'
# Log action constants -- NOTE: templates stored in log_templates.mako
CREATED_FROM = 'created_from'
PROJECT_CREATED = 'project_created'
PROJECT_REGISTERED = 'project_registered'
PROJECT_DELETED = 'project_deleted'
NODE_CREATED = 'node_created'
NODE_FORKED = 'node_forked'
NODE_REMOVED = 'node_removed'
POINTER_CREATED = 'pointer_created'
POINTER_FORKED = 'pointer_forked'
POINTER_REMOVED = 'pointer_removed'
WIKI_UPDATED = 'wiki_updated'
WIKI_DELETED = 'wiki_deleted'
WIKI_RENAMED = 'wiki_renamed'
MADE_WIKI_PUBLIC = 'made_wiki_public'
MADE_WIKI_PRIVATE = 'made_wiki_private'
CONTRIB_ADDED = 'contributor_added'
CONTRIB_REMOVED = 'contributor_removed'
CONTRIB_REORDERED = 'contributors_reordered'
PERMISSIONS_UPDATED = 'permissions_updated'
MADE_PRIVATE = 'made_private'
MADE_PUBLIC = 'made_public'
TAG_ADDED = 'tag_added'
TAG_REMOVED = 'tag_removed'
EDITED_TITLE = 'edit_title'
EDITED_DESCRIPTION = 'edit_description'
UPDATED_FIELDS = 'updated_fields'
FILE_MOVED = 'addon_file_moved'
FILE_COPIED = 'addon_file_copied'
FILE_RENAMED = 'addon_file_renamed'
FOLDER_CREATED = 'folder_created'
FILE_ADDED = 'file_added'
FILE_UPDATED = 'file_updated'
FILE_REMOVED = 'file_removed'
FILE_RESTORED = 'file_restored'
ADDON_ADDED = 'addon_added'
ADDON_REMOVED = 'addon_removed'
COMMENT_ADDED = 'comment_added'
COMMENT_REMOVED = 'comment_removed'
COMMENT_UPDATED = 'comment_updated'
MADE_CONTRIBUTOR_VISIBLE = 'made_contributor_visible'
MADE_CONTRIBUTOR_INVISIBLE = 'made_contributor_invisible'
EXTERNAL_IDS_ADDED = 'external_ids_added'
EMBARGO_APPROVED = 'embargo_approved'
EMBARGO_CANCELLED = 'embargo_cancelled'
EMBARGO_COMPLETED = 'embargo_completed'
EMBARGO_INITIATED = 'embargo_initiated'
RETRACTION_APPROVED = 'retraction_approved'
RETRACTION_CANCELLED = 'retraction_cancelled'
RETRACTION_INITIATED = 'retraction_initiated'
REGISTRATION_APPROVAL_CANCELLED = 'registration_cancelled'
REGISTRATION_APPROVAL_INITIATED = 'registration_initiated'
REGISTRATION_APPROVAL_APPROVED = 'registration_approved'
def __repr__(self):
    """Debug representation showing the action, params, and id."""
    template = '<NodeLog({self.action!r}, params={self.params!r}) with id {self._id!r}>'
    return template.format(self=self)
@property
def node(self):
    """Return the :class:`Node` associated with this log."""
    # Older logs store the id under 'project' rather than 'node'.
    loaded = Node.load(self.params.get('node'))
    return loaded or Node.load(self.params.get('project'))
@property
def tz_date(self):
    """Return the timezone-aware (UTC) date, or None if date is missing."""
    # Date should always be defined, but a few logs in production are
    # missing dates; return None and log error if date missing
    if not self.date:
        logger.error('Date missing on NodeLog {}'.format(self._primary_key))
        return None
    return self.date.replace(tzinfo=pytz.UTC)
@property
def formatted_date(self):
    """Timezone-aware ISO-formatted date string, or None if date missing."""
    aware = self.tz_date
    if aware:
        return aware.isoformat()
    return None
def resolve_node(self, node):
    """A single `NodeLog` record may be attached to multiple `Node` records
    (parents, forks, registrations, etc.), so the node that the log refers
    to may not be the same as the node the user is viewing. Use
    `resolve_node` to determine the relevant node to use for permission
    checks.

    :param Node node: Node being viewed
    :return: the relevant `Node`, or False when none applies
    """
    if self.node == node or self.node in node.nodes:
        return self.node
    if node.is_fork_of(self.node) or node.is_registration_of(self.node):
        return node
    for child in node.nodes:
        # Bug fix: previously re-tested ``node.is_registration_of`` here,
        # which is always False at this point (the branch above would have
        # returned); the *child* must be checked instead.
        if child.is_fork_of(self.node) or child.is_registration_of(self.node):
            return child
    return False
def can_view(self, node, auth):
    """Whether ``auth`` may view this log in the context of ``node``."""
    target = self.resolve_node(node)
    if not target:
        return False
    return target.can_view(auth)
def _render_log_contributor(self, contributor, anonymous=False):
    """Serialize a contributor id to a dict for log rendering, honoring
    anonymity; returns None when the user cannot be loaded."""
    user = User.load(contributor)
    if not user:
        return None
    # Prefer the node-specific display name when a node is associated.
    fullname = user.display_full_name(node=self.node) if self.node else user.fullname
    return {
        'id': privacy_info_handle(user._primary_key, anonymous),
        'fullname': privacy_info_handle(fullname, anonymous, name=True),
        'registered': user.is_registered,
    }
class Tag(StoredObject):
    """A free-form label on nodes; the tag text itself is the primary key."""

    _id = fields.StringField(primary=True, validate=MaxLengthValidator(128))

    def __repr__(self):
        return '<Tag() with id {0!r}>'.format(self._id)

    @property
    def url(self):
        """Search URL listing everything carrying this tag."""
        return '/search/?tags={}'.format(self._id)
class Pointer(StoredObject):
    """A link to a Node. The Pointer delegates all but a few methods to its
    contained Node. Forking and registration are overridden such that the
    link is cloned, but its contained Node is not.
    """
    #: Whether this is a pointer or not
    primary = False
    _id = fields.StringField()
    # Back-reference '_pointed' lets a Node find the Pointers targeting it.
    node = fields.ForeignField('node', backref='_pointed')
    _meta = {'optimistic': True}

    def _clone(self):
        # Copy the link itself, re-targeting the same node. Implicitly
        # returns None when there is no node to point at.
        if self.node:
            clone = self.clone()
            clone.node = self.node
            clone.save()
            return clone

    def fork_node(self, *args, **kwargs):
        # Forking a pointer clones only the link, not the pointed-at node.
        return self._clone()

    def register_node(self, *args, **kwargs):
        # Registration likewise copies only the link.
        return self._clone()

    def use_as_template(self, *args, **kwargs):
        return self._clone()

    def resolve(self):
        # The "real" node this pointer stands for.
        return self.node

    def __getattr__(self, item):
        """Delegate attribute access to the node being pointed to."""
        # Prevent backref lookups from being overriden by proxied node
        try:
            return super(Pointer, self).__getattr__(item)
        except AttributeError:
            pass
        if self.node:
            return getattr(self.node, item)
        raise AttributeError(
            'Pointer object has no attribute {0}'.format(
                item
            )
        )
def get_pointer_parent(pointer):
    """Given a `Pointer` object, return its parent node."""
    # The `parent_node` property of the `Pointer` schema refers to the
    # parents of the pointed-at `Node`, not the parents of the `Pointer`;
    # use the back-reference syntax to find the parents of the `Pointer`.
    parent_refs = pointer.node__parent
    assert len(parent_refs) == 1, 'Pointer must have exactly one parent.'
    parent, = parent_refs
    return parent
def validate_category(value):
    """Validator for Node#category. Makes sure that the value is one of the
    categories defined in CATEGORY_MAP.
    """
    if value in Node.CATEGORY_MAP:
        return True
    raise ValidationValueError('Invalid value for category.')
def validate_title(value):
    """Validator for Node#title: must be non-blank and at most 200 chars."""
    if value is None or not value.strip():
        raise ValidationValueError('Title cannot be blank.')
    if len(value) > 200:
        raise ValidationValueError('Title cannot exceed 200 characters.')
    return True
def validate_user(value):
    """Validator for user-keyed dicts: the single keying user id must exist."""
    if value != {}:
        user_id = next(iter(value))
        if User.find(Q('_id', 'eq', user_id)).count() != 1:
            raise ValidationValueError('User does not exist.')
    return True
class NodeUpdateError(Exception):
    """Raised by Node.update when a field cannot be updated.

    :param str reason: human-readable explanation of the failure
    :param key: offending field name, or None when the failure is not tied
        to a single field (``key`` is now optional: ``Node.update`` raises
        this with only ``reason`` when the node is a registration, which
        previously caused a TypeError)
    """
    def __init__(self, reason, key=None, *args, **kwargs):
        super(NodeUpdateError, self).__init__(*args, **kwargs)
        self.key = key
        self.reason = reason
class Node(GuidStoredObject, AddonModelMixin, IdentifierMixin):
    """A project, component, folder, registration, or fork.

    NOTE(review): the class continues beyond this excerpt; only the field
    declarations and a subset of methods are visible here.
    """
    #: Whether this is a pointer or not
    primary = True
    __indices__ = [{
        'unique': False,
        'key_or_list': [
            ('tags.$', pymongo.ASCENDING),
            ('is_public', pymongo.ASCENDING),
            ('is_deleted', pymongo.ASCENDING),
        ]
    }]
    # Node fields that trigger an update to Solr on save
    SOLR_UPDATE_FIELDS = {
        'title',
        'category',
        'description',
        'visible_contributor_ids',
        'tags',
        'is_fork',
        'is_registration',
        'retraction',
        'embargo',
        'is_public',
        'is_deleted',
        'wiki_pages_current',
        'is_retracted',
        'node_license',
    }
    # Maps category identifier => Human-readable representation for use in
    # titles, menus, etc.
    # Use an OrderedDict so that menu items show in the correct order
    CATEGORY_MAP = OrderedDict([
        ('', 'Uncategorized'),
        ('project', 'Project'),
        ('hypothesis', 'Hypothesis'),
        ('methods and measures', 'Methods and Measures'),
        ('procedure', 'Procedure'),
        ('instrumentation', 'Instrumentation'),
        ('data', 'Data'),
        ('analysis', 'Analysis'),
        ('communication', 'Communication'),
        ('other', 'Other'),
    ])
    # Fields that Node.update() is allowed to modify
    WRITABLE_WHITELIST = [
        'title',
        'description',
        'category',
        'node_license',
    ]
    _id = fields.StringField(primary=True)
    date_created = fields.DateTimeField(auto_now_add=datetime.datetime.utcnow, index=True)
    # Privacy
    is_public = fields.BooleanField(default=False, index=True)
    # User mappings
    # permissions maps user id -> list of permission strings
    permissions = fields.DictionaryField()
    visible_contributor_ids = fields.StringField(list=True)
    # Project Organization
    is_dashboard = fields.BooleanField(default=False, index=True)
    is_folder = fields.BooleanField(default=False, index=True)
    # Expanded: Dictionary field mapping user IDs to expand state of this node:
    # {
    #   'icpnw': True,
    #   'cdi38': False,
    # }
    # NOTE(review): mutable ``default={}`` — presumably modular-odm copies
    # field defaults per instance; confirm.
    expanded = fields.DictionaryField(default={}, validate=validate_user)
    is_deleted = fields.BooleanField(default=False, index=True)
    deleted_date = fields.DateTimeField(index=True)
    is_registration = fields.BooleanField(default=False, index=True)
    registered_date = fields.DateTimeField(index=True)
    registered_user = fields.ForeignField('user', backref='registered')
    registered_schema = fields.ForeignField('metaschema', backref='registered')
    registered_meta = fields.DictionaryField()
    # Sanctions attached directly to this node (may also be inherited from
    # a parent; see the `sanction` and related properties).
    registration_approval = fields.ForeignField('registrationapproval')
    retraction = fields.ForeignField('retraction')
    embargo = fields.ForeignField('embargo')
    is_fork = fields.BooleanField(default=False, index=True)
    forked_date = fields.DateTimeField(index=True)
    title = fields.StringField(validate=validate_title)
    description = fields.StringField()
    category = fields.StringField(validate=validate_category, index=True)
    # Representation of a node's license
    # {
    #     'id': <id>,
    #     'name': <name>,
    #     'text': <license text>,
    #     'year' (optional): <year>,
    #     'copyrightHolders' (optional): <copyright_holders>
    # }
    node_license = fields.DictionaryField(default=dict)
    # One of 'public', 'private'
    # TODO: Add validator
    comment_level = fields.StringField(default='private')
    wiki_pages_current = fields.DictionaryField()
    wiki_pages_versions = fields.DictionaryField()
    # Dictionary field mapping node wiki page to sharejs private uuid.
    # {<page_name>: <sharejs_id>}
    wiki_private_uuids = fields.DictionaryField()
    file_guid_to_share_uuids = fields.DictionaryField()
    creator = fields.ForeignField('user', backref='created')
    contributors = fields.ForeignField('user', list=True, backref='contributed')
    users_watching_node = fields.ForeignField('user', list=True, backref='watched')
    logs = fields.ForeignField('nodelog', list=True, backref='logged')
    tags = fields.ForeignField('tag', list=True, backref='tagged')
    # Tags for internal use
    system_tags = fields.StringField(list=True)
    # Children: may contain both Nodes and Pointers (hence Abstract)
    nodes = fields.AbstractForeignField(list=True, backref='parent')
    forked_from = fields.ForeignField('node', backref='forked', index=True)
    registered_from = fields.ForeignField('node', backref='registrations', index=True)
    # The node (if any) used as a template for this node's creation
    template_node = fields.ForeignField('node', backref='template_node', index=True)
    piwik_site_id = fields.StringField()
    # Dictionary field mapping user id to a list of nodes in node.nodes which the user has subscriptions for
    # {<User.id>: [<Node._id>, <Node2._id>, ...] }
    child_node_subscriptions = fields.DictionaryField(default=dict)
    _meta = {
        'optimistic': True,
    }
def __init__(self, *args, **kwargs):
    # Pop tags before delegating so the base constructor never sees them.
    tags = kwargs.pop('tags', [])
    super(Node, self).__init__(*args, **kwargs)
    # Ensure when Node is created with tags through API, tags are added to Tag
    if tags:
        for tag in tags:
            self.add_tag(tag, Auth(self.creator), save=False, log=False)
    if kwargs.get('_is_loaded', False):
        # Loaded from storage: skip the creator bootstrap below.
        return
    if self.creator:
        self.contributors.append(self.creator)
        self.set_visible(self.creator, visible=True, log=False)
        # Add default creator permissions
        for permission in CREATOR_PERMISSIONS:
            self.add_permission(self.creator, permission, save=False)
def __repr__(self):
    """Debug representation with title, category, and id."""
    template = '<Node(title={self.title!r}, category={self.category!r}) with _id {self._id!r}>'
    return template.format(self=self)
# For Django compatibility
@property
def pk(self):
    # Alias for the primary key, matching Django's model interface.
    return self._id
@property
def license(self):
    """This node's license dict, inherited from the parent when unset."""
    chosen = self.node_license
    if not chosen and self.parent_node:
        return self.parent_node.license
    return chosen
@property
def category_display(self):
    """The human-readable representation of this node's category."""
    mapping = self.CATEGORY_MAP
    return mapping[self.category]
@property
def sanction(self):
    """The active sanction (approval, embargo, or retraction), own or inherited."""
    own = self.registration_approval or self.embargo or self.retraction
    if own:
        return own
    parent = self.parent_node
    return parent.sanction if parent else None
@property
def is_pending_registration(self):
    """Whether this registration awaits approval (inherited from parent)."""
    if not self.is_registration:
        return False
    approval = self.registration_approval
    if approval is not None:
        return approval.pending_approval
    parent = self.parent_node
    return parent.is_pending_registration if parent else False
@property
def is_registration_approved(self):
    """Whether the registration approval passed (inherited from parent)."""
    approval = self.registration_approval
    if approval is not None:
        return approval.is_approved
    parent = self.parent_node
    return parent.is_registration_approved if parent else False
@property
def is_retracted(self):
    """Whether an approved retraction applies (inherited from parent)."""
    retraction = self.retraction
    if retraction is not None:
        return retraction.is_approved
    parent = self.parent_node
    return parent.is_retracted if parent else False
@property
def is_pending_retraction(self):
    """Whether a retraction awaits approval (inherited from parent)."""
    retraction = self.retraction
    if retraction is not None:
        return retraction.pending_approval
    parent = self.parent_node
    return parent.is_pending_retraction if parent else False
@property
def embargo_end_date(self):
    """End date of the applicable embargo; False when none applies (inherited)."""
    embargo = self.embargo
    if embargo is not None:
        return embargo.embargo_end_date
    parent = self.parent_node
    return parent.embargo_end_date if parent else False
@property
def is_pending_embargo(self):
    """Whether an embargo awaits approval (inherited from parent)."""
    embargo = self.embargo
    if embargo is not None:
        return embargo.pending_approval
    parent = self.parent_node
    return parent.is_pending_embargo if parent else False
@property
def is_pending_embargo_for_existing_registration(self):
    """ Returns True if Node has an Embargo pending approval for an
    existing registrations. This is used specifically to ensure
    registrations pre-dating the Embargo feature do not get deleted if
    their respective Embargo request is rejected.
    """
    embargo = self.embargo
    if embargo is not None:
        return embargo.pending_registration
    parent = self.parent_node
    return parent.is_pending_embargo_for_existing_registration if parent else False
@property
def private_links(self):
    # All PrivateLink objects referencing this node (via backref).
    return self.privatelink__shared
@property
def private_links_active(self):
    """Private links on this node that have not been deleted."""
    return [link for link in self.private_links if not link.is_deleted]
@property
def private_link_keys_active(self):
    """Keys of this node's non-deleted private links."""
    return [link.key for link in self.private_links if not link.is_deleted]
@property
def private_link_keys_deleted(self):
    """Keys of this node's deleted private links."""
    return [link.key for link in self.private_links if link.is_deleted]
def path_above(self, auth):
    """Slash-joined ancestor titles, masking nodes ``auth`` cannot view."""
    segments = []
    for ancestor in reversed(self.parents):
        if ancestor.can_view(auth):
            segments.append(ancestor.title)
        else:
            segments.append('-- private project --')
    return '/' + '/'.join(segments)
@property
def ids_above(self):
    """Set of the ids of every ancestor node."""
    return {ancestor._id for ancestor in self.parents}
@property
def nodes_active(self):
    """Children (nodes and pointers) that are not deleted."""
    return [child for child in self.nodes if not child.is_deleted]
def can_edit(self, auth=None, user=None):
    """Return if a user is authorized to edit this node.
    Must specify one of (`auth`, `user`).

    :param Auth auth: Auth object to check
    :param User user: User object to check
    :returns: Whether user has permission to edit this node.
    """
    if not auth and not user:
        raise ValueError('Must pass either `auth` or `user`')
    if auth and user:
        raise ValueError('Cannot pass both `auth` and `user`')
    user = user or auth.user
    is_api_node = auth.api_node == self if auth else False
    has_write = user and self.has_permission(user, 'write')
    return has_write or is_api_node
def active_contributors(self, include=lambda n: True):
    """Yield active contributors that pass the ``include`` predicate."""
    for person in self.contributors:
        if person.is_active and include(person):
            yield person
def is_admin_parent(self, user):
    """True if ``user`` has admin here or on any ancestor (no parent check
    within has_permission to avoid recursion)."""
    if self.has_permission(user, 'admin', check_parent=False):
        return True
    parent = self.parent_node
    return parent.is_admin_parent(user) if parent else False
def can_view(self, auth):
    """Whether ``auth`` may view this node: public, read permission,
    active private-link key, or admin on an ancestor."""
    if not auth and not self.is_public:
        return False
    if self.is_public:
        return True
    return (
        (auth.user and self.has_permission(auth.user, 'read')) or
        auth.private_key in self.private_link_keys_active or
        self.is_admin_parent(auth.user)
    )
def is_expanded(self, user=None):
    """Return whether ``user`` has expanded this folder in the dashboard view.

    :param User user: User object to check
    :returns: Boolean if the folder is expanded.
    """
    return self.expanded.get(user._id, False)
def expand(self, user=None):
    # Record that `user` has expanded this folder in the dashboard and persist.
    self.expanded[user._id] = True
    self.save()
def collapse(self, user=None):
    # Record that `user` has collapsed this folder in the dashboard and persist.
    self.expanded[user._id] = False
    self.save()
def is_derived_from(self, other, attr):
    """Walk the ``attr`` chain (e.g. 'forked_from') looking for ``other``."""
    ancestor = getattr(self, attr)
    while ancestor is not None:
        if ancestor == other:
            return True
        ancestor = getattr(ancestor, attr)
    return False
def is_fork_of(self, other):
    # True when this node's fork ancestry leads back to `other`.
    return self.is_derived_from(other, 'forked_from')
def is_registration_of(self, other):
    # True when this node's registration ancestry leads back to `other`.
    return self.is_derived_from(other, 'registered_from')
@property
def forks(self):
    """List of forks of this node"""
    query = Q('is_deleted', 'eq', False) & Q('is_registration', 'ne', True)
    return list(self.node__forked.find(query))
def add_permission(self, user, permission, save=False):
    """Grant permission to a user.

    :param str permission: Permission to grant
    :param bool save: Save changes
    :raises: ValueError if user already has permission
    """
    if user._id not in self.permissions:
        self.permissions[user._id] = [permission]
    else:
        held = self.permissions[user._id]
        if permission in held:
            raise ValueError('User already has permission {0}'.format(permission))
        held.append(permission)
    if save:
        self.save()
def remove_permission(self, user, permission, save=False):
    """Revoke permission from a user.

    :param User user: User to revoke permission from
    :param str permission: Permission to revoke
    :param bool save: Save changes
    :raises: ValueError if user does not have permission
    """
    try:
        # KeyError: unknown user; ValueError: permission not held.
        self.permissions[user._id].remove(permission)
    except (KeyError, ValueError):
        raise ValueError('User does not have permission {0}'.format(permission))
    if save:
        self.save()
def clear_permission(self, user, save=False):
    """Clear all permissions for a user.

    :param User user: User to revoke permission from
    :param bool save: Save changes
    :raises: ValueError if user not in permissions
    """
    if user._id not in self.permissions:
        raise ValueError(
            'User {0} not in permissions list for node {1}'.format(
                user._id, self._id,
            )
        )
    del self.permissions[user._id]
    if save:
        self.save()
def set_permissions(self, user, permissions, save=False):
    # Replace the user's permission list wholesale (no validation here).
    self.permissions[user._id] = permissions
    if save:
        self.save()
def has_permission(self, user, permission, check_parent=True):
    """Check whether user has permission.

    :param User user: User to test
    :param str permission: Required permission
    :returns: User has required permission
    """
    if user is None:
        logger.warn('User is ``None``.')
        return False
    held = self.permissions.get(user._id, [])
    if permission in held:
        return True
    # 'read' is implied for admins of any ancestor.
    if check_parent and permission == 'read':
        return self.is_admin_parent(user)
    return False
def has_permission_on_children(self, user, permission):
    """Checks if the given user has a given permission on any child nodes
    that are not registrations or deleted
    """
    if self.has_permission(user, permission):
        return True
    return any(
        child.has_permission_on_children(user, permission)
        for child in self.nodes
        if child.primary and not child.is_deleted
    )
def has_addon_on_children(self, addon):
    """Checks if a given node has a specific addon on child nodes
    that are not registrations or deleted
    """
    if self.has_addon(addon):
        return True
    return any(
        child.has_addon_on_children(addon)
        for child in self.nodes
        if child.primary and not child.is_deleted
    )
def get_permissions(self, user):
    """Get list of permissions for user.

    :param User user: User to check
    :returns: List of permissions; empty list when the user has none
        (note: this never raises, contrary to the previous docstring)
    """
    return self.permissions.get(user._id, [])
def adjust_permissions(self):
    """Drop permission entries for users no longer listed as contributors.

    Iterates over a snapshot of the keys so the dict can be mutated while
    looping (required on Python 3, where ``.keys()`` is a live view;
    harmless on Python 2).
    """
    for key in list(self.permissions.keys()):
        if key not in self.contributors:
            self.permissions.pop(key)
@property
def visible_contributors(self):
    """User objects for the visible (bibliographic) contributors, in order."""
    return [User.load(user_id) for user_id in self.visible_contributor_ids]
@property
def parents(self):
    """Ancestor chain, from the immediate parent upward."""
    parent = self.parent_node
    if not parent:
        return []
    return [parent] + parent.parents
@property
def admin_contributor_ids(self):
    """Ids of users with admin on an ancestor who are not contributors here.

    Note: the previous signature declared a ``contributors=None`` parameter,
    which is unusable on a property and was never read; it has been removed.
    """
    contributor_ids = self.contributors._to_primary_keys()
    admin_ids = set()
    for parent in self.parents:
        admins = [
            user_id for user_id, perms in parent.permissions.iteritems()
            if 'admin' in perms
        ]
        admin_ids.update(set(admins).difference(contributor_ids))
    return admin_ids
@property
def admin_contributors(self):
    """Ancestor-admin users (see admin_contributor_ids), sorted by family name."""
    users = [User.load(user_id) for user_id in self.admin_contributor_ids]
    return sorted(users, key=lambda user: user.family_name)
def get_visible(self, user):
    """Whether contributor ``user`` is listed as visible (bibliographic).

    :raises ValueError: if ``user`` is not a contributor
    """
    if not self.is_contributor(user):
        raise ValueError(u'User {0} not in contributors'.format(user))
    return user._id in self.visible_contributor_ids
def update_visible_ids(self, save=False):
    """Update the order of `visible_contributor_ids`. Updating on making
    a contributor visible is more efficient than recomputing order on
    accessing `visible_contributors`.
    """
    currently_visible = set(self.visible_contributor_ids)
    self.visible_contributor_ids = [
        contrib._id for contrib in self.contributors
        if contrib._id in currently_visible
    ]
    if save:
        self.save()
def set_visible(self, user, visible, log=True, auth=None, save=False):
    """Set whether ``user`` appears in the bibliographic contributor list.

    :raises ValueError: if ``user`` is not a contributor, or hiding would
        leave no visible contributor
    """
    if not self.is_contributor(user):
        raise ValueError(u'User {0} not in contributors'.format(user))
    if visible and user._id not in self.visible_contributor_ids:
        self.visible_contributor_ids.append(user._id)
        # Re-sync ordering against the contributor list.
        self.update_visible_ids(save=False)
    elif not visible and user._id in self.visible_contributor_ids:
        if len(self.visible_contributor_ids) == 1:
            raise ValueError('Must have at least one visible contributor')
        self.visible_contributor_ids.remove(user._id)
    else:
        # No state change; skip logging and saving entirely.
        return
    message = (
        NodeLog.MADE_CONTRIBUTOR_VISIBLE
        if visible
        else NodeLog.MADE_CONTRIBUTOR_INVISIBLE
    )
    if log:
        self.add_log(
            message,
            params={
                'parent': self.parent_id,
                'node': self._id,
                'contributors': [user._id],
            },
            auth=auth,
            save=False,
        )
    if save:
        self.save()
def can_comment(self, auth):
    """Whether ``auth`` may comment, honoring the node's comment_level."""
    if self.comment_level != 'public':
        # Private commenting: contributors only.
        return self.is_contributor(auth.user)
    return auth.logged_in and (
        self.is_public or
        (auth.user and self.has_permission(auth.user, 'read'))
    )
def update(self, fields, auth=None, save=True):
    """Update whitelisted fields from the ``fields`` mapping and log it.

    :param dict fields: {field_name: new_value}; keys outside
        WRITABLE_WHITELIST are silently ignored
    :param Auth auth: user credited in the UPDATED_FIELDS log entry
    :param bool save: persist before logging
    :returns: list of saved field names (empty list when ``save`` is False)
    :raises NodeUpdateError: on registrations or invalid values
    """
    if self.is_registration:
        # NOTE(review): NodeUpdateError.__init__ requires a ``key``
        # argument; as written this raise would itself fail with a
        # TypeError — confirm and pass key=None (or default the param).
        raise NodeUpdateError(reason="Registered content cannot be updated")
    values = {}
    for key, value in fields.iteritems():
        if key not in self.WRITABLE_WHITELIST:
            continue
        # NOTE(review): catch_warnings alone does not escalate warnings to
        # exceptions; the ``except warnings.Warning`` branch likely never
        # fires unless a filter elsewhere sets 'error' — confirm.
        with warnings.catch_warnings():
            try:
                # This is in place because historically projects and components
                # live on different ElasticSearch indexes, and at the time of Node.save
                # there is no reliable way to check what the old Node.category
                # value was. When the category changes it is possible to have duplicate/dead
                # search entries, so always delete the ES doc on category change
                # TODO: consolidate Node indexes into a single index, refactor search
                if key == 'category':
                    self.delete_search_entry()
                ###############
                values[key] = {
                    'old': getattr(self, key),
                    'new': value,
                }
                setattr(self, key, value)
            except AttributeError:
                raise NodeUpdateError(reason="Invalid value for attribute '{0}'".format(key), key=key)
            except warnings.Warning:
                raise NodeUpdateError(reason="Attribute '{0}' doesn't exist on the Node class".format(key), key=key)
    if save:
        updated = self.save()
    else:
        updated = []
    # Re-read the new values: setters/validators may have normalized them.
    for key in values:
        values[key]['new'] = getattr(self, key)
    self.add_log(NodeLog.UPDATED_FIELDS,
                 params={
                     'node': self._id,
                     'updated_fields': {
                         key: {
                             'old': values[key]['old'],
                             'new': values[key]['new']
                         }
                         for key in values
                     }
                 },
                 auth=auth)
    return updated
def save(self, *args, **kwargs):
    """Persist the node, handling first-save bootstrap and search syncing.

    :param bool update_piwik: (kwarg) push changes to Piwik analytics
    :param bool suppress_log: (kwarg) skip the PROJECT_CREATED log on first save
    :returns: list of saved field names (StoredObject contract)
    """
    update_piwik = kwargs.pop('update_piwik', True)
    # Drop permissions for users no longer listed as contributors.
    self.adjust_permissions()
    first_save = not self._is_loaded
    if first_save and self.is_dashboard:
        existing_dashboards = self.creator.node__contributed.find(
            Q('is_dashboard', 'eq', True)
        )
        if existing_dashboards.count() > 0:
            raise NodeStateError("Only one dashboard allowed per user.")
    is_original = not self.is_registration and not self.is_fork
    if 'suppress_log' in kwargs.keys():
        suppress_log = kwargs['suppress_log']
        del kwargs['suppress_log']
    else:
        suppress_log = False
    saved_fields = super(Node, self).save(*args, **kwargs)
    if first_save and is_original and not suppress_log:
        # TODO: This logic also exists in self.use_as_template()
        for addon in settings.ADDONS_AVAILABLE:
            if 'node' in addon.added_default:
                self.add_addon(addon.short_name, auth=None, log=False)
        # Define log fields for non-component project
        log_action = NodeLog.PROJECT_CREATED
        log_params = {
            'node': self._primary_key,
        }
        if getattr(self, 'parent', None):
            # Append log to parent
            self.parent.nodes.append(self)
            self.parent.save()
            log_params.update({'parent_node': self.parent._primary_key})
        # Add log with appropriate fields
        self.add_log(
            log_action,
            params=log_params,
            auth=Auth(user=self.creator),
            log_date=self.date_created,
            save=True,
        )
    # Only update Solr if at least one stored field has changed, and if
    # public or privacy setting has changed
    need_update = bool(self.SOLR_UPDATE_FIELDS.intersection(saved_fields))
    if not self.is_public:
        if first_save or 'is_public' not in saved_fields:
            need_update = False
    # NOTE(review): ``self.archiving`` is defined outside this excerpt.
    if self.is_folder or self.archiving:
        need_update = False
    if need_update:
        self.update_search()
    if 'node_license' in saved_fields:
        # Children without their own license inherit this one; refresh them.
        children = [c for c in self.get_descendants_recursive(
            include=lambda n: n.node_license == {}
        )]
        # this returns generator, that would get unspooled anyways
        if children:
            Node.bulk_update_search(children)
    # This method checks what has changed.
    if settings.PIWIK_HOST and update_piwik:
        piwik_tasks.update_node(self._id, saved_fields)
    # Return expected value for StoredObject::save
    return saved_fields
######################################
# Methods that return a new instance #
######################################
def use_as_template(self, auth, changes=None, top_level=True):
    """Create a new project, using an existing project as a template.

    :param auth: The user to be assigned as creator
    :param changes: A dictionary of changes, keyed by node id, which
                    override the attributes of the template project or its
                    children.
    :param bool top_level: True only at the root of the recursion; controls
        the "templated from" title prefix.
    :return: The `Node` instance created.
    """
    changes = changes or dict()
    # build the dict of attributes to change for the new node
    try:
        attributes = changes[self._id]
        # TODO: explicitly define attributes which may be changed.
    except (AttributeError, KeyError):
        attributes = dict()
    new = self.clone()
    # clear permissions, which are not cleared by the clone method
    new.permissions = {}
    new.visible_contributor_ids = []
    # Clear quasi-foreign fields
    new.wiki_pages_current = {}
    new.wiki_pages_versions = {}
    new.wiki_private_uuids = {}
    new.file_guid_to_share_uuids = {}
    # set attributes which may be overridden by `changes`
    new.is_public = False
    new.description = None
    # apply `changes`
    for attr, val in attributes.iteritems():
        setattr(new, attr, val)
    # set attributes which may NOT be overridden by `changes`
    new.creator = auth.user
    new.template_node = self
    new.add_contributor(contributor=auth.user, permissions=CREATOR_PERMISSIONS, log=False, save=False)
    new.is_fork = False
    new.is_registration = False
    new.piwik_site_id = None
    # If that title hasn't been changed, apply the default prefix (once)
    if (new.title == self.title
            and top_level
            and language.TEMPLATED_FROM_PREFIX not in new.title):
        new.title = ''.join((language.TEMPLATED_FROM_PREFIX, new.title, ))
    # Slight hack - date_created is a read-only field.
    new._fields['date_created'].__set__(
        new,
        datetime.datetime.utcnow(),
        safe=True
    )
    # Suppress the PROJECT_CREATED log; a CREATED_FROM log is added instead.
    new.save(suppress_log=True)
    # Log the creation
    new.add_log(
        NodeLog.CREATED_FROM,
        params={
            'node': new._primary_key,
            'template_node': {
                'id': self._primary_key,
                'url': self.url,
            },
        },
        auth=auth,
        log_date=new.date_created,
        save=False,
    )
    # add mandatory addons
    # TODO: This logic also exists in self.save()
    for addon in settings.ADDONS_AVAILABLE:
        if 'node' in addon.added_default:
            new.add_addon(addon.short_name, auth=None, log=False)
    # deal with the children of the node, if any
    new.nodes = [
        x.use_as_template(auth, changes, top_level=False)
        for x in self.nodes
        if x.can_view(auth)
    ]
    new.save()
    return new
############
# Pointers #
############
def add_pointer(self, node, auth, save=True):
    """Add a pointer to a node.

    :param Node node: Node to add
    :param Auth auth: Consolidated authorization
    :param bool save: Save changes
    :return: Created pointer
    :raises ValueError: on duplicates, pointed-at folders, or the dashboard
    :raises NodeStateError: when this node is a registration
    """
    # Fail if node already in nodes / pointers. Note: cast node and node
    # to primary keys to test for conflicts with both nodes and pointers
    # contained in `self.nodes`.
    if node._id in self.node_ids:
        raise ValueError(
            'Pointer to node {0} already in list'.format(node._id)
        )
    if self.is_registration:
        raise NodeStateError('Cannot add a pointer to a registration')
    # If a folder, prevent more than one pointer to that folder. This will prevent infinite loops on the Dashboard.
    # Also, no pointers to the dashboard project, which could cause loops as well.
    already_pointed = node.pointed
    if node.is_folder and len(already_pointed) > 0:
        raise ValueError(
            'Pointer to folder {0} already exists. Only one pointer to any given folder allowed'.format(node._id)
        )
    if node.is_dashboard:
        raise ValueError(
            'Pointer to dashboard ({0}) not allowed.'.format(node._id)
        )
    # Append pointer
    pointer = Pointer(node=node)
    pointer.save()
    self.nodes.append(pointer)
    # Add log
    self.add_log(
        action=NodeLog.POINTER_CREATED,
        params={
            'parent_node': self.parent_id,
            'node': self._primary_key,
            'pointer': {
                'id': pointer.node._id,
                'url': pointer.node.url,
                'title': pointer.node.title,
                'category': pointer.node.category,
            },
        },
        auth=auth,
        save=False,
    )
    # Optionally save changes
    if save:
        self.save()
    return pointer
def rm_pointer(self, pointer, auth):
    """Remove a pointer.

    :param Pointer pointer: Pointer to remove
    :param Auth auth: Consolidated authorization
    :raises ValueError: if the pointer does not belong to this node
    """
    if pointer not in self.nodes:
        raise ValueError('Node link does not belong to the requested node.')
    # Remove `Pointer` object; will also remove self from `nodes` list of
    # parent node
    Pointer.remove_one(pointer)
    # Add log
    self.add_log(
        action=NodeLog.POINTER_REMOVED,
        params={
            'parent_node': self.parent_id,
            'node': self._primary_key,
            'pointer': {
                'id': pointer.node._id,
                'url': pointer.node.url,
                'title': pointer.node.title,
                'category': pointer.node.category,
            },
        },
        auth=auth,
        save=False,
    )
@property
def node_ids(self):
    """Ids of all children, resolving pointers to their target node's id."""
    ids = []
    for child in self.nodes:
        ids.append(child._id if child.primary else child.node._id)
    return ids
@property
def nodes_primary(self):
    """Children that are real nodes (pointers excluded)."""
    return [child for child in self.nodes if child.primary]
def node_and_primary_descendants(self):
    """Return an iterator over this node and all of its primary
    (non-pointer) descendants."""
    descendants = self.get_descendants_recursive(lambda n: n.primary)
    return itertools.chain([self], descendants)
@property
def depth(self):
    # Number of ancestors; 0 for a top-level node.
    return len(self.parents)
def next_descendants(self, auth, condition=lambda auth, node: True):
    """
    Recursively find the first set of descendants under a given node that
    meet a given condition; returns a list of [(node, [children]), ...]
    """
    found = []
    for child in self.nodes:
        if condition(auth, child):
            # base case: this child matches, stop descending here
            found.append((child, []))
        else:
            found.append((child, child.next_descendants(auth, condition)))
    # prune empty branches
    return [entry for entry in found if entry[1] or condition(auth, entry[0])]
def get_descendants_recursive(self, include=lambda n: True):
    """Yield descendants depth-first that pass ``include``; pointers are
    yielded (if matching) but never descended into."""
    for child in self.nodes:
        if include(child):
            yield child
        if child.primary:
            for grandchild in child.get_descendants_recursive(include):
                if include(grandchild):
                    yield grandchild
def get_aggregate_logs_queryset(self, auth):
    """Logs of this node plus all viewable descendants, newest first."""
    visible_ids = [self._id]
    for descendant in self.get_descendants_recursive():
        if descendant.can_view(auth):
            visible_ids.append(descendant._id)
    query = Q('__backrefs.logged.node.logs', 'in', visible_ids)
    return NodeLog.find(query).sort('-_id')
@property
def nodes_pointer(self):
    """Children that are pointers (non-primary)."""
    return [child for child in self.nodes if not child.primary]
@property
def has_pointers_recursive(self):
    """Recursively checks whether the current node or any of its nodes
    contains a pointer.
    """
    if self.nodes_pointer:
        return True
    return any(child.has_pointers_recursive for child in self.nodes_primary)
@property
def pointed(self):
    # Pointers targeting this node, via the '_pointed' backref; empty list
    # when the backref attribute is absent.
    return getattr(self, '_pointed', [])
def pointing_at(self, pointed_node_id):
    """This node is pointed at another node.

    :param Node pointed_node_id: The node id of the node being pointed at.
    :return: pointer_id, or None when no pointer targets that node
    """
    for pointer in self.nodes_pointer:
        if pointer.node._id == pointed_node_id:
            return pointer._id
    return None
def get_points(self, folders=False, deleted=False, resolve=True):
    """Parents of pointers at this node, optionally filtered and resolved.

    :param bool folders: include folder parents
    :param bool deleted: include deleted parents
    :param bool resolve: return the parent node rather than the Pointer
    """
    results = []
    for pointer in self.pointed:
        parent = get_pointer_parent(pointer)
        if parent.is_folder and not folders:
            continue
        if parent.is_deleted and not deleted:
            continue
        results.append(parent if resolve else pointer)
    return results
def resolve(self):
    # Nodes resolve to themselves; Pointers resolve to their target.
    return self
def fork_pointer(self, pointer, auth, save=True):
    """Replace a pointer with a fork. If the pointer points to a project,
    fork the project and replace the pointer with a new pointer pointing
    to the fork. If the pointer points to a component, fork the component
    and add it to the current node.

    :param Pointer pointer:
    :param Auth auth:
    :param bool save:
    :return: Forked node
    :raises ValueError: if the pointer is not a child, or forking fails
    """
    # Fail if pointer not contained in `nodes`
    try:
        index = self.nodes.index(pointer)
    except ValueError:
        raise ValueError('Pointer {0} not in list'.format(pointer._id))
    # Get pointed node
    node = pointer.node
    # Fork into current node and replace pointer with forked component
    forked = node.fork_node(auth)
    if forked is None:
        raise ValueError('Could not fork node')
    self.nodes[index] = forked
    # Add log
    self.add_log(
        NodeLog.POINTER_FORKED,
        params={
            'parent_node': self.parent_id,
            'node': self._primary_key,
            'pointer': {
                'id': pointer.node._id,
                'url': pointer.node.url,
                'title': pointer.node.title,
                'category': pointer.node.category,
            },
        },
        auth=auth,
        save=False,
    )
    # Optionally save changes
    if save:
        self.save()
    # Garbage-collect pointer. Note: Must save current node before
    # removing pointer, else remove will fail when trying to remove
    # backref from self to pointer.
    Pointer.remove_one(pointer)
    # Return forked content
    return forked
def get_recent_logs(self, n=10):
    """Return a list of the n most recent logs, in reverse chronological
    order.

    Bug fix: ``reversed(...)`` returns an iterator, which does not support
    slicing (``reversed(xs)[:n]`` raises TypeError for plain lists); the
    reversed sequence is materialized before taking the first ``n``.

    :param int n: Number of logs to retrieve
    """
    return list(reversed(self.logs))[:n]
@property
def date_modified(self):
    """The most recent datetime when this node was modified, based on
    the logs; falls back to the creation date when there are no logs.
    """
    if self.logs:
        return self.logs[-1].date
    return self.date_created
    def set_title(self, title, auth, save=False):
        """Set the title of this Node and log it.

        :param str title: The new title.
        :param auth: All the auth information including user, API key.
        :param bool save: Save self after updating.
        :return: None
        """
        #Called so validation does not have to wait until save.
        validate_title(title)
        original_title = self.title
        self.title = title
        self.add_log(
            action=NodeLog.EDITED_TITLE,
            params={
                'parent_node': self.parent_id,
                'node': self._primary_key,
                'title_new': self.title,
                'title_original': original_title,
            },
            auth=auth,
            save=False,  # single save (if requested) happens below
        )
        if save:
            self.save()
        return None
    def set_description(self, description, auth, save=False):
        """Set the description and log the event.

        :param str description: The new description
        :param auth: All the auth information including user, API key.
        :param bool save: Save self after updating.
        :return: None
        """
        original = self.description
        self.description = description
        self.add_log(
            action=NodeLog.EDITED_DESCRIPTION,
            params={
                'parent_node': self.parent_id,
                'node': self._primary_key,
                'description_new': self.description,
                'description_original': original
            },
            auth=auth,
            save=False,  # single save (if requested) happens below
        )
        if save:
            self.save()
        return None
def update_search(self):
from website import search
try:
search.search.update_node(self)
except search.exceptions.SearchUnavailableError as e:
logger.exception(e)
log_exception()
@classmethod
def bulk_update_search(cls, nodes):
from website import search
try:
serialize = functools.partial(search.search.update_node, bulk=True)
search.search.bulk_update_nodes(serialize, nodes)
except search.exceptions.SearchUnavailableError as e:
logger.exception(e)
log_exception()
def delete_search_entry(self):
from website import search
try:
search.search.delete_node(self)
except search.exceptions.SearchUnavailableError as e:
logger.exception(e)
log_exception()
    def delete_registration_tree(self, save=False):
        """Mark this registration and all of its primary descendants deleted.

        :param bool save: Save each node and refresh its search entry
        """
        self.is_deleted = True
        # registered_from is cleared unless the embargo was created for an
        # existing registration (attribute may be absent; defaults to False)
        if not getattr(self.embargo, 'for_existing_registration', False):
            self.registered_from = None
        if save:
            self.save()
        self.update_search()
        # Recurse into primary (non-pointer) children
        for child in self.nodes_primary:
            child.delete_registration_tree(save=save)
    def remove_node(self, auth, date=None):
        """Marks a node as deleted.

        TODO: Call a hook on addons

        Adds a log to the parent node if applicable

        :param auth: an instance of :class:`Auth`.
        :param date: Date node was removed
        :type date: `datetime.datetime` or `None`
        :raises NodeStateError: For dashboards or nodes with live children
        :raises PermissionsError: If ``auth`` cannot edit this node
        :return bool: True on success
        """
        # TODO: rename "date" param - it's shadowing a global
        if self.is_dashboard:
            raise NodeStateError("Dashboards may not be deleted.")
        if not self.can_edit(auth):
            raise PermissionsError('{0!r} does not have permission to modify this {1}'.format(auth.user, self.category or 'node'))
        #if this is a folder, remove all the folders that this is pointing at.
        if self.is_folder:
            for pointed in self.nodes_pointer:
                if pointed.node.is_folder:
                    pointed.node.remove_node(auth=auth)
        # All non-deleted primary children must be removed first
        if [x for x in self.nodes_primary if not x.is_deleted]:
            raise NodeStateError("Any child components must be deleted prior to deleting this project.")
        # After delete callback
        for addon in self.get_addons():
            message = addon.after_delete(self, auth.user)
            if message:
                status.push_status_message(message, kind='info', trust=False)
        log_date = date or datetime.datetime.utcnow()
        # Add log to parent
        if self.node__parent:
            self.node__parent[0].add_log(
                NodeLog.NODE_REMOVED,
                params={
                    'project': self._primary_key,
                },
                auth=auth,
                log_date=log_date,
                save=True,
            )
        else:
            self.add_log(
                NodeLog.PROJECT_DELETED,
                params={
                    'project': self._primary_key,
                },
                auth=auth,
                log_date=log_date,
                save=True,
            )
        self.is_deleted = True
        # NOTE(review): deleted_date is set to the raw `date` argument, which
        # may be None even though log_date falls back to utcnow — confirm.
        self.deleted_date = date
        self.save()
        auth_signals.node_deleted.send(self)
        return True
    def fork_node(self, auth, title='Fork of '):
        """Recursively fork a node.

        :param Auth auth: Consolidated authorization
        :param str title: Optional text to prepend to forked title
        :raises PermissionsError: If the user cannot read a private node
        :raises NodeStateError: If the node is deleted
        :return: Forked node
        """
        user = auth.user
        # Non-contributors can't fork private nodes
        if not (self.is_public or self.has_permission(user, 'read')):
            raise PermissionsError('{0!r} does not have permission to fork node {1!r}'.format(user, self._id))
        when = datetime.datetime.utcnow()
        # Reload from storage so the clone reflects persisted state
        original = self.load(self._primary_key)
        if original.is_deleted:
            raise NodeStateError('Cannot fork deleted node.')
        # Note: Cloning a node copies its `wiki_pages_current` and
        # `wiki_pages_versions` fields, but does not clone the underlying
        # database objects to which these dictionaries refer. This means that
        # the cloned node must pass itself to its wiki objects to build the
        # correct URLs to that content.
        forked = original.clone()
        forked.logs = self.logs
        forked.tags = self.tags
        # Recursively fork child nodes
        for node_contained in original.nodes:
            if not node_contained.is_deleted:
                forked_node = None
                try:  # Catch the potential PermissionsError above
                    forked_node = node_contained.fork_node(auth=auth, title='')
                except PermissionsError:
                    pass  # If this exception is thrown omit the node from the result set
                if forked_node is not None:
                    forked.nodes.append(forked_node)
        forked.title = title + forked.title
        forked.is_fork = True
        forked.is_registration = False
        forked.forked_date = when
        forked.forked_from = original
        forked.creator = user
        forked.piwik_site_id = None
        forked.node_license = original.license
        # Forks default to private status
        forked.is_public = False
        # Clear permissions before adding users
        forked.permissions = {}
        forked.visible_contributor_ids = []
        # The forking user becomes the sole (creator) contributor
        forked.add_contributor(
            contributor=user,
            permissions=CREATOR_PERMISSIONS,
            log=False,
            save=False
        )
        forked.add_log(
            action=NodeLog.NODE_FORKED,
            params={
                'parent_node': original.parent_id,
                'node': original._primary_key,
                'registration': forked._primary_key,
            },
            auth=auth,
            log_date=when,
            save=False,
        )
        forked.save()
        # After fork callback
        for addon in original.get_addons():
            _, message = addon.after_fork(original, forked, user)
            if message:
                status.push_status_message(message, kind='info', trust=True)
        return forked
    def register_node(self, schema, auth, template, data, parent=None):
        """Make a frozen copy of a node.

        :param schema: Schema object
        :param auth: All the auth information including user, API key.
        :param template: Template name
        :param data: Form data
        :param parent Node: parent registration of registration to be created
        :raises PermissionsError: If the user may not register this node
        :raises NodeStateError: For folders or deleted nodes
        :return: The new registration
        """
        # NOTE: Admins can register child nodes even if they don't have write access them
        if not self.can_edit(auth=auth) and not self.is_admin_parent(user=auth.user):
            raise PermissionsError(
                'User {} does not have permission '
                'to register this node'.format(auth.user._id)
            )
        if self.is_folder:
            raise NodeStateError("Folders may not be registered")
        # Normalize the template name into a mongo-safe key
        template = urllib.unquote_plus(template)
        template = to_mongo(template)
        when = datetime.datetime.utcnow()
        # Reload from storage so the clone reflects persisted state
        original = self.load(self._primary_key)
        # Note: Cloning a node copies its `wiki_pages_current` and
        # `wiki_pages_versions` fields, but does not clone the underlying
        # database objects to which these dictionaries refer. This means that
        # the cloned node must pass itself to its wiki objects to build the
        # correct URLs to that content.
        if original.is_deleted:
            raise NodeStateError('Cannot register deleted node.')
        registered = original.clone()
        registered.is_registration = True
        registered.registered_date = when
        registered.registered_user = auth.user
        registered.registered_schema = schema
        registered.registered_from = original
        if not registered.registered_meta:
            registered.registered_meta = {}
        registered.registered_meta[template] = data
        registered.contributors = self.contributors
        registered.forked_from = self.forked_from
        registered.creator = self.creator
        registered.logs = self.logs
        registered.tags = self.tags
        registered.piwik_site_id = None
        registered.node_license = original.license
        registered.save()
        if parent:
            registered.parent_node = parent
        # After register callback
        for addon in original.get_addons():
            _, message = addon.after_register(original, registered, auth.user)
            if message:
                status.push_status_message(message, kind='info', trust=False)
        # Recursively register children; only attach non-primary results
        for node_contained in original.nodes:
            if not node_contained.is_deleted:
                child_registration = node_contained.register_node(
                    schema, auth, template, data, parent=registered
                )
                if child_registration and not child_registration.primary:
                    registered.nodes.append(child_registration)
        registered.save()
        if settings.ENABLE_ARCHIVER:
            project_signals.after_create_registration.send(self, dst=registered, user=auth.user)
        return registered
def remove_tag(self, tag, auth, save=True):
if tag in self.tags:
self.tags.remove(tag)
self.add_log(
action=NodeLog.TAG_REMOVED,
params={
'parent_node': self.parent_id,
'node': self._primary_key,
'tag': tag,
},
auth=auth,
save=False,
)
if save:
self.save()
    def add_tag(self, tag, auth, save=True, log=True):
        """Add ``tag`` to this node, creating the Tag record if needed; a
        no-op when the tag is already present.

        :param str tag: Tag name (also used as the Tag primary key)
        :param Auth auth: Consolidated authorization
        :param bool save: Save the node afterwards
        :param bool log: Write a TAG_ADDED log entry
        """
        if tag not in self.tags:
            new_tag = Tag.load(tag)
            if not new_tag:
                new_tag = Tag(_id=tag)
            new_tag.save()
            self.tags.append(new_tag)
            if log:
                self.add_log(
                    action=NodeLog.TAG_ADDED,
                    params={
                        'parent_node': self.parent_id,
                        'node': self._primary_key,
                        'tag': tag,
                    },
                    auth=auth,
                    save=False,
                )
            if save:
                self.save()
    def add_log(self, action, params, auth, foreign_user=None, log_date=None, save=True):
        """Create a NodeLog for ``action`` and append it to this node's logs.

        :param str action: NodeLog action constant
        :param dict params: Log parameters; ``params['node']`` is backfilled
            from ``params['project']`` when absent
        :param Auth auth: Authorization; log user is ``auth.user`` or None
        :param foreign_user: Value stored on the log's ``foreign_user`` field
        :param log_date: Optional explicit date for the log entry
        :param bool save: Save the node after appending the log
        :return NodeLog: The saved log entry
        """
        user = auth.user if auth else None
        # Ensure the 'node' param is always populated
        params['node'] = params.get('node') or params.get('project')
        log = NodeLog(
            action=action,
            user=user,
            foreign_user=foreign_user,
            params=params,
        )
        if log_date:
            log.date = log_date
        log.save()
        self.logs.append(log)
        if save:
            self.save()
        if user:
            increment_user_activity_counters(user._primary_key, action, log.date)
        return log
@property
def url(self):
return '/{}/'.format(self._primary_key)
def web_url_for(self, view_name, _absolute=False, _guid=False, *args, **kwargs):
return web_url_for(view_name, pid=self._primary_key, _absolute=_absolute, _guid=_guid, *args, **kwargs)
def api_url_for(self, view_name, _absolute=False, *args, **kwargs):
return api_url_for(view_name, pid=self._primary_key, _absolute=_absolute, *args, **kwargs)
@property
def absolute_url(self):
if not self.url:
logger.error('Node {0} has a parent that is not a project'.format(self._id))
return None
return urlparse.urljoin(settings.DOMAIN, self.url)
@property
def display_absolute_url(self):
url = self.absolute_url
if url is not None:
return re.sub(r'https?:', '', url).strip('/')
@property
def api_v2_url(self):
return reverse('nodes:node-detail', kwargs={'node_id': self._id})
@property
def absolute_api_v2_url(self):
return absolute_reverse('nodes:node-detail', kwargs={'node_id': self._id})
# used by django and DRF
def get_absolute_url(self):
return self.absolute_api_v2_url
@property
def api_url(self):
if not self.url:
logger.error('Node {0} has a parent that is not a project'.format(self._id))
return None
return '/api/v1{0}'.format(self.deep_url)
@property
def deep_url(self):
return '/project/{}/'.format(self._primary_key)
    @property
    def csl(self):  # formats node information into CSL format for citation parsing
        """a dict in CSL-JSON schema

        For details on this schema, see:
            https://github.com/citation-style-language/schema#csl-json-schema
        """
        csl = {
            'id': self._id,
            'title': sanitize.unescape_entities(self.title),
            'author': [
                contributor.csl_name  # method in auth/model.py which parses the names of authors
                for contributor in self.visible_contributors
            ],
            'publisher': 'Open Science Framework',
            'type': 'webpage',
            'URL': self.display_absolute_url,
        }
        doi = self.get_identifier_value('doi')
        if doi:
            csl['DOI'] = doi
        if self.logs:
            # Date of the most recent log serves as the "issued" date
            csl['issued'] = datetime_to_csl(self.logs[-1].date)
        return csl
def author_list(self, and_delim='&'):
author_names = [
author.biblio_name
for author in self.visible_contributors
if author
]
if len(author_names) < 2:
return ' {0} '.format(and_delim).join(author_names)
if len(author_names) > 7:
author_names = author_names[:7]
author_names.append('et al.')
return ', '.join(author_names)
return u'{0}, {1} {2}'.format(
', '.join(author_names[:-1]),
and_delim,
author_names[-1]
)
@property
def templated_list(self):
return [
x
for x in self.node__template_node
if not x.is_deleted
]
@property
def parent_node(self):
"""The parent node, if it exists, otherwise ``None``. Note: this
property is named `parent_node` rather than `parent` to avoid a
conflict with the `parent` back-reference created by the `nodes`
field on this schema.
"""
try:
if not self.node__parent[0].is_deleted:
return self.node__parent[0]
except IndexError:
pass
return None
    @parent_node.setter
    def parent_node(self, parent):
        """Attach this node as a child of ``parent`` and persist the parent."""
        parent.nodes.append(self)
        parent.save()
@property
def root(self):
if self.parent_node:
return self.parent_node.root
else:
return self
@property
def archiving(self):
job = self.archive_job
return job and not job.done and not job.archive_tree_finished()
@property
def archive_job(self):
return self.archivejob__active[0] if self.archivejob__active else None
@property
def registrations(self):
return self.node__registrations.find(Q('archiving', 'eq', False))
@property
def watch_url(self):
return os.path.join(self.api_url, "watch/")
@property
def parent_id(self):
if self.node__parent:
return self.node__parent[0]._primary_key
return None
@property
def project_or_component(self):
return 'project' if self.category == 'project' else 'component'
def is_contributor(self, user):
return (
user is not None
and (
user._id in self.contributors
)
)
    def add_addon(self, addon_name, auth, log=True, *args, **kwargs):
        """Add an add-on to the node. Do nothing if the addon is already
        enabled.

        :param str addon_name: Name of add-on
        :param Auth auth: Consolidated authorization object
        :param bool log: Add a log after adding the add-on
        :return: A boolean, whether the addon was added
        """
        ret = AddonModelMixin.add_addon(self, addon_name, auth=auth,
                                        *args, **kwargs)
        if ret and log:
            config = settings.ADDONS_AVAILABLE_DICT[addon_name]
            self.add_log(
                action=NodeLog.ADDON_ADDED,
                params={
                    'project': self.parent_id,
                    'node': self._primary_key,
                    'addon': config.full_name,
                },
                auth=auth,
                save=False,
            )
            self.save()  # TODO: here, or outside the conditional? @mambocab
        return ret
    def delete_addon(self, addon_name, auth, _force=False):
        """Delete an add-on from the node.

        :param str addon_name: Name of add-on
        :param Auth auth: Consolidated authorization object
        :param bool _force: For migration testing ONLY. Do not set to True
            in the application, or else projects will be allowed to delete
            mandatory add-ons!
        :return bool: Add-on was deleted
        """
        ret = super(Node, self).delete_addon(addon_name, auth, _force)
        if ret:
            config = settings.ADDONS_AVAILABLE_DICT[addon_name]
            self.add_log(
                action=NodeLog.ADDON_REMOVED,
                params={
                    'project': self.parent_id,
                    'node': self._primary_key,
                    'addon': config.full_name,
                },
                auth=auth,
                save=False,
            )
            self.save()
            # TODO: save here or outside the conditional? @mambocab
        return ret
def callback(self, callback, recursive=False, *args, **kwargs):
"""Invoke callbacks of attached add-ons and collect messages.
:param str callback: Name of callback method to invoke
:param bool recursive: Apply callback recursively over nodes
:return list: List of callback messages
"""
messages = []
for addon in self.get_addons():
method = getattr(addon, callback)
message = method(self, *args, **kwargs)
if message:
messages.append(message)
if recursive:
for child in self.nodes:
if not child.is_deleted:
messages.extend(
child.callback(
callback, recursive, *args, **kwargs
)
)
return messages
    def replace_contributor(self, old, new):
        """Swap contributor ``old`` for ``new`` in place, carrying over
        permissions and visibility and dropping ``old``'s unclaimed record
        for this node.

        :param old: User being replaced
        :param new: Replacement user
        :return bool: True if ``old`` was found and replaced
        """
        for i, contrib in enumerate(self.contributors):
            if contrib._primary_key == old._primary_key:
                self.contributors[i] = new
                # Remove unclaimed record for the project
                if self._primary_key in old.unclaimed_records:
                    del old.unclaimed_records[self._primary_key]
                    old.save()
                # Transfer permissions and visibility to the new user
                for permission in self.get_permissions(old):
                    self.add_permission(new, permission)
                self.permissions.pop(old._id)
                if old._id in self.visible_contributor_ids:
                    self.visible_contributor_ids[self.visible_contributor_ids.index(old._id)] = new._id
                return True
        return False
    def remove_contributor(self, contributor, auth, log=True):
        """Remove a contributor from this node.

        :param contributor: User object, the contributor to be removed
        :param auth: All the auth information including user, API key.
        :param bool log: Write a CONTRIB_REMOVED log entry
        :return bool: False when removal would leave no visible contributor
            or no registered admin; True on success
        """
        # remove unclaimed record if necessary
        if self._primary_key in contributor.unclaimed_records:
            del contributor.unclaimed_records[self._primary_key]
        self.contributors.remove(contributor._id)
        self.clear_permission(contributor)
        if contributor._id in self.visible_contributor_ids:
            self.visible_contributor_ids.remove(contributor._id)
        # NOTE(review): the guard returns below fire after the contributor
        # has already been removed in memory; the node is not saved on those
        # paths, but the in-memory mutation is not rolled back — confirm.
        if not self.visible_contributor_ids:
            return False
        # Node must have at least one registered admin user
        # TODO: Move to validator or helper
        admins = [
            user for user in self.contributors
            if self.has_permission(user, 'admin')
            and user.is_registered
        ]
        if not admins:
            return False
        # Clear permissions for removed user
        self.permissions.pop(contributor._id, None)
        # After remove callback
        for addon in self.get_addons():
            message = addon.after_remove_contributor(self, contributor, auth)
            if message:
                status.push_status_message(message, kind='info', trust=True)
        if log:
            self.add_log(
                action=NodeLog.CONTRIB_REMOVED,
                params={
                    'project': self.parent_id,
                    'node': self._primary_key,
                    'contributors': [contributor._id],
                },
                auth=auth,
                save=False,
            )
        self.save()
        #send signal to remove this user from project subscriptions
        auth_signals.contributor_removed.send(contributor, node=self)
        return True
def remove_contributors(self, contributors, auth=None, log=True, save=False):
results = []
removed = []
for contrib in contributors:
outcome = self.remove_contributor(
contributor=contrib, auth=auth, log=False,
)
results.append(outcome)
removed.append(contrib._id)
if log:
self.add_log(
action=NodeLog.CONTRIB_REMOVED,
params={
'project': self.parent_id,
'node': self._primary_key,
'contributors': removed,
},
auth=auth,
save=False,
)
if save:
self.save()
if False in results:
return False
return True
    def update_contributor(self, user, permission, visible, auth, save=False):
        """ TODO: this method should be updated as a replacement for the main loop of
        Node#manage_contributors. Right now there are redundancies, but to avoid major
        feature creep this will not be included as this time.

        Also checks to make sure unique admin is not removing own admin privilege.

        :param user: Contributor whose settings are being changed
        :param permission: Permission level ('read'/'write'/'admin') or falsy
        :param visible: New visibility, or None to leave unchanged
        :param Auth auth: Must belong to an admin of this node
        :param bool save: Save after each mutation
        :raises PermissionsError: If ``auth.user`` is not an admin
        :raises NodeStateError: If demoting the only admin
        :raises ValueError: If ``user`` is not a contributor
        """
        if not self.has_permission(auth.user, ADMIN):
            raise PermissionsError("Only admins can modify contributor permissions")
        permissions = expand_permissions(permission) or DEFAULT_CONTRIBUTOR_PERMISSIONS
        admins = [contrib for contrib in self.contributors if self.has_permission(contrib, 'admin') and contrib.is_active]
        if not len(admins) > 1:
            # has only one admin
            admin = admins[0]
            if admin == user and ADMIN not in permissions:
                raise NodeStateError('{} is the only admin.'.format(user.fullname))
        if user not in self.contributors:
            raise ValueError(
                'User {0} not in contributors'.format(user.fullname)
            )
        if permission:
            permissions = expand_permissions(permission)
            if set(permissions) != set(self.get_permissions(user)):
                self.set_permissions(user, permissions, save=save)
                permissions_changed = {
                    user._id: permissions
                }
                self.add_log(
                    action=NodeLog.PERMISSIONS_UPDATED,
                    params={
                        'project': self.parent_id,
                        'node': self._id,
                        'contributors': permissions_changed,
                    },
                    auth=auth,
                    save=save
                )
                with TokuTransaction():
                    # A demotion to read-only revokes write permissions
                    if ['read'] in permissions_changed.values():
                        project_signals.write_permissions_revoked.send(self)
        if visible is not None:
            self.set_visible(user, visible, auth=auth, save=save)
            self.update_visible_ids()
    def manage_contributors(self, user_dicts, auth, save=False):
        """Reorder and remove contributors.

        :param list user_dicts: Ordered list of contributors represented as
            dictionaries of the form:
            {'id': <id>, 'permission': <One of 'read', 'write', 'admin'>, 'visible': bool}
        :param Auth auth: Consolidated authentication information
        :param bool save: Save changes
        :raises: ValueError if any users in `users` not in contributors or if
            no admin contributors remaining
        """
        with TokuTransaction():
            users = []
            user_ids = []
            permissions_changed = {}
            visibility_removed = []
            to_retain = []
            to_remove = []
            for user_dict in user_dicts:
                user = User.load(user_dict['id'])
                if user is None:
                    raise ValueError('User not found')
                if user not in self.contributors:
                    raise ValueError(
                        'User {0} not in contributors'.format(user.fullname)
                    )
                permissions = expand_permissions(user_dict['permission'])
                if set(permissions) != set(self.get_permissions(user)):
                    self.set_permissions(user, permissions, save=False)
                    permissions_changed[user._id] = permissions
                # visible must be added before removed to ensure they are validated properly
                if user_dict['visible']:
                    self.set_visible(user,
                                     visible=True,
                                     auth=auth)
                else:
                    visibility_removed.append(user)
                users.append(user)
                user_ids.append(user_dict['id'])
            for user in visibility_removed:
                self.set_visible(user,
                                 visible=False,
                                 auth=auth)
            # Partition existing contributors into retained vs removed
            for user in self.contributors:
                if user._id in user_ids:
                    to_retain.append(user)
                else:
                    to_remove.append(user)
            # TODO: Move to validator or helper @jmcarp
            admins = [
                user for user in users
                if self.has_permission(user, 'admin')
                and user.is_registered
            ]
            if users is None or not admins:
                raise ValueError(
                    'Must have at least one registered admin contributor'
                )
            # Log a reorder only when the retained order differs from the request
            if to_retain != users:
                self.add_log(
                    action=NodeLog.CONTRIB_REORDERED,
                    params={
                        'project': self.parent_id,
                        'node': self._id,
                        'contributors': [
                            user._id
                            for user in users
                        ],
                    },
                    auth=auth,
                    save=False,
                )
            if to_remove:
                self.remove_contributors(to_remove, auth=auth, save=False)
            self.contributors = users
            if permissions_changed:
                self.add_log(
                    action=NodeLog.PERMISSIONS_UPDATED,
                    params={
                        'project': self.parent_id,
                        'node': self._id,
                        'contributors': permissions_changed,
                    },
                    auth=auth,
                    save=False,
                )
            # Update list of visible IDs
            self.update_visible_ids()
            if save:
                self.save()
        with TokuTransaction():
            # `or` binds looser than `and`: fires on any removal, or on any
            # demotion to read-only
            if to_remove or permissions_changed and ['read'] in permissions_changed.values():
                project_signals.write_permissions_revoked.send(self)
    def add_contributor(self, contributor, permissions=None, visible=True,
                        auth=None, log=True, save=False):
        """Add a contributor to the project.

        :param User contributor: The contributor to be added
        :param list permissions: Permissions to grant to the contributor
        :param bool visible: Contributor is visible in project dashboard
        :param Auth auth: All the auth information including user, API key
        :param bool log: Add log to self
        :param bool save: Save after adding contributor
        :returns: Whether contributor was added
        """
        MAX_RECENT_LENGTH = 15
        # If user is merged into another account, use master account
        contrib_to_add = contributor.merged_by if contributor.is_merged else contributor
        if contrib_to_add not in self.contributors:
            self.contributors.append(contrib_to_add)
            if visible:
                self.set_visible(contrib_to_add, visible=True, log=False)
            # Add default contributor permissions
            permissions = permissions or DEFAULT_CONTRIBUTOR_PERMISSIONS
            for permission in permissions:
                self.add_permission(contrib_to_add, permission, save=False)
            # Add contributor to recently added list for user
            if auth is not None:
                user = auth.user
                # Move (or insert) the contributor to the front of the list
                if contrib_to_add in user.recently_added:
                    user.recently_added.remove(contrib_to_add)
                user.recently_added.insert(0, contrib_to_add)
                # Cap the recently-added list length
                while len(user.recently_added) > MAX_RECENT_LENGTH:
                    user.recently_added.pop()
            if log:
                self.add_log(
                    action=NodeLog.CONTRIB_ADDED,
                    params={
                        'project': self.parent_id,
                        'node': self._primary_key,
                        'contributors': [contrib_to_add._primary_key],
                    },
                    auth=auth,
                    save=False,
                )
            if save:
                self.save()
            project_signals.contributor_added.send(self, contributor=contributor)
            return True
        #Permissions must be overridden if changed when contributor is added to parent he/she is already on a child of.
        elif contrib_to_add in self.contributors and permissions is not None:
            self.set_permissions(contrib_to_add, permissions)
            if save:
                self.save()
            return False
        else:
            return False
    def add_contributors(self, contributors, auth=None, log=True, save=False):
        """Add multiple contributors

        :param list contributors: A list of dictionaries of the form:
            {
                'user': <User object>,
                'permissions': <Permissions list, e.g. ['read', 'write']>,
                'visible': <Boolean indicating whether or not user is a bibliographic contributor>
            }
        :param auth: All the auth information including user, API key.
        :param log: Add a single aggregate log entry to self
        :param save: Save after adding contributor
        """
        for contrib in contributors:
            self.add_contributor(
                contributor=contrib['user'], permissions=contrib['permissions'],
                visible=contrib['visible'], auth=auth, log=False, save=False,
            )
        # One aggregate log entry rather than one per contributor
        if log and contributors:
            self.add_log(
                action=NodeLog.CONTRIB_ADDED,
                params={
                    'project': self.parent_id,
                    'node': self._primary_key,
                    'contributors': [
                        contrib['user']._id
                        for contrib in contributors
                    ],
                },
                auth=auth,
                save=False,
            )
        if save:
            self.save()
    def add_unregistered_contributor(self, fullname, email, auth,
                                     permissions=None, save=False):
        """Add a non-registered contributor to the project.

        :param str fullname: The full name of the person.
        :param str email: The email address of the person.
        :param Auth auth: Auth object for the user adding the contributor.
        :param list permissions: Permissions to grant to the contributor
        :param bool save: Unused here; the node is saved unconditionally
        :returns: The added contributor
        :raises: DuplicateEmailError if user with given email is already in the database.
        """
        # Create a new user record
        contributor = User.create_unregistered(fullname=fullname, email=email)
        contributor.add_unclaimed_record(node=self, referrer=auth.user,
                                         given_name=fullname, email=email)
        try:
            contributor.save()
        except ValidationValueError:  # User with same email already exists
            contributor = get_user(email=email)
            # Unregistered users may have multiple unclaimed records, so
            # only raise error if user is registered.
            if contributor.is_registered or self.is_contributor(contributor):
                raise
            contributor.add_unclaimed_record(node=self, referrer=auth.user,
                                             given_name=fullname, email=email)
            contributor.save()
        self.add_contributor(
            contributor, permissions=permissions, auth=auth,
            log=True, save=False,
        )
        self.save()
        return contributor
    def set_privacy(self, permissions, auth=None, log=True, save=True):
        """Set the permissions for this node.

        :param permissions: A string, either 'public' or 'private'
        :param auth: All the auth information including user, API key.
        :param bool log: Whether to add a NodeLog for the privacy change.
        :param bool save: Save the node after updating.
        :return bool: False when the requested state is already in effect
        :raises NodeStateError: For registrations that cannot change privacy
        """
        if permissions == 'public' and not self.is_public:
            if self.is_registration:
                if self.is_pending_embargo:
                    raise NodeStateError("A registration with an unapproved embargo cannot be made public.")
                if self.embargo_end_date and not self.is_pending_embargo:
                    # Making an embargoed registration public rejects the embargo
                    self.embargo.state = Embargo.REJECTED
                    self.embargo.save()
            self.is_public = True
        elif permissions == 'private' and self.is_public:
            if self.is_registration and not self.is_pending_embargo:
                raise NodeStateError("Public registrations must be retracted, not made private.")
            else:
                self.is_public = False
        else:
            return False
        # After set permissions callback
        for addon in self.get_addons():
            message = addon.after_set_privacy(self, permissions)
            if message:
                status.push_status_message(message, kind='info', trust=False)
        if log:
            action = NodeLog.MADE_PUBLIC if permissions == 'public' else NodeLog.MADE_PRIVATE
            self.add_log(
                action=action,
                params={
                    'project': self.parent_id,
                    'node': self._primary_key,
                },
                auth=auth,
                save=False,
            )
        if save:
            self.save()
        return True
def admin_public_wiki(self, user):
return (
self.has_addon('wiki') and
self.has_permission(user, 'admin') and
self.is_public
)
def include_wiki_settings(self, user):
"""Check if node meets requirements to make publicly editable."""
return (
self.admin_public_wiki(user) or
any(
each.admin_public_wiki(user)
for each in self.get_descendants_recursive()
)
)
    # TODO: Move to wiki add-on
    def get_wiki_page(self, name=None, version=None, id=None):
        """Look up a NodeWikiPage by name and version, or directly by id.

        :param str name: Page name; stripped and key-normalized before lookup
        :param version: Int-like version number, ``'previous'``,
            ``'current'``, or None (current)
        :param id: NodeWikiPage primary key, used as-is when ``name`` is
            falsy (note: parameter shadows the builtin ``id``)
        :return: NodeWikiPage, or None when the page/version does not exist
        """
        from website.addons.wiki.model import NodeWikiPage
        if name:
            name = (name or '').strip()
            key = to_mongo_key(name)
            try:
                if version and (isinstance(version, int) or version.isdigit()):
                    # Versions are stored zero-indexed; users count from 1
                    id = self.wiki_pages_versions[key][int(version) - 1]
                elif version == 'previous':
                    id = self.wiki_pages_versions[key][-2]
                elif version == 'current' or version is None:
                    id = self.wiki_pages_current[key]
                else:
                    return None
            except (KeyError, IndexError):
                return None
        return NodeWikiPage.load(id)
    # TODO: Move to wiki add-on
    def update_node_wiki(self, name, content, auth):
        """Update the node's wiki page with new content.

        :param name: A string, the page's name, e.g. ``"home"``.
        :param content: A string, the posted content.
        :param auth: All the auth information including user, API key.
        """
        from website.addons.wiki.model import NodeWikiPage
        name = (name or '').strip()
        key = to_mongo_key(name)
        if key not in self.wiki_pages_current:
            if key in self.wiki_pages_versions:
                # Page existed before and was deleted; continue its version count
                version = len(self.wiki_pages_versions[key]) + 1
            else:
                version = 1
        else:
            # Supersede the current revision
            current = NodeWikiPage.load(self.wiki_pages_current[key])
            current.is_current = False
            version = current.version + 1
            current.save()
        new_page = NodeWikiPage(
            page_name=name,
            version=version,
            user=auth.user,
            is_current=True,
            node=self,
            content=content
        )
        new_page.save()
        # check if the wiki page already exists in versions (existed once and is now deleted)
        if key not in self.wiki_pages_versions:
            self.wiki_pages_versions[key] = []
        self.wiki_pages_versions[key].append(new_page._primary_key)
        self.wiki_pages_current[key] = new_page._primary_key
        self.add_log(
            action=NodeLog.WIKI_UPDATED,
            params={
                'project': self.parent_id,
                'node': self._primary_key,
                'page': new_page.page_name,
                'page_id': new_page._primary_key,
                'version': new_page.version,
            },
            auth=auth,
            log_date=new_page.date,
            save=False,
        )
        self.save()
    # TODO: Move to wiki add-on
    def rename_node_wiki(self, name, new_name, auth):
        """Rename the node's wiki page with new name.

        :param name: A string, the page's name, e.g. ``"My Page"``.
        :param new_name: A string, the new page's name, e.g. ``"My Renamed Page"``.
        :param auth: All the auth information including user, API key.
        :raises PageCannotRenameError: When renaming the home page
        :raises PageNotFoundError: When the source page does not exist
        :raises PageConflictError: When the target name is taken or 'home'
        """
        # TODO: Fix circular imports
        from website.addons.wiki.exceptions import (
            PageCannotRenameError,
            PageConflictError,
            PageNotFoundError,
        )
        name = (name or '').strip()
        key = to_mongo_key(name)
        new_name = (new_name or '').strip()
        new_key = to_mongo_key(new_name)
        page = self.get_wiki_page(name)
        if key == 'home':
            raise PageCannotRenameError('Cannot rename wiki home page')
        if not page:
            raise PageNotFoundError('Wiki page not found')
        if (new_key in self.wiki_pages_current and key != new_key) or new_key == 'home':
            raise PageConflictError(
                'Page already exists with name {0}'.format(
                    new_name,
                )
            )
        # rename the page first in case we hit a validation exception.
        old_name = page.page_name
        page.rename(new_name)
        # TODO: merge historical records like update (prevents log breaks)
        # transfer the old page versions/current keys to the new name.
        if key != new_key:
            self.wiki_pages_versions[new_key] = self.wiki_pages_versions[key]
            del self.wiki_pages_versions[key]
            self.wiki_pages_current[new_key] = self.wiki_pages_current[key]
            del self.wiki_pages_current[key]
            if key in self.wiki_private_uuids:
                self.wiki_private_uuids[new_key] = self.wiki_private_uuids[key]
                del self.wiki_private_uuids[key]
        self.add_log(
            action=NodeLog.WIKI_RENAMED,
            params={
                'project': self.parent_id,
                'node': self._primary_key,
                'page': page.page_name,
                'page_id': page._primary_key,
                'old_page': old_name,
                'version': page.version,
            },
            auth=auth,
            save=False,
        )
        self.save()
    def delete_node_wiki(self, name, auth):
        """Delete the named wiki page by removing it from
        ``wiki_pages_current`` (version history is retained) and log it.

        :param str name: Page name
        :param auth: All the auth information including user, API key.
        """
        name = (name or '').strip()
        key = to_mongo_key(name)
        # NOTE(review): get_wiki_page is called with the mongo key rather
        # than the raw name (it normalizes again internally) — confirm that
        # to_mongo_key is idempotent, otherwise this lookup can miss.
        page = self.get_wiki_page(key)
        del self.wiki_pages_current[key]
        self.add_log(
            action=NodeLog.WIKI_DELETED,
            params={
                'project': self.parent_id,
                'node': self._primary_key,
                'page': page.page_name,
                'page_id': page._primary_key,
            },
            auth=auth,
            save=False,
        )
        self.save()
def get_stats(self, detailed=False):
if detailed:
raise NotImplementedError(
'Detailed stats exist, but are not yet implemented.'
)
else:
return get_basic_counters('node:%s' % self._primary_key)
    # TODO: Deprecate this; it duplicates much of what serialize_project already
    # does
    def serialize(self, auth=None):
        """Dictionary representation of node that is nested within a NodeLog's
        representation.

        :param Auth auth: Used only to compute the viewer-relative path
        :return dict: Partial node representation
        """
        # TODO: incomplete implementation
        return {
            'id': str(self._primary_key),
            'category': self.category_display,
            'node_type': self.project_or_component,
            'url': self.url,
            # TODO: Titles shouldn't contain escaped HTML in the first place
            'title': sanitize.unescape_entities(self.title),
            'path': self.path_above(auth),
            'api_url': self.api_url,
            'is_public': self.is_public,
            'is_registration': self.is_registration,
        }
    def _initiate_retraction(self, user, justification=None):
        """Initiates the retraction process for a registration

        Creates and persists a Retraction, attaches it to this node, and
        registers every active admin contributor as an authorizer (each
        receives approval/rejection tokens). Statement order matters: the
        Retraction is saved before being referenced so it has a primary key.

        :param user: User who initiated the retraction
        :param justification: Justification, if given, for retraction
        :return: the saved Retraction instance
        """
        retraction = Retraction(
            initiated_by=user,
            justification=justification or None, # make empty strings None
            state=Retraction.UNAPPROVED
        )
        retraction.save() # Save retraction so it has a primary key
        self.retraction = retraction
        self.save() # Set foreign field reference Node.retraction
        # Only active contributors with admin permission may authorize.
        admins = [contrib for contrib in self.contributors if self.has_permission(contrib, 'admin') and contrib.is_active]
        for admin in admins:
            retraction.add_authorizer(admin)
        retraction.save() # Save retraction approval state
        return retraction
    def retract_registration(self, user, justification=None, save=True):
        """Retract public registration. Instantiate new Retraction object
        and associate it with the respective registration.

        :param user: User initiating the retraction
        :param justification: Optional rationale recorded on the Retraction
        :param save: When True, persist this node after attaching the retraction
        :raises NodeStateError: if this node is not a retractable registration
            or is not the root of its registration tree
        """
        # Only public or embargoed (pending or active) registrations qualify.
        if not self.is_registration or (not self.is_public and not (self.embargo_end_date or self.is_pending_embargo)):
            raise NodeStateError('Only public or embargoed registrations may be retracted.')
        # Retraction must be initiated from the root of the registration tree.
        if self.root is not self:
            raise NodeStateError('Retraction of non-parent registrations is not permitted.')
        retraction = self._initiate_retraction(user, justification)
        # Log against the source project, not the registration itself.
        self.registered_from.add_log(
            action=NodeLog.RETRACTION_INITIATED,
            params={
                'node': self._id,
                'retraction_id': retraction._id,
            },
            auth=Auth(user),
        )
        self.retraction = retraction
        if save:
            self.save()
def _is_embargo_date_valid(self, end_date):
today = datetime.datetime.utcnow()
if (end_date - today) >= settings.EMBARGO_END_DATE_MIN:
if (end_date - today) <= settings.EMBARGO_END_DATE_MAX:
return True
return False
    def _initiate_embargo(self, user, end_date, for_existing_registration=False):
        """Initiates the embargo process for a registration

        Creates and persists an Embargo, attaches it to this node, and adds
        every active admin contributor as an authorizer. The Embargo is saved
        before being referenced so it has a primary key.

        :param user: User who initiated the embargo
        :param end_date: Date when the registration should be made public
        :param for_existing_registration: True when embargoing a pre-existing
            registration rather than one created for this embargo
        :return: the saved Embargo instance
        """
        embargo = Embargo(
            initiated_by=user,
            # Normalize to a datetime at midnight of end_date.
            end_date=datetime.datetime.combine(end_date, datetime.datetime.min.time()),
            for_existing_registration=for_existing_registration
        )
        embargo.save() # Save embargo so it has a primary key
        self.embargo = embargo
        self.save() # Set foreign field reference Node.embargo
        # Only active contributors with admin permission may authorize.
        admins = [contrib for contrib in self.contributors if self.has_permission(contrib, 'admin') and contrib.is_active]
        for admin in admins:
            embargo.add_authorizer(admin)
        embargo.save() # Save embargo's approval_state
        return embargo
    def embargo_registration(self, user, end_date, for_existing_registration=False):
        """Enter registration into an embargo period at end of which, it will
        be made public
        :param user: User initiating the embargo
        :param end_date: Date when the registration should be made public
        :param for_existing_registration: True when embargoing an already
            existing registration rather than a newly created one
        :raises: NodeStateError if Node is not a registration
        :raises: PermissionsError if user is not an admin for the Node
        :raises: ValidationValueError if end_date is not within time constraints
        """
        if not self.is_registration:
            raise NodeStateError('Only registrations may be embargoed')
        if not self.has_permission(user, 'admin'):
            raise PermissionsError('Only admins may embargo a registration')
        if not self._is_embargo_date_valid(end_date):
            # NOTE(review): the real bounds come from settings.EMBARGO_END_DATE_MIN/MAX;
            # this message may not match them exactly — confirm wording.
            raise ValidationValueError('Embargo end date must be more than one day in the future')
        embargo = self._initiate_embargo(user, end_date, for_existing_registration=for_existing_registration)
        # Log against the project the registration was created from.
        self.registered_from.add_log(
            action=NodeLog.EMBARGO_INITIATED,
            params={
                'node': self._id,
                'embargo_id': embargo._id,
            },
            auth=Auth(user),
            save=True,
        )
        # Embargoed registrations are hidden until the embargo completes.
        if self.is_public:
            self.set_privacy('private', Auth(user))
    def _initiate_approval(self, user):
        """Create a RegistrationApproval for this registration.

        Persists the approval (so it has a primary key), attaches it to
        ``self.registration_approval``, and adds every active admin
        contributor as an authorizer.

        :param user: User who initiated the approval process
        :return: the saved RegistrationApproval instance
        """
        end_date = datetime.datetime.now() + settings.REGISTRATION_APPROVAL_TIME
        approval = RegistrationApproval(
            initiated_by=user,
            end_date=end_date,
        )
        approval.save() # Save approval so it has a primary key
        self.registration_approval = approval
        self.save() # Set foreign field reference Node.registration_approval
        # Only active contributors with admin permission may authorize.
        admins = [contrib for contrib in self.contributors if self.has_permission(contrib, 'admin') and contrib.is_active]
        for admin in admins:
            approval.add_authorizer(admin)
        approval.save() # Save approval's approval_state
        return approval
    def require_approval(self, user):
        """Require admin approval for this registration.

        :param user: User initiating the approval process
        :raises NodeStateError: if this node is not a registration
        :raises PermissionsError: if user is not an admin on this node
        """
        if not self.is_registration:
            raise NodeStateError('Only registrations can require registration approval')
        if not self.has_permission(user, 'admin'):
            raise PermissionsError('Only admins can initiate a registration approval')
        approval = self._initiate_approval(user)
        # Log against the project the registration was created from.
        self.registered_from.add_log(
            action=NodeLog.REGISTRATION_APPROVAL_INITIATED,
            params={
                'node': self._id,
                'registration_approval_id': approval._id,
            },
            auth=Auth(user),
            save=True,
        )
# TODO make private?
@Node.subscribe('before_save')
def validate_permissions(schema, instance):
    """Ensure that user IDs in `contributors` and `permissions` match.

    :raises ValidationValueError: when either collection references a user
        ID absent from the other; the message lists the offending IDs.
    """
    node = instance
    contributor_ids = set([user._id for user in node.contributors])
    permission_ids = set(node.permissions.keys())
    mismatched_contributors = contributor_ids.difference(permission_ids)
    if mismatched_contributors:
        raise ValidationValueError(
            'Contributors {0} missing from `permissions` on node {1}'.format(
                ', '.join(mismatched_contributors),
                node._id,
            )
        )
    mismatched_permissions = permission_ids.difference(contributor_ids)
    if mismatched_permissions:
        # BUG FIX: previously joined `mismatched_contributors` here, which is
        # always empty at this point, so the error message listed no keys.
        raise ValidationValueError(
            'Permission keys {0} missing from `contributors` on node {1}'.format(
                ', '.join(mismatched_permissions),
                node._id,
            )
        )
@Node.subscribe('before_save')
def validate_visible_contributors(schema, instance):
    """Ensure that user IDs in `contributors` and `visible_contributor_ids`
    match.
    """
    node = instance
    contributors = node.contributors
    for visible_id in node.visible_contributor_ids:
        if visible_id in contributors:
            continue
        message = (
            'User {0} is in `visible_contributor_ids` but not in '
            '`contributors` on node {1}'
        ).format(visible_id, node._id)
        raise ValidationValueError(message)
class WatchConfig(StoredObject):
    """Watch configuration for a single node."""
    # Random ObjectId string serves as the primary key.
    _id = fields.StringField(primary=True, default=lambda: str(ObjectId()))
    node = fields.ForeignField('Node', backref='watched')
    # Delivery flags; both default to off.
    digest = fields.BooleanField(default=False)
    immediate = fields.BooleanField(default=False)
    def __repr__(self):
        return '<WatchConfig(node="{self.node}")>'.format(self=self)
class PrivateLink(StoredObject):
    """A shareable key granting access to one or more nodes."""
    _id = fields.StringField(primary=True, default=lambda: str(ObjectId()))
    date_created = fields.DateTimeField(auto_now_add=datetime.datetime.utcnow)
    # The secret key embedded in the shared URL.
    key = fields.StringField(required=True)
    name = fields.StringField()
    is_deleted = fields.BooleanField(default=False)
    # NOTE(review): presumably hides contributor identities from link
    # viewers — confirm against the consumers of this flag.
    anonymous = fields.BooleanField(default=False)
    nodes = fields.ForeignField('node', list=True, backref='shared')
    creator = fields.ForeignField('user', backref='created')
    @property
    def node_ids(self):
        """Primary keys of all nodes attached to this link."""
        node_ids = [node._id for node in self.nodes]
        return node_ids
    def node_scale(self, node):
        """Pixel indent reflecting the node's depth within this link.

        Recurses up the parent chain, adding 20 per ancestor with a parent;
        yields -40 when the chain leaves the set of linked nodes.
        """
        # node may be None if previous node's parent is deleted
        if node is None or node.parent_id not in self.node_ids:
            return -40
        else:
            offset = 20 if node.parent_node is not None else 0
            return offset + self.node_scale(node.parent_node)
    def to_json(self):
        """Serializable dict representation; deleted nodes are excluded."""
        return {
            "id": self._id,
            "date_created": iso8601format(self.date_created),
            "key": self.key,
            "name": self.name,
            "creator": {'fullname': self.creator.fullname, 'url': self.creator.profile_url},
            "nodes": [{'title': x.title, 'url': x.url, 'scale': str(self.node_scale(x)) + 'px', 'category': x.category}
                      for x in self.nodes if not x.is_deleted],
            "anonymous": self.anonymous
        }
class Sanction(StoredObject):
    """Sanction object is a generic way to track approval states"""
    abstract = True
    # Possible values of `state`.
    UNAPPROVED = 'unapproved'
    APPROVED = 'approved'
    REJECTED = 'rejected'
    DISPLAY_NAME = 'Sanction'
    # SHORT_NAME must correspond with the associated foreign field to query against,
    # e.g. Node.find_one(Q(sanction.SHORT_NAME, 'eq', sanction))
    SHORT_NAME = 'sanction'
    # User-facing error messages; `{DISPLAY_NAME}` is substituted at raise time.
    APPROVAL_NOT_AUTHORIZED_MESSAGE = 'This user is not authorized to approve this {DISPLAY_NAME}'
    APPROVAL_INVALID_TOKEN_MESSAGE = 'Invalid approval token provided for this {DISPLAY_NAME}.'
    # NOTE(review): "MESSAEGE" is a typo, but the attribute is referenced by
    # name below (and possibly elsewhere), so renaming would break callers.
    REJECTION_NOT_AUTHORIZED_MESSAEGE = 'This user is not authorized to reject this {DISPLAY_NAME}'
    REJECTION_INVALID_TOKEN_MESSAGE = 'Invalid rejection token provided for this {DISPLAY_NAME}.'
    _id = fields.StringField(primary=True, default=lambda: str(ObjectId()))
    initiation_date = fields.DateTimeField(auto_now_add=datetime.datetime.utcnow)
    end_date = fields.DateTimeField(default=None)
    # Sanction subclasses must have an initiated_by field
    # initiated_by = fields.ForeignField('user', backref='initiated')
    # Expanded: Dictionary field mapping admin IDs their approval status and relevant tokens:
    # {
    #   'b3k97': {
    #     'has_approved': False,
    #     'approval_token': 'Pew7wj1Puf7DENUPFPnXSwa1rf3xPN',
    #     'rejection_token': 'TwozClTFOic2PYxHDStby94bCQMwJy'}
    # }
    approval_state = fields.DictionaryField()
    # One of 'unapproved', 'approved', or 'rejected'
    state = fields.StringField(default='unapproved')
    def __repr__(self):
        return '<Sanction(end_date={self.end_date}) with _id {self._id}>'.format(self=self)
    @property
    def pending_approval(self):
        """True while the sanction still awaits unanimous approval."""
        return self.state == Sanction.UNAPPROVED
    @property
    def is_approved(self):
        """True once every authorizer has approved."""
        return self.state == Sanction.APPROVED
    @property
    def is_rejected(self):
        """True once the sanction has been rejected."""
        return self.state == Sanction.REJECTED
    def _validate_authorizer(self, user):
        """Hook for subclasses; the base implementation accepts any user."""
        return True
    def add_authorizer(self, user, approved=False, save=False):
        """Register `user` as an authorizer with fresh approve/reject tokens.

        :param approved: initial `has_approved` flag for the user
        :param save: persist immediately when True
        :return: True if added; False when invalid or already registered
        """
        valid = self._validate_authorizer(user)
        if valid and user._id not in self.approval_state:
            self.approval_state[user._id] = {
                'has_approved': approved,
                'approval_token': tokens.encode(
                    {
                        'user_id': user._id,
                        'sanction_id': self._id,
                        'action': 'approve_{}'.format(self.SHORT_NAME)
                    }
                ),
                'rejection_token': tokens.encode(
                    {
                        'user_id': user._id,
                        'sanction_id': self._id,
                        'action': 'reject_{}'.format(self.SHORT_NAME)
                    }
                ),
            }
            if save:
                self.save()
            return True
        return False
    def remove_authorizer(self, user):
        """Remove `user` from the authorizer list; False when absent."""
        if user._id not in self.approval_state:
            return False
        del self.approval_state[user._id]
        self.save()
        return True
    def _on_approve(self, user, token):
        """Transition to APPROVED once every authorizer has approved."""
        if all(authorizer['has_approved'] for authorizer in self.approval_state.values()):
            self.state = Sanction.APPROVED
            self._on_complete(user)
    def _on_reject(self, user, token):
        """Early termination of a Sanction"""
        raise NotImplementedError('Sanction subclasses must implement an #_on_reject method')
    def _on_complete(self, user):
        """When a Sanction has unanimous approval"""
        raise NotImplementedError('Sanction subclasses must implement an #_on_complete method')
    def approve(self, user, token):
        """Add user to approval list if user is admin and token verifies."""
        try:
            if self.approval_state[user._id]['approval_token'] != token:
                raise InvalidSanctionApprovalToken(self.APPROVAL_INVALID_TOKEN_MESSAGE.format(DISPLAY_NAME=self.DISPLAY_NAME))
        except KeyError:
            # user._id absent from approval_state: not an authorizer.
            raise PermissionsError(self.APPROVAL_NOT_AUTHORIZED_MESSAGE.format(DISPLAY_NAME=self.DISPLAY_NAME))
        self.approval_state[user._id]['has_approved'] = True
        self._on_approve(user, token)
    def reject(self, user, token):
        """Cancels sanction if user is admin and token verifies."""
        try:
            if self.approval_state[user._id]['rejection_token'] != token:
                raise InvalidSanctionRejectionToken(self.REJECTION_INVALID_TOKEN_MESSAGE.format(DISPLAY_NAME=self.DISPLAY_NAME))
        except KeyError:
            raise PermissionsError(self.REJECTION_NOT_AUTHORIZED_MESSAEGE.format(DISPLAY_NAME=self.DISPLAY_NAME))
        self.state = Sanction.REJECTED
        self._on_reject(user, token)
    def forcibly_reject(self):
        """Reject without token verification (administrative override)."""
        self.state = Sanction.REJECTED
    def _notify_authorizer(self, user):
        pass
    def _notify_non_authorizer(self, user):
        pass
    def ask(self, group):
        """Notify each contributor in `group`, routed by authorizer status."""
        for contrib in group:
            if contrib._id in self.approval_state:
                self._notify_authorizer(contrib)
            else:
                self._notify_non_authorizer(contrib)
class EmailApprovableSanction(Sanction):
    """Sanction whose authorizers are notified by templated email."""
    AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = None
    NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = None
    VIEW_URL_TEMPLATE = ''
    APPROVE_URL_TEMPLATE = ''
    REJECT_URL_TEMPLATE = ''
    # Store a persistant copy of urls for use when needed outside of a request context.
    # This field gets automagically updated whenever models approval_state is modified
    # and the model is saved
    # {
    #   'abcde': {
    #     'approve': [APPROVAL_URL],
    #     'reject': [REJECT_URL],
    #   }
    # }
    stashed_urls = fields.DictionaryField(default=dict)
    @staticmethod
    def _format_or_empty(template, context):
        """Format `template` with `context`, or '' when context is falsy."""
        if context:
            return template.format(**context)
        return ''
    def _view_url(self, user_id):
        return self._format_or_empty(self.VIEW_URL_TEMPLATE, self._view_url_context(user_id))
    def _view_url_context(self, user_id):
        # Subclasses supply template substitutions; None yields ''.
        return None
    def _approval_url(self, user_id):
        return self._format_or_empty(self.APPROVE_URL_TEMPLATE, self._approval_url_context(user_id))
    def _approval_url_context(self, user_id):
        return None
    def _rejection_url(self, user_id):
        return self._format_or_empty(self.REJECT_URL_TEMPLATE, self._rejection_url_context(user_id))
    def _rejection_url_context(self, user_id):
        return None
    def _send_approval_request_email(self, user, template, context):
        """Send `template` to `user`'s username address with `context`."""
        mails.send_mail(
            user.username,
            template,
            user=user,
            **context
        )
    def _email_template_context(self, user, is_authorizer=False):
        # Subclasses build the mail-merge context.
        return {}
    def _notify_authorizer(self, authorizer):
        context = self._email_template_context(authorizer, is_authorizer=True)
        if self.AUTHORIZER_NOTIFY_EMAIL_TEMPLATE:
            self._send_approval_request_email(authorizer, self.AUTHORIZER_NOTIFY_EMAIL_TEMPLATE, context)
        else:
            raise NotImplementedError
    def _notify_non_authorizer(self, user):
        context = self._email_template_context(user)
        if self.NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE:
            self._send_approval_request_email(user, self.NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE, context)
        else:
            raise NotImplementedError
    def add_authorizer(self, user, **kwargs):
        """Add an authorizer and stash their view/approve/reject URLs.

        NOTE(review): unlike the base class this always saves and returns
        None (the base method's bool result is discarded) — confirm callers
        do not rely on the return value.
        """
        super(EmailApprovableSanction, self).add_authorizer(user, **kwargs)
        self.stashed_urls[user._id] = {
            'view': self._view_url(user._id),
            'approve': self._approval_url(user._id),
            'reject': self._rejection_url(user._id)
        }
        self.save()
class Embargo(EmailApprovableSanction):
    """Embargo object for registrations waiting to go public."""
    # Additional state beyond the base unapproved/approved/rejected.
    COMPLETED = 'completed'
    DISPLAY_NAME = 'Embargo'
    SHORT_NAME = 'embargo'
    AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_EMBARGO_ADMIN
    NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_EMBARGO_NON_ADMIN
    VIEW_URL_TEMPLATE = VIEW_PROJECT_URL_TEMPLATE
    APPROVE_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'
    REJECT_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'
    initiated_by = fields.ForeignField('user', backref='embargoed')
    for_existing_registration = fields.BooleanField(default=False)
    @property
    def is_completed(self):
        """True once the embargo has run to completion."""
        return self.state == self.COMPLETED
    @property
    def embargo_end_date(self):
        """End date while the embargo is approved.

        NOTE(review): returns False (not None) otherwise — callers appear
        to rely on truthiness; confirm before changing.
        """
        if self.state == self.APPROVED:
            return self.end_date
        return False
    # NOTE(hrybacki): Old, private registrations are grandfathered and do not
    # require to be made public or embargoed. This field differentiates them
    # from new registrations entering into an embargo field which should not
    # show up in any search related fields.
    @property
    def pending_registration(self):
        """True for a new (non-grandfathered) registration awaiting approval."""
        return not self.for_existing_registration and self.pending_approval
    def __repr__(self):
        parent_registration = None
        try:
            parent_registration = Node.find_one(Q('embargo', 'eq', self))
        except NoResultsFound:
            pass
        return ('<Embargo(parent_registration={0}, initiated_by={1}, '
                'end_date={2}) with _id {3}>').format(
            parent_registration,
            self.initiated_by,
            self.end_date,
            self._id
        )
    def _view_url_context(self, user_id):
        """Template context for the registration view URL."""
        registration = Node.find_one(Q('embargo', 'eq', self))
        return {
            'node_id': registration._id
        }
    def _approval_url_context(self, user_id):
        """Approval URL context; None when the user holds no token."""
        approval_token = self.approval_state.get(user_id, {}).get('approval_token')
        if approval_token:
            registration = Node.find_one(Q('embargo', 'eq', self))
            return {
                'node_id': registration._id,
                'token': approval_token,
            }
    def _rejection_url_context(self, user_id):
        """Rejection URL context; None when the user holds no token."""
        rejection_token = self.approval_state.get(user_id, {}).get('rejection_token')
        if rejection_token:
            registration = Node.find_one(Q('embargo', 'eq', self))
            return {
                'node_id': registration._id,
                'token': rejection_token,
            }
    def _email_template_context(self, user, is_authorizer=False, urls=None):
        """Build the mail-merge context for pending-embargo notifications."""
        urls = urls or self.stashed_urls.get(user._id, {})
        registration_link = urls.get('view', self._view_url(user._id))
        if is_authorizer:
            approval_link = urls.get('approve', '')
            disapproval_link = urls.get('reject', '')
            # Convert the pending window from days to hours for the template.
            approval_time_span = settings.EMBARGO_PENDING_TIME.days * 24
            registration = Node.find_one(Q('embargo', 'eq', self))
            return {
                'is_initiator': self.initiated_by == user,
                'initiated_by': self.initiated_by.fullname,
                'approval_link': approval_link,
                'project_name': registration.title,
                'disapproval_link': disapproval_link,
                'registration_link': registration_link,
                'embargo_end_date': self.end_date,
                'approval_time_span': approval_time_span,
            }
        else:
            return {
                'initiated_by': self.initiated_by.fullname,
                'registration_link': registration_link,
                'embargo_end_date': self.end_date,
            }
    def _validate_authorizer(self, user):
        """Only admins on the embargoed registration may authorize."""
        registration = Node.find_one(Q('embargo', 'eq', self))
        return registration.has_permission(user, ADMIN)
    def _on_reject(self, user, token):
        """Cancel the embargo; tear down the registration if it was new."""
        parent_registration = Node.find_one(Q('embargo', 'eq', self))
        parent_registration.registered_from.add_log(
            action=NodeLog.EMBARGO_CANCELLED,
            params={
                'node': parent_registration._id,
                'embargo_id': self._id,
            },
            auth=Auth(user),
        )
        # Remove backref to parent project if embargo was for a new registration
        if not self.for_existing_registration:
            parent_registration.delete_registration_tree(save=True)
            parent_registration.registered_from = None
        # Delete parent registration if it was created at the time the embargo was initiated
        # NOTE(review): duplicates the condition above; the two branches could
        # be merged into one block.
        if not self.for_existing_registration:
            parent_registration.is_deleted = True
        parent_registration.save()
    def disapprove_embargo(self, user, token):
        """Cancels embargo if user is admin and token verifies."""
        self.reject(user, token)
    def _on_complete(self, user):
        """Record approval once all authorizers have approved the embargo."""
        parent_registration = Node.find_one(Q('embargo', 'eq', self))
        parent_registration.registered_from.add_log(
            action=NodeLog.EMBARGO_APPROVED,
            params={
                'node': parent_registration._id,
                'embargo_id': self._id,
            },
            auth=Auth(self.initiated_by),
        )
        self.save()
    def approve_embargo(self, user, token):
        """Add user to approval list if user is admin and token verifies."""
        self.approve(user, token)
class Retraction(EmailApprovableSanction):
    """Retraction object for public registrations."""
    DISPLAY_NAME = 'Retraction'
    SHORT_NAME = 'retraction'
    AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_RETRACTION_ADMIN
    NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_RETRACTION_NON_ADMIN
    VIEW_URL_TEMPLATE = VIEW_PROJECT_URL_TEMPLATE
    APPROVE_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'
    REJECT_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'
    initiated_by = fields.ForeignField('user', backref='initiated')
    # Optional rationale, capped at 2048 characters.
    justification = fields.StringField(default=None, validate=MaxLengthValidator(2048))
    def __repr__(self):
        parent_registration = None
        try:
            parent_registration = Node.find_one(Q('retraction', 'eq', self))
        except NoResultsFound:
            pass
        return ('<Retraction(parent_registration={0}, initiated_by={1}) '
                'with _id {2}>').format(
            parent_registration,
            self.initiated_by,
            self._id
        )
    def _view_url_context(self, user_id):
        """Template context for the registration view URL."""
        registration = Node.find_one(Q('retraction', 'eq', self))
        return {
            'node_id': registration._id
        }
    def _approval_url_context(self, user_id):
        """Approval URL context; None when the user holds no token."""
        approval_token = self.approval_state.get(user_id, {}).get('approval_token')
        if approval_token:
            registration = Node.find_one(Q('retraction', 'eq', self))
            return {
                'node_id': registration._id,
                'token': approval_token,
            }
    def _rejection_url_context(self, user_id):
        """Rejection URL context; None when the user holds no token."""
        rejection_token = self.approval_state.get(user_id, {}).get('rejection_token')
        if rejection_token:
            registration = Node.find_one(Q('retraction', 'eq', self))
            return {
                'node_id': registration._id,
                'token': rejection_token,
            }
    def _email_template_context(self, user, is_authorizer=False, urls=None):
        """Build the mail-merge context for pending-retraction notifications."""
        urls = urls or self.stashed_urls.get(user._id, {})
        registration_link = urls.get('view', self._view_url(user._id))
        if is_authorizer:
            approval_link = urls.get('approve', '')
            disapproval_link = urls.get('reject', '')
            # Convert the pending window from days to hours for the template.
            approval_time_span = settings.RETRACTION_PENDING_TIME.days * 24
            registration = Node.find_one(Q('retraction', 'eq', self))
            return {
                'is_initiator': self.initiated_by == user,
                'initiated_by': self.initiated_by.fullname,
                'project_name': registration.title,
                'registration_link': registration_link,
                'approval_link': approval_link,
                'disapproval_link': disapproval_link,
                'approval_time_span': approval_time_span,
            }
        else:
            return {
                'initiated_by': self.initiated_by.fullname,
                'registration_link': registration_link,
            }
    def _on_reject(self, user, token):
        """Log cancellation of the retraction on the source project."""
        parent_registration = Node.find_one(Q('retraction', 'eq', self))
        parent_registration.registered_from.add_log(
            action=NodeLog.RETRACTION_CANCELLED,
            params={
                'node': parent_registration._id,
                'retraction_id': self._id,
            },
            auth=Auth(user),
            save=True,
        )
    def _on_complete(self, user):
        """Finalize the retraction: log it, void any embargo, force public."""
        parent_registration = Node.find_one(Q('retraction', 'eq', self))
        parent_registration.registered_from.add_log(
            action=NodeLog.RETRACTION_APPROVED,
            params={
                'node': parent_registration._id,
                'retraction_id': self._id,
            },
            auth=Auth(self.initiated_by),
        )
        # Remove any embargoes associated with the registration
        if parent_registration.embargo_end_date or parent_registration.is_pending_embargo:
            # NOTE(review): sets the embargo state directly instead of going
            # through its reject() flow, so embargo _on_reject hooks don't run.
            parent_registration.embargo.state = self.REJECTED
            parent_registration.registered_from.add_log(
                action=NodeLog.EMBARGO_CANCELLED,
                params={
                    'node': parent_registration._id,
                    'embargo_id': parent_registration.embargo._id,
                },
                auth=Auth(self.initiated_by),
            )
            parent_registration.embargo.save()
        # Ensure retracted registration is public
        if not parent_registration.is_public:
            parent_registration.set_privacy('public')
        parent_registration.update_search()
        # Retraction status is inherited from the root project, so we
        # need to recursively update search for every descendant node
        # so that retracted subrojects/components don't appear in search
        for node in parent_registration.get_descendants_recursive():
            node.update_search()
        self.save()
    def approve_retraction(self, user, token):
        """Add user's approval if the token verifies."""
        self.approve(user, token)
    def disapprove_retraction(self, user, token):
        """Cancel the retraction if the rejection token verifies."""
        self.reject(user, token)
class RegistrationApproval(EmailApprovableSanction):
    """Admin approval gate a new registration must pass before activation."""
    DISPLAY_NAME = 'Approval'
    SHORT_NAME = 'registration_approval'
    AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_REGISTRATION_ADMIN
    NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_REGISTRATION_NON_ADMIN
    VIEW_URL_TEMPLATE = VIEW_PROJECT_URL_TEMPLATE
    APPROVE_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'
    REJECT_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'
    initiated_by = fields.ForeignField('user', backref='registration_approved')
    def _view_url_context(self, user_id):
        """Template context for the registration view URL."""
        registration = Node.find_one(Q('registration_approval', 'eq', self))
        return {
            'node_id': registration._id
        }
    def _approval_url_context(self, user_id):
        """Approval URL context; None when the user holds no token."""
        approval_token = self.approval_state.get(user_id, {}).get('approval_token')
        if approval_token:
            registration = Node.find_one(Q('registration_approval', 'eq', self))
            return {
                'node_id': registration._id,
                'token': approval_token,
            }
    def _rejection_url_context(self, user_id):
        """Rejection URL context; None when the user holds no token."""
        rejection_token = self.approval_state.get(user_id, {}).get('rejection_token')
        if rejection_token:
            registration = Node.find_one(Q('registration_approval', 'eq', self))
            return {
                'node_id': registration._id,
                'token': rejection_token,
            }
    def _email_template_context(self, user, is_authorizer=False, urls=None):
        """Build the mail-merge context for pending-registration notifications."""
        urls = urls or self.stashed_urls.get(user._id, {})
        registration_link = urls.get('view', self._view_url(user._id))
        if is_authorizer:
            approval_link = urls.get('approve', '')
            disapproval_link = urls.get('reject', '')
            # Convert the pending window from days to hours for the template.
            approval_time_span = settings.REGISTRATION_APPROVAL_TIME.days * 24
            registration = Node.find_one(Q('registration_approval', 'eq', self))
            return {
                'is_initiator': self.initiated_by == user,
                'initiated_by': self.initiated_by.fullname,
                'registration_link': registration_link,
                'approval_link': approval_link,
                'disapproval_link': disapproval_link,
                'approval_time_span': approval_time_span,
                'project_name': registration.title,
            }
        else:
            return {
                'initiated_by': self.initiated_by.fullname,
                'registration_link': registration_link,
            }
    def _add_success_logs(self, node, user):
        """Record PROJECT_REGISTERED on the project `node` was registered from."""
        src = node.registered_from
        src.add_log(
            action=NodeLog.PROJECT_REGISTERED,
            params={
                'parent_node': src.parent_id,
                'node': src._primary_key,
                'registration': node._primary_key,
            },
            auth=Auth(user),
            save=False
        )
        src.save()
    def _on_complete(self, user):
        """Make the registration tree public and log the approval."""
        register = Node.find_one(Q('registration_approval', 'eq', self))
        registered_from = register.registered_from
        auth = Auth(self.initiated_by)
        register.set_privacy('public', auth, log=False)
        for child in register.get_descendants_recursive(lambda n: n.primary):
            child.set_privacy('public', auth, log=False)
        # Accounts for system actions where no `User` performs the final approval
        auth = Auth(user) if user else None
        registered_from.add_log(
            action=NodeLog.REGISTRATION_APPROVAL_APPROVED,
            params={
                'node': registered_from._id,
                'registration_approval_id': self._id,
            },
            auth=auth,
        )
        for node in register.root.node_and_primary_descendants():
            self._add_success_logs(node, user)
            node.update_search() # update search if public
        self.save()
    def _on_reject(self, user, token):
        """Delete the registration tree and log the cancellation."""
        register = Node.find_one(Q('registration_approval', 'eq', self))
        registered_from = register.registered_from
        register.delete_registration_tree(save=True)
        registered_from.add_log(
            action=NodeLog.REGISTRATION_APPROVAL_CANCELLED,
            params={
                'node': register._id,
                'registration_approval_id': self._id,
            },
            auth=Auth(user),
        )
|
petermalcolm/osf.io
|
website/project/model.py
|
Python
|
apache-2.0
| 129,819
|
[
"VisIt"
] |
5be1ad62358a4e35f2ccf1ab35dce0682f8d0b939752f94ad099b5a73b0bce6b
|
#!/usr/bin/env python
import os
from setuptools import setup
# Package metadata consumed by the setup() call below.
PROJECT = u'gnucash-util'
VERSION = '0.1'
URL = 'https://github.com/bstpierre/gnucash-util'
AUTHOR = u'Brian St. Pierre'
AUTHOR_EMAIL = u'brian@bstpierre.org'
DESC = "A collection of utilities for automating GnuCash 2.4+."
def read_file(file_name):
    """Return the full contents of *file_name*, resolved relative to this file.

    :param file_name: name of a file living next to setup.py (e.g. README.rst)
    :return: the file's text
    """
    file_path = os.path.join(
        os.path.dirname(__file__),
        file_name
    )
    # Use a context manager so the handle is closed promptly; the original
    # open(...).read() left the file object open until garbage collection.
    with open(file_path) as f:
        return f.read()
# Package definition; metadata constants are declared above.
setup(
    name=PROJECT,
    version=VERSION,
    description=DESC,
    long_description=read_file('README.rst'),
    author=AUTHOR,
    author_email=AUTHOR_EMAIL,
    url=URL,
    license='MIT License',
    packages=['gnucash_util'],
    scripts=['scripts/gnc-freshbooks-import-invoice', ],
    include_package_data=True,
    install_requires=[
        # -*- Requirements -*-
        ## GnuCash-2.4
    ],
    entry_points = {
        # -*- Entry points -*-
    },
    classifiers=[
        # -*- Classifiers -*-
        "Development Status :: 3 - Alpha",
        "Environment :: Console",
        # NOTE(review): this classifier declares ISC, but license= above says
        # MIT — confirm which license is intended and make them agree.
        "License :: OSI Approved :: ISC License (ISCL)",
        "Natural Language :: English",
        "Programming Language :: Python",
        "Topic :: Office/Business :: Financial :: Accounting",
    ]
)
|
bstpierre/gnucash-util
|
setup.py
|
Python
|
mit
| 1,242
|
[
"Brian"
] |
1b2f86b61178e82b01053defef230467007ac9563d52653c44b7c9ba1a35d6e4
|
#!/usr/bin/env python2.4
"""
Script that imports locally stored data as a new dataset for the user
Usage: import id outputfile

argv[1] is a comma-separated list of dataset UIDs; argv[2] is the path the
first matching file is copied to. Remaining matches are announced on stdout
as '#NewFile' lines for a post-process filter to pick up.

NOTE(review): Python 2 script; the bare `except:` clauses below swallow all
errors (including KeyboardInterrupt) — consider narrowing them.
"""
import sys, os
from shutil import copyfile
#tempfile, shutil
BUFFER = 1048576
uids = sys.argv[1].split(",")
out_file1 = sys.argv[2]
#remove NONE from uids
# list.remove raises ValueError when 'None' is absent; loop until exhausted.
have_none = True
while have_none:
    try:
        uids.remove('None')
    except:
        have_none = False
#create dictionary keyed by uid of tuples of (displayName,filePath,build) for all files
available_files = {}
try:
    # Tab-separated config: DATA <uid> <org_num> <chr_acc> <feature> <filetype> <path>
    for line in open( "/depot/data2/galaxy/microbes/microbial_data.loc" ):
        if not line or line[0:1] == "#" : continue
        fields = line.split('\t')
        try:
            info_type = fields.pop(0)
            if info_type.upper()=="DATA":
                uid = fields.pop(0)
                org_num = fields.pop(0)
                chr_acc = fields.pop(0)
                feature = fields.pop(0)
                filetype = fields.pop(0)
                # Strip line endings from the final (path) column.
                path = fields.pop(0).replace("\r","").replace("\n","")
                file_type = filetype
                build = org_num
                description = uid
            else:
                continue
        except:
            continue
        available_files[uid]=(description,path,build,file_type,chr_acc)
except:
    print >>sys.stderr, "It appears that the configuration file for this tool is missing."
#create list of tuples of (displayName,FileName,build) for desired files
desired_files = []
for uid in uids:
    try:
        desired_files.append(available_files[uid])
    except:
        continue
#copy first file to contents of given output file
# Keep trying until one of the requested files copies successfully.
file1_copied = False
while not file1_copied:
    try:
        first_file = desired_files.pop(0)
    except:
        print >>sys.stderr, "There were no valid files requested."
        sys.exit()
    file1_desc, file1_path, file1_build, file1_type,file1_chr_acc = first_file
    try:
        copyfile(file1_path,out_file1)
        print "#File1\t"+file1_desc+"\t"+file1_chr_acc+"\t"+file1_build+"\t"+file1_type
        file1_copied = True
    except:
        print >>sys.stderr, "The file specified is missing."
        continue
        #print >>sys.stderr, "The file specified is missing."
#Tell post-process filter where remaining files reside
for extra_output in desired_files:
    file_desc, file_path, file_build, file_type,file_chr_acc = extra_output
    print "#NewFile\t"+file_desc+"\t"+file_chr_acc+"\t"+file_build+"\t"+file_path+"\t"+file_type
|
jmchilton/galaxy-central
|
tools/data_source/microbial_import.py
|
Python
|
mit
| 2,528
|
[
"Galaxy"
] |
211f1958a2722a9142f6df7ddca6916f38955a3ad021b661614271ce20227fdb
|
import os, sys, re
import optparse
import shutil
import pandas
import numpy
import gc
import subprocess
import uniprot as uni
from natsort import natsorted, ns
import warnings
##### TAKEN FROM STACK OVERFLOW ##### (to redirect Pandas warnings to STDOUT instead of STDERR for Galaxy...)
def customwarn(message, category, filename, lineno, file=None, line=None):
    """Drop-in replacement for warnings.showwarning that writes to stdout.

    Redirects warning output (e.g. from pandas) to stdout instead of
    stderr, per the original Stack Overflow recipe noted above.
    """
    formatted = warnings.formatwarning(message, category, filename, lineno)
    sys.stdout.write(formatted)
# Install the redirect globally for the rest of the script.
warnings.showwarning = customwarn
#####################################
#####################################
#This is a script to combine the output reports from
#Skyline, in preparation for MSstats! Let's get started.
#
#VERSION 0.1
version="0.1"
#DATE: 5/18/2017
date="5/18/2017"
#####################################
# Banner printed on every run (Python 2 print statements).
print "-----------------------------------------------------------------------"
print "Welcome to the SAINTq wrapper for Galaxy, Wohlschlegel Lab UCLA"
print "Written by William Barshop"
print "Version: ",version
print "Date: ",date
# Remember the launch directory for later relative-path work.
basedir=os.getcwd()
####################################
#Argument parsing! So much fun!
#We'll use OptParse even though some
#people really rave about argparse...
#
#
# NB: With Optparse, if an option is
#     not specified, it will take a
#     value of None
####################################
parser = optparse.OptionParser()
parser.add_option("--mprophet_q",action="store",type="float",dest="mprophet_q") #We'll throw out things above this q-value threshold.
parser.add_option("--input_file",action="store",type="string",dest="input_file")
parser.add_option("--output_file",action="store",type="string",dest="output_file")
parser.add_option("--simple_output_file",action="store",type="string",dest="simple_output_file")
parser.add_option("--exp_file",action="store",type="string",dest="exp_file")
parser.add_option("--quant_level",action="store",type="string",dest="quant_level")#"peptide","protein",or "fragment"
parser.add_option("--best_prop_pep",action="store",type="float",dest="best_prop_pep")
parser.add_option("--min_n_pep",action="store",type="int",dest="min_n_pep")#Minimum number of peptides
parser.add_option("--best_prop_frag",action="store",type="float",dest="best_prop_frag")
parser.add_option("--min_n_frag",action="store",type="int",dest="min_n_frag")#Minimum number of fragments...
parser.add_option("--compress_n_ctrl",action="store",type="int",dest="compress_n_ctrl")
parser.add_option("--compress_n_rep",action="store",type="int",dest="compress_n_rep")
parser.add_option("--normalize_control",action="store",type="string",dest="normalize_control")#"true" or "false"
parser.add_option("--remove_repeated_peptides",action="store_true",dest="remove_repeated_peptides")
parser.add_option("--fill_missing_features",action="store_true",dest="fillMissingFeatures")
(options,args) = parser.parse_args()
# Experiment design table: tab-separated, one row per run/group assignment.
group_information = pandas.read_csv(options.exp_file,sep='\t')
def appendFraction(x):
    """Return the modified peptide sequence with its fraction tag appended.

    Expects a row with 'Peptide Modified Sequence' and 'File Name' columns;
    the fraction number is taken from the trailing "-<frac>" segment of the
    file name (before the extension). Exits the script on any failure.
    """
    try:
        #appended_name=str(x['Peptide Modified Sequence']+"_"+str(x['File Name'].split(".")[0].rsplit("-",1)[1]))# MAY HAVE TO CHANGE x['FILE NAME'] TO STR(x['FILE NAME']).... !!!!!!!!!!!! # 3/3/2016 -- BACK TO THIS... See: https://groups.google.com/forum/#!searchin/msstats/multiple$20methods/msstats/ZzP3Q8hGXBY/oTYo60cfovMJ
        appended_name=str(x['Peptide Modified Sequence']+"-Frac"+str(x['File Name'].split(".")[0].rsplit("-",1)[1]))# MAY HAVE TO CHANGE x['FILE NAME'] TO STR(x['FILE NAME']).... !!!!!!!!!!!! # 3/3/2016 changed to - to make sure we aren't screwing with MSstats
        return appended_name
    except:
        print "FAILED ON CORRECTING FRACTION NAMES ON",x
        sys.exit(0)
def fixFileName(x):
    """Strip the extension and the trailing '-<fraction>' suffix from a row's file name."""
    base = x['File Name'].split('.')[0]
    return str(base.rsplit("-", 1)[0])
def peptide_level_fixer(x):
    """Return the sequence portion of a '<sequence>_<charge>' identifier string."""
    head, _, _tail = x.partition("_")
    return head
# ---------------------------------------------------------------------------
# Build the SAINTq intensity matrix from the input report.
# peptide/fragment level: long-format CSV is filtered (decoys, q-value),
# optionally cleaned, then pivoted to one 'Condition_FileName' column per run.
# protein level: input is assumed to be an already-pivoted log2 matrix.
# ---------------------------------------------------------------------------
if options.quant_level!="protein":
    input_data=pandas.read_csv(options.input_file,sep=',',index_col=False)
    input_data.rename(columns={'Protein Name':'ProteinName'},inplace=True)
    # Drop decoy hits before doing anything else.
    combined_results=input_data[numpy.invert(input_data.ProteinName.str.contains("Decoys"))]
    combined_results.rename(columns={'ProteinName':'Protein Name'},inplace=True)
    combined_results.sort_values(by='Protein Name',inplace=True)
    if options.mprophet_q is not None:
        # Zero out (rather than drop) areas failing the mProphet q-value cutoff.
        combined_results.loc[combined_results['annotation_QValue']>options.mprophet_q,'Area']=0.0
    if options.quant_level=="peptide":
        # Sum all fragment areas of a precursor within a run; keep one row per
        # precursor/run with Fragment Ion renamed to "sum".
        combined_results['Precursor Charge']=combined_results['Precursor Charge'].astype(str)
        combined_results['unique_name']=combined_results['Peptide Modified Sequence']+"_"+combined_results['Precursor Charge']+"_"+combined_results['File Name']+"_"+combined_results['Protein Name']
        combined_results['Precursor Charge']=combined_results['Precursor Charge'].astype(int)
        groups=combined_results.groupby(by=['unique_name'],as_index=False).agg({'Area':numpy.sum})
        combined_results.drop('Area',1,inplace=True)
        merged_results=pandas.merge(combined_results,groups,on=['unique_name'])
        merged_results.drop_duplicates(subset='unique_name',inplace=True)#change cols to subset for newer pandas...
        merged_results['Fragment Ion']="sum"
        combined_results=merged_results
        column_list=combined_results.columns.tolist()
        column_list.append(column_list.pop(column_list.index('Standard Type')))
        column_list.append(column_list.pop(column_list.index('Truncated')))
        # NOTE(review): reindex returns a new frame and the result is discarded
        # here, so this column reordering has no effect — confirm intent.
        combined_results.reindex(columns=column_list)
        combined_results.drop('unique_name',1,inplace=True)
        combined_results.fillna(value=0.0,inplace=True)
        combined_results['Precursor Charge']=combined_results['Precursor Charge'].astype(str)
    if options.remove_repeated_peptides:
        # Drop every occurrence (keep=False) of any feature seen in the same
        # run more than once.
        combined_results['Precursor Charge']=combined_results['Precursor Charge'].astype(str)
        combined_results['unique_name']=combined_results['Peptide Modified Sequence']+"_"+combined_results['Precursor Charge']+"_"+combined_results['Fragment Ion']+"_"+combined_results['File Name']
        combined_results['Precursor Charge']=combined_results['Precursor Charge'].astype(int)
        print "Removing duplicate peptides...",len(combined_results)
        combined_results.drop_duplicates(subset='unique_name',keep=False,inplace=True)
        print "Done!",len(combined_results)
        combined_results.drop('unique_name',1,inplace=True)
    #if options.rename:
    #    combined_results.rename(columns={'Protein Name':'ProteinName','Peptide Modified Sequence':'PeptideSequence','Precursor Charge':'PrecursorCharge','Fragment Ion':'FragmentIon','Product Charge':'ProductCharge','Isotope Label Type':'IsotopeLabelType','Standard Type':'StandardType','File Name':'Run','Area':'Intensity'},inplace=True)
    if options.fillMissingFeatures:
        # For every feature group missing from some run, clone one of its rows
        # and re-tag it with the missing run's name/condition/bioreplicate.
        bioreplicate_dict={}
        condition_dict={}
        for each_run in combined_results['File Name'].unique():
            temp=combined_results[combined_results['File Name']==each_run]
            bioreplicate_dict[each_run]=temp['BioReplicate'].unique()[0]
            condition_dict[each_run]=temp['Condition'].unique()[0]
        grouped_df=combined_results.groupby(["Peptide Modified Sequence","Precursor Charge","Fragment Ion","Product Charge"])
        concat_list=[]
        correct_length=len(bioreplicate_dict.keys())
        for name,eachgroup in grouped_df:
            if len(eachgroup)!=correct_length:
                for each_name in bioreplicate_dict.keys():#name_list:
                    if each_name not in eachgroup['File Name'].unique():
                        new_row=eachgroup.head(n=1).copy(deep=True)
                        new_row['File Name']=each_name
                        # NOTE(review): this sets an 'Intensity' column, but the
                        # area column used throughout this pipeline is 'Area';
                        # the cloned row keeps the donor's Area value — confirm.
                        new_row['Intensity']=numpy.nan
                        new_row['Condition']=condition_dict[each_name]
                        new_row['BioReplicate']=bioreplicate_dict[each_name]
                        concat_list.append(new_row)
        concat_list.append(combined_results)
        combined_results=pandas.concat(concat_list)
        #combined_results=pandas.concat([combined_results,new_row])
if options.quant_level=="peptide":
    # Pivot to wide format: one row per precursor, one column per run
    # ('Condition_FileName'), values are the summed areas.
    combined_results['Precursor Charge']=combined_results['Precursor Charge'].astype(str)
    combined_results['Product Charge']=combined_results['Product Charge'].astype(str)
    combined_results['unique_nofile_name']=combined_results['Peptide Modified Sequence']+"_"+combined_results['Precursor Charge']+"_"+combined_results['Protein Name']+"_"+combined_results["Fragment Ion"]+"_"+combined_results["Product Charge"]
    combined_results['Precursor Charge']=combined_results['Precursor Charge'].astype(int)
    combined_results['Product Charge']=combined_results['Product Charge'].astype(int)
    combined_results['file']=combined_results['Condition']+"_"+combined_results['File Name']
    combined_results.fillna(value=0.0,inplace=True)
    pivot_df=combined_results[['unique_nofile_name','file','Area']]
    pivot_df=pivot_df.pivot(index='unique_nofile_name',columns='file',values='Area')
    combined_results=combined_results[['Protein Name','Peptide Modified Sequence','Precursor Charge','unique_nofile_name']]
    combined_results['Precursor Charge']=combined_results['Precursor Charge'].astype(str)
    combined_results['peptide']=combined_results['Peptide Modified Sequence']+"_"+combined_results['Precursor Charge']
    combined_results=combined_results.drop_duplicates(subset=['unique_nofile_name'])
    combined_results=combined_results.merge(right=pivot_df, left_on='unique_nofile_name',right_index=True)
    combined_results=combined_results.drop('unique_nofile_name', 1)
    combined_results=combined_results.drop('Precursor Charge', 1)
    combined_results=combined_results.drop('Peptide Modified Sequence', 1)
    combined_results.sort_values(by=['peptide'],inplace=True)
    combined_results.rename(columns={'Protein Name':'Protein'},inplace=True)
elif options.quant_level=="fragment":
    # Same pivot as above but one row per fragment (precursor + fragment ion +
    # product charge), keeping 'peptide' and 'fragment' label columns.
    combined_results['Precursor Charge']=combined_results['Precursor Charge'].astype(str)
    combined_results['Product Charge']=combined_results['Product Charge'].astype(str)
    combined_results['unique_nofile_name']=combined_results['Peptide Modified Sequence']+"_"+combined_results['Precursor Charge']+"_"+combined_results['Protein Name']+"_"+combined_results["Fragment Ion"]+"_"+combined_results["Product Charge"]
    combined_results['unique_name']=combined_results['Peptide Modified Sequence']+"_"+combined_results['Precursor Charge']+"_"+combined_results['Protein Name']+"_"+combined_results["Fragment Ion"]+"_"+combined_results["Product Charge"]+"_"+combined_results['File Name']
    combined_results['Precursor Charge']=combined_results['Precursor Charge'].astype(int)
    combined_results['Product Charge']=combined_results['Product Charge'].astype(int)
    combined_results['file']=combined_results['Condition']+"_"+combined_results['File Name']
    combined_results.fillna(value=0.0,inplace=True)
    pivot_df=combined_results[['unique_nofile_name','file','Area','unique_name']]
    pivot_df.drop_duplicates(subset='unique_name',inplace=True)
    pivot_df.drop('unique_name',1,inplace=True)
    pivot_df=pivot_df.pivot(index='unique_nofile_name',columns='file',values='Area')
    combined_results=combined_results[['Protein Name','Peptide Modified Sequence','Precursor Charge','Fragment Ion','Product Charge','unique_nofile_name']]
    combined_results['Precursor Charge']=combined_results['Precursor Charge'].astype(str)
    combined_results['Product Charge']=combined_results['Product Charge'].astype(str)
    combined_results['peptide']=combined_results['Peptide Modified Sequence']+"_"+combined_results['Precursor Charge']
    combined_results['fragment']=combined_results['Fragment Ion']+"_"+combined_results['Product Charge']
    combined_results=combined_results.drop_duplicates(subset=['unique_nofile_name'])
    combined_results=combined_results.merge(right=pivot_df, left_on='unique_nofile_name',right_index=True)
    combined_results=combined_results.drop('unique_nofile_name', 1)
    combined_results=combined_results.drop('Precursor Charge', 1)
    combined_results=combined_results.drop('Peptide Modified Sequence', 1)
    combined_results=combined_results.drop('Product Charge', 1)
    combined_results=combined_results.drop('Fragment Ion', 1)
    combined_results.sort_values(by=['peptide'],inplace=True)
    combined_results.rename(columns={'Protein Name':'Protein'},inplace=True)
elif options.quant_level=="protein":
    combined_results=pandas.read_csv(options.input_file,sep=',',index_col=0)
    #combined_results=combined_results
    # Undo the log2 transform (2**x) on every numeric column for SAINTq.
    combined_results.update(combined_results[combined_results.select_dtypes(include=['number']).columns].applymap(numpy.exp2),overwrite=True)
    combined_results.fillna(value=0.0,inplace=True)
    #print combined_results
    #pass
# NOTE(review): at protein level the frame was read with index_col=0, so
# index=False drops that first (protein name?) column on write — confirm.
combined_results.to_csv("SAINTq_combined_input.tsv",sep="\t",index=False)
print "We have now written the combined SAINTq intensity input file to SAINTq_combined_input.csv"
#Let's start by reading in the experiment structure.
#We're going to have to add the Control/Test headers for SAINTq.
# Copy the combined matrix to the final SAINTq input, prepending two header
# rows: a T/C (test/control) row and a biological-condition row, with one
# blank leading cell per annotation column.
header=True
with open("SAINTq_combined_input.tsv",'rb') as tab_reader:
    with open("final_saint_input_file.tsv",'wb') as final_writer:
        for each_row in tab_reader:
            if header==True:
                header=False
                columns=each_row.split("\t")
                if options.quant_level=="protein":
                    starting_index=1 #We skip the Protein column
                elif options.quant_level=="peptide":
                    starting_index=2 #Skip protein and Peptide columns
                elif options.quant_level=="fragment":
                    starting_index=3 #Skip protein, peptide and fragment columns
                #print "all columns...",columns
                #print "starting index is...",starting_index
                run_samples=columns[starting_index:]#We skip the Protein column
                run_T_or_C_vector=[] #populate with tabs, C, or T's
                run_condition_vector=[] #populate with tabs, and conditions
                for x in xrange(0,starting_index): #prepare the number of tabs needed...
                    run_T_or_C_vector.append("")
                    run_condition_vector.append("")
                for each_sample in run_samples:
                    if options.quant_level=="protein":
                        #print each_sample
                        # Column names are '<condition>_<anything>' at protein level.
                        run_condition=each_sample.rsplit("_",1)[0]
                        #print run_condition,"this is run cond"
                        #print group_information[group_information['Biological Condition']==run_condition]
                        if "T" in group_information[group_information['Biological Condition']==run_condition]['Test or Control'].tolist()[0]:
                            run_T_or_C_vector.append("T")
                        else:
                            run_T_or_C_vector.append("C")
                    else:
                        # Column names are '<condition>_<file name>'; map the file
                        # name back to its condition via the experiment table.
                        run_condition= group_information[group_information['Original File Name']==each_sample.rsplit(".",1)[0].split("_",1)[1]]['Biological Condition'].tolist()[0]
                        if "T" in group_information[group_information['Biological Condition']==run_condition]['Test or Control'].unique().tolist():
                            run_T_or_C_vector.append("T")
                        else:
                            run_T_or_C_vector.append("C")
                    run_condition_vector.append(run_condition)
                # Terminate both header rows with a newline on the last cell.
                run_condition_vector[-1]+="\n"
                run_T_or_C_vector[-1]+="\n"
                final_writer.write("\t".join(run_T_or_C_vector))
                final_writer.write("\t".join(run_condition_vector))
            final_writer.write(each_row)
# Emit the SAINTq parameter file; which keys are written depends on the
# chosen quantification level (fragment implies the peptide keys too).
with open("saintq_configuration",'wb') as config_writer:
    config_writer.write("normalize_control="+options.normalize_control+"\n")
    config_writer.write("input_filename=final_saint_input_file.tsv\n")
    config_writer.write("input_level="+options.quant_level+"\n")
    config_writer.write("protein_colname=Protein\n")
    if options.quant_level=="peptide" or options.quant_level=="fragment":
        config_writer.write("pep_colname=peptide\n")
        if options.quant_level=="fragment":
            config_writer.write("frag_colname=fragment\n")
    config_writer.write("compress_n_ctrl={0}\n".format(options.compress_n_ctrl))
    config_writer.write("compress_n_rep={0}\n".format(options.compress_n_rep))
    if options.quant_level=="peptide" or options.quant_level=="fragment":
        config_writer.write("min_n_pep={0}\n".format(options.min_n_pep))
        config_writer.write("best_prop_pep={0}\n".format(options.best_prop_pep))
        if options.quant_level=="fragment":
            config_writer.write("min_n_frag={0}\n".format(options.min_n_frag))
            config_writer.write("best_prop_frag={0}\n".format(options.best_prop_frag))
# Run SAINTq on the generated configuration, then collect its outputs.
os.system("saintq saintq_configuration")
# Find SAINTq's score list in the working directory.
# NOTE(review): if no "scores_list_" file exists, each_file is left bound to
# the last directory entry (or the shutil.copy below raises NameError on an
# empty directory) — confirm saintq always produces one.
for each_file in os.listdir(os.getcwd()):
    if "scores_list_" in each_file:
        break
#copy raw output...
shutil.copy(each_file,options.output_file)
# Also produce a simple Prey x Bait matrix of AvgP scores.
raw_df=pandas.read_csv(options.output_file,sep='\t')
pivot_output=raw_df.pivot(index='Prey',columns='Bait',values='AvgP')
pivot_output.fillna(value=0.0,inplace=True)
pivot_output.to_csv(options.simple_output_file,sep="\t")
#print pivot_output
#shutil.copy(simple_output,options.simple_output_file)
print "All done!"
|
wohllab/milkyway_proteomics
|
galaxy_milkyway_files/tools/wohl-proteomics/SAINTq/saintq_wrapper.py
|
Python
|
mit
| 17,364
|
[
"Galaxy"
] |
8502b6ad5e415d1764e84bbbbd47b05a0aaaf4d6ff1c1787ffc35ebce90cdb6b
|
"""
Distributions
-------------
A widget for plotting attribute distributions.
"""
from math import sqrt
import sys
import collections
from xml.sax.saxutils import escape
from PyQt4 import QtGui, QtCore
from PyQt4.QtCore import Qt
from PyQt4.QtGui import QToolTip
import numpy
import pyqtgraph as pg
import Orange.data
from Orange.statistics import distribution, contingency
from Orange.widgets import widget, gui, settings
from Orange.widgets.utils import itemmodels
from Orange.widgets.widget import InputSignal
from Orange.widgets.visualize.owlinearprojection import LegendItem, ScatterPlotItem
from Orange.widgets.io import FileFormat
from .owscatterplotgraph import HelpEventDelegate
def selected_index(view):
    """Return the selected row in `view`, or -1 when nothing is selected.

    `view` must be in single selection mode.
    """
    chosen = view.selectedIndexes()
    assert len(chosen) < 2, "View must be in single selection mode"
    return chosen[0].row() if chosen else -1
class DistributionBarItem(pg.GraphicsObject):
    """A single histogram bar drawn as horizontally stacked colored segments.

    `geometry` is the bar's bounding rectangle, `dist` the per-segment
    height fractions, and `colors` the matching fill colors.  The bar is
    rendered once into a QPicture and replayed on subsequent paints.
    """

    def __init__(self, geometry, dist, colors):
        super().__init__()
        self.geometry = geometry
        self.dist = dist
        self.colors = colors
        self.__picture = None

    def paint(self, painter, options, widget):
        # Render lazily on first paint, then replay the cached picture.
        if self.__picture is None:
            self.__paint()
        painter.drawPicture(0, 0, self.__picture)

    def boundingRect(self):
        return self.geometry

    def __paint(self):
        pic = QtGui.QPicture()
        p = QtGui.QPainter(pic)
        outline = QtGui.QPen(QtGui.QBrush(Qt.white), 0.5)
        outline.setCosmetic(True)
        p.setPen(outline)

        rect = self.geometry
        left, base = rect.x(), rect.y()
        width, height = rect.width(), rect.height()
        seg_w = width / len(self.dist)
        for k, (frac, col) in enumerate(zip(self.dist, self.colors)):
            p.setBrush(QtGui.QBrush(col))
            p.drawRect(QtCore.QRectF(left + k * seg_w, base, seg_w, frac * height))
        p.end()
        self.__picture = pic
class OWDistributions(widget.OWWidget):
    """Orange widget that plots value distributions of a single feature,
    optionally grouped by a discrete variable, with conditional-probability
    overlays for grouped continuous/discrete views."""
    name = "Distributions"
    description = "Display value distributions of a data feature in a graph."
    icon = "icons/Distribution.svg"
    priority = 100

    inputs = [InputSignal("Data", Orange.data.Table, "set_data",
                          doc="Set the input data set")]

    settingsHandler = settings.DomainContextHandler(
        match_values=settings.DomainContextHandler.MATCH_VALUES_ALL)
    #: Selected variable index
    variable_idx = settings.ContextSetting(-1)
    #: Selected group variable
    groupvar_idx = settings.ContextSetting(0)

    # Display toggles (persisted settings).
    relative_freq = settings.Setting(False)
    disc_cont = settings.Setting(False)

    # Index into smoothing_facs (continuous) / bins (binned) below.
    smoothing_index = settings.Setting(5)
    # 0 = no probability curve, 1..n = one group value, n+1 = all values.
    show_prob = settings.ContextSetting(0)

    graph_name = "plot"

    # Number of shifted histograms used by ash_curve.
    ASH_HIST = 50

    bins = [ 2, 3, 4, 5, 8, 10, 12, 15, 20, 30, 50 ]
    smoothing_facs = list(reversed([ 0.1, 0.2, 0.4, 0.6, 0.8, 1, 1.5, 2, 4, 6, 10 ]))

    def __init__(self):
        """Build the control panel (variable list, precision slider, group-by
        combo) and the pyqtgraph plot area with a linked probability axis."""
        super().__init__()
        self.data = None

        self.distributions = None
        self.contingencies = None
        self.var = self.cvar = None
        varbox = gui.widgetBox(self.controlArea, "Variable")

        self.varmodel = itemmodels.VariableListModel()
        self.groupvarmodel = []

        self.varview = QtGui.QListView(
            selectionMode=QtGui.QListView.SingleSelection)
        self.varview.setSizePolicy(
            QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
        self.varview.setModel(self.varmodel)
        self.varview.setSelectionModel(
            itemmodels.ListSingleSelectionModel(self.varmodel))
        self.varview.selectionModel().selectionChanged.connect(
            self._on_variable_idx_changed)
        varbox.layout().addWidget(self.varview)

        box = gui.widgetBox(self.controlArea, "Precision")
        gui.separator(self.controlArea, 4, 4)
        box2 = gui.widgetBox(box, orientation="horizontal")
        self.l_smoothing_l = gui.widgetLabel(box2, "Smooth")
        gui.hSlider(box2, self, "smoothing_index",
                    minValue=0, maxValue=len(self.smoothing_facs) - 1,
                    callback=self._on_set_smoothing, createLabel=False)
        self.l_smoothing_r = gui.widgetLabel(box2, "Precise")

        self.cb_disc_cont = gui.checkBox(
            gui.indentedBox(box, sep=4),
            self, "disc_cont", "Bin continuous variables",
            callback=self._on_groupvar_idx_changed,
            tooltip="Show continuous variables as discrete.")

        box = gui.widgetBox(self.controlArea, "Group by")
        self.icons = gui.attributeIconDict
        self.groupvarview = gui.comboBox(box, self, "groupvar_idx",
            callback=self._on_groupvar_idx_changed, valueType=str,
            contentsLength=12)
        box2 = gui.indentedBox(box, sep=4)
        self.cb_rel_freq = gui.checkBox(
            box2, self, "relative_freq", "Show relative frequencies",
            callback=self._on_relative_freq_changed,
            tooltip="Normalize probabilities so that probabilities for each group-by value sum to 1.")
        gui.separator(box2)
        self.cb_prob = gui.comboBox(
            box2, self, "show_prob", label="Show probabilities",
            orientation="horizontal",
            callback=self._on_relative_freq_changed,
            tooltip="Show probabilities for a chosen group-by value (at each point probabilities for all group-by values sum to 1).")

        self.plotview = pg.PlotWidget(background=None)
        self.plotview.setRenderHint(QtGui.QPainter.Antialiasing)
        self.mainArea.layout().addWidget(self.plotview)
        w = QtGui.QLabel()
        w.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
        self.mainArea.layout().addWidget(w, Qt.AlignCenter)

        self.ploti = pg.PlotItem()
        self.plot = self.ploti.vb
        self.ploti.hideButtons()
        self.plotview.setCentralItem(self.ploti)

        # Secondary (right-axis) view box for probability curves, x-linked
        # to the main plot so they pan/zoom together.
        self.plot_prob = pg.ViewBox()
        self.ploti.hideAxis('right')
        self.ploti.scene().addItem(self.plot_prob)
        self.ploti.getAxis("right").linkToView(self.plot_prob)
        self.ploti.getAxis("right").setLabel("Probability")
        self.plot_prob.setZValue(10)
        self.plot_prob.setXLink(self.ploti)
        self.update_views()
        self.ploti.vb.sigResized.connect(self.update_views)
        self.plot_prob.setRange(yRange=[0,1])
        self.inline_graph_report()

        def disable_mouse(plot):
            # The widget is a static display; disable pan/zoom and the menu.
            plot.setMouseEnabled(False, False)
            plot.setMenuEnabled(False)

        disable_mouse(self.plot)
        disable_mouse(self.plot_prob)

        self.tooltip_items = []
        self.plot.scene().installEventFilter(
            HelpEventDelegate(self.help_event, self))

        pen = QtGui.QPen(self.palette().color(QtGui.QPalette.Text))
        for axis in ("left", "bottom"):
            self.ploti.getAxis(axis).setPen(pen)

        self._legend = LegendItem()
        self._legend.setParentItem(self.plot)
        self._legend.hide()
        self._legend.anchor((1, 0), (1, 0))

    def update_views(self):
        """Keep the probability view box aligned with the main plot geometry."""
        self.plot_prob.setGeometry(self.plot.sceneBoundingRect())
        self.plot_prob.linkedViewChanged(self.plot, self.plot_prob.XAxis)

    def set_data(self, data):
        """Input signal handler: repopulate variable/group models, restore
        context settings, and redraw."""
        self.closeContext()
        self.clear()
        self.data = data
        if self.data is not None:
            domain = self.data.domain
            self.varmodel[:] = list(domain)
            self.groupvarview.clear()
            self.groupvarmodel = \
                ["(None)"] + [var for var in domain if var.is_discrete]
            self.groupvarview.addItem("(None)")
            for var in self.groupvarmodel[1:]:
                self.groupvarview.addItem(self.icons[var], var.name)
            if domain.has_discrete_class:
                # Default the group-by to the class variable when there is one.
                self.groupvar_idx = \
                    self.groupvarmodel[1:].index(domain.class_var) + 1
            self.openContext(domain)
            # Clamp restored indices to the current models' ranges.
            self.variable_idx = min(max(self.variable_idx, 0),
                                    len(self.varmodel) - 1)
            self.groupvar_idx = min(max(self.groupvar_idx, 0),
                                    len(self.groupvarmodel) - 1)
            itemmodels.select_row(self.varview, self.variable_idx)
            self._setup()

    def clear(self):
        """Reset plots, models, selection state and the legend."""
        self.plot.clear()
        self.plot_prob.clear()
        self.varmodel[:] = []
        self.groupvarmodel = []
        self.variable_idx = -1
        self.groupvar_idx = 0
        self._legend.clear()
        self._legend.hide()

    def _setup_smoothing(self):
        """Relabel the precision controls: smoothing factor for continuous
        curves, bin count when binning is active or the variable is discrete."""
        if not self.disc_cont and self.var and self.var.is_continuous:
            self.cb_disc_cont.setText("Bin continuous variables")
            self.l_smoothing_l.setText("Smooth")
            self.l_smoothing_r.setText("Precise")
        else:
            self.cb_disc_cont.setText("Bin continuous variables into {} bins".
                                      format(self.bins[self.smoothing_index]))
            self.l_smoothing_l.setText(" " + str(self.bins[0]))
            self.l_smoothing_r.setText(" " + str(self.bins[-1]))

    def _setup(self):
        """Recompute distributions/contingencies for the current selection
        and redraw the plot."""
        self.plot.clear()
        self.plot_prob.clear()
        self._legend.clear()
        self._legend.hide()

        varidx = self.variable_idx
        self.var = self.cvar = None
        if varidx >= 0:
            self.var = self.varmodel[varidx]
        if self.groupvar_idx > 0:
            self.cvar = self.groupvarmodel[self.groupvar_idx]
            self.cb_prob.clear()
            self.cb_prob.addItem("(None)")
            self.cb_prob.addItems(self.cvar.values)
            self.cb_prob.addItem("(All)")
            self.show_prob = min(max(self.show_prob, 0),
                                 len(self.cvar.values) + 1)
        data = self.data
        self._setup_smoothing()
        if self.var is None:
            return
        if self.disc_cont:
            # Discretize on a copy restricted to the used columns.
            data = self.data[:, (self.var, self.cvar) if self.cvar else self.var ]
            disc = Orange.preprocess.discretize.EqualWidth(n=self.bins[self.smoothing_index])
            data = Orange.preprocess.Discretize(data, method=disc,
                                                remove_const=False)
            self.var = data.domain[0]
        self.set_left_axis_name()
        self.enable_disable_rel_freq()
        if self.cvar:
            self.contingencies = \
                contingency.get_contingency(data, self.var, self.cvar)
            self.display_contingency()
        else:
            self.distributions = \
                distribution.get_distribution(data, self.var)
            self.display_distribution()
        self.plot.autoRange()

    def help_event(self, ev):
        """Show the accumulated tooltip(s) of the items under the cursor;
        returns True when a tooltip was shown."""
        in_graph_coor = self.plot.mapSceneToView(ev.scenePos())
        ctooltip = []
        for vb, item in self.tooltip_items:
            if isinstance(item, pg.PlotCurveItem) and item.mouseShape().contains(vb.mapSceneToView(ev.scenePos())):
                ctooltip.append(item.tooltip)
            elif isinstance(item, DistributionBarItem) and item.boundingRect().contains(vb.mapSceneToView(ev.scenePos())):
                ctooltip.append(item.tooltip)
        if ctooltip:
            QToolTip.showText(ev.screenPos(), "\n\n".join(ctooltip), widget=self.plotview)
            return True
        return False

    def display_distribution(self):
        """Draw the ungrouped view: an ASH density curve for continuous
        variables, gray frequency bars for discrete ones."""
        dist = self.distributions
        var = self.var
        assert len(dist) > 0
        self.plot.clear()
        self.plot_prob.clear()
        self.ploti.hideAxis('right')
        self.tooltip_items = []

        bottomaxis = self.ploti.getAxis("bottom")
        bottomaxis.setLabel(var.name)
        bottomaxis.resizeEvent()

        self.set_left_axis_name()
        if var and var.is_continuous:
            bottomaxis.setTicks(None)
            if not len(dist[0]):
                return
            edges, curve = ash_curve(dist, None, m=OWDistributions.ASH_HIST,
                smoothing_factor=self.smoothing_facs[self.smoothing_index])
            # Shift bin edges to bin centers for the line plot.
            edges = edges + (edges[1] - edges[0])/2
            edges = edges[:-1]
            item = pg.PlotCurveItem()
            pen = QtGui.QPen(QtGui.QBrush(Qt.white), 3)
            pen.setCosmetic(True)
            item.setData(edges, curve, antialias=True, stepMode=False,
                         fillLevel=0, brush=QtGui.QBrush(Qt.gray), pen=pen)
            self.plot.addItem(item)
            item.tooltip = "Density"
            self.tooltip_items.append((self.plot, item))
        else:
            bottomaxis.setTicks([list(enumerate(var.values))])
            for i, w in enumerate(dist):
                geom = QtCore.QRectF(i - 0.33, 0, 0.66, w)
                item = DistributionBarItem(geom, [1.0],
                                           [QtGui.QColor(128, 128, 128)])
                self.plot.addItem(item)
                item.tooltip = "Frequency for %s: %r" % (var.values[i], w)
                self.tooltip_items.append((self.plot, item))

    def _on_relative_freq_changed(self):
        """Redraw when the relative-frequency / probability options change."""
        self.set_left_axis_name()
        if self.cvar and self.cvar.is_discrete:
            self.display_contingency()
        else:
            self.display_distribution()
        self.plot.autoRange()

    def display_contingency(self):
        """
        Set the contingency to display.
        """
        cont = self.contingencies
        var, cvar = self.var, self.cvar
        assert len(cont) > 0
        self.plot.clear()
        self.plot_prob.clear()
        self._legend.clear()
        self.tooltip_items = []

        if self.show_prob:
            self.ploti.showAxis('right')
        else:
            self.ploti.hideAxis('right')

        bottomaxis = self.ploti.getAxis("bottom")
        bottomaxis.setLabel(var.name)
        bottomaxis.resizeEvent()

        cvar_values = cvar.values
        colors = [QtGui.QColor(*col) for col in cvar.colors]

        if var and var.is_continuous:
            bottomaxis.setTicks(None)

            # One ASH curve per non-empty group value; curves are scaled by
            # the group's share of the total weight.
            weights, cols, cvar_values, curves = [], [], [], []
            for i, dist in enumerate(cont):
                v, W = dist
                if len(v):
                    weights.append(numpy.sum(W))
                    cols.append(colors[i])
                    cvar_values.append(cvar.values[i])
                    curves.append(ash_curve(dist, cont, m=OWDistributions.ASH_HIST,
                        smoothing_factor=self.smoothing_facs[self.smoothing_index]))
            weights = numpy.array(weights)
            sumw = numpy.sum(weights)
            weights /= sumw
            colors = cols
            curves = [(X, Y * w) for (X, Y), w in zip(curves, weights)]

            ncval = len(cvar_values)

            curvesline = [] #from histograms to lines
            for (X,Y) in curves:
                # Bin edges -> bin centers.
                X = X + (X[1] - X[0])/2
                X = X[:-1]
                X = numpy.array(X)
                Y = numpy.array(Y)
                curvesline.append((X,Y))

            # Two passes: filled areas first, outlines (with tooltips) on top;
            # reversed so the first group value ends up frontmost.
            for t in [ "fill", "line" ]:
                for (X, Y), color, w, cval in reversed(list(zip(curvesline, colors, weights, cvar_values))):
                    item = pg.PlotCurveItem()
                    pen = QtGui.QPen(QtGui.QBrush(color), 3)
                    pen.setCosmetic(True)
                    color = QtGui.QColor(color)
                    color.setAlphaF(0.2)
                    item.setData(X, Y/(w if self.relative_freq else 1), antialias=True, stepMode=False,
                                 fillLevel=0 if t == "fill" else None,
                                 brush=QtGui.QBrush(color), pen=pen)
                    self.plot.addItem(item)
                    if t == "line":
                        item.tooltip = ("Normalized density " if self.relative_freq else "Density ") \
                            + "\n"+ cvar.name + "=" + cval
                        self.tooltip_items.append((self.plot, item))

            if self.show_prob:
                M_EST = 5 #for M estimate
                # Interpolate all curves onto a common x grid, then plot each
                # group's share of the summed density as its probability.
                all_X = numpy.array(numpy.unique(numpy.hstack([X for X,_ in curvesline])))
                inter_X = numpy.array(numpy.linspace(all_X[0], all_X[-1], len(all_X)*2))
                curvesinterp = [ numpy.interp(inter_X, X, Y) for (X,Y) in curvesline ]
                sumprob = numpy.sum(curvesinterp, axis=0)
                # allcorrection = M_EST/sumw*numpy.sum(sumprob)/len(inter_X)
                # Suppress probabilities where the total density is negligible.
                legal = sumprob > 0.05 * numpy.max(sumprob)

                i = len(curvesinterp) + 1
                show_all = self.show_prob == i
                for Y, color, cval in reversed(list(zip(curvesinterp, colors, cvar_values))):
                    i -= 1
                    if show_all or self.show_prob == i:
                        item = pg.PlotCurveItem()
                        pen = QtGui.QPen(QtGui.QBrush(color), 3, style=QtCore.Qt.DotLine)
                        pen.setCosmetic(True)
                        #prob = (Y+allcorrection/ncval)/(sumprob+allcorrection)
                        prob = Y[legal] / sumprob[legal]
                        item.setData(inter_X[legal], prob, antialias=True, stepMode=False,
                                     fillLevel=None, brush=None, pen=pen)
                        self.plot_prob.addItem(item)
                        item.tooltip = "Probability that \n" + cvar.name + "=" + cval
                        self.tooltip_items.append((self.plot_prob, item))

        elif var and var.is_discrete:
            bottomaxis.setTicks([list(enumerate(var.values))])

            cont = numpy.array(cont)
            ncval = len(cvar_values)

            maxh = 0 #maximal column height
            maxrh = 0 #maximal relative column height
            scvar = cont.sum(axis=1)
            #a cvar with sum=0 will always have distribution counts 0,
            #therefore we can divide it by anything
            scvar[scvar==0] = 1
            for i, (value, dist) in enumerate(zip(var.values, cont.T)):
                maxh = max(maxh, max(dist))
                maxrh = max(maxrh, max(dist/scvar))

            for i, (value, dist) in enumerate(zip(var.values, cont.T)):
                dsum = sum(dist)
                geom = QtCore.QRectF(i - 0.333, 0, 0.666, maxrh
                                     if self.relative_freq else maxh)
                if self.show_prob:
                    prob = dist / dsum
                    ci = 1.96 * numpy.sqrt(prob * (1 - prob) / dsum)
                else:
                    ci = None
                item = DistributionBarItem(geom, dist/scvar/maxrh
                                           if self.relative_freq
                                           else dist/maxh, colors)
                self.plot.addItem(item)
                tooltip = "\n".join("%s: %.*f" % (n, 3 if self.relative_freq else 1, v)
                                    for n,v in zip(cvar_values, dist/scvar if self.relative_freq else dist ))
                item.tooltip = ("Normalized frequency " if self.relative_freq else "Frequency ") \
                    + "(" + cvar.name + "=" + value + "):" \
                    + "\n" + tooltip
                self.tooltip_items.append((self.plot, item))

                if self.show_prob:
                    item.tooltip += "\n\nProbabilities:"
                    for ic, a in enumerate(dist):
                        # Only the chosen group value (or all when the last
                        # combo entry "(All)" is selected) gets markers.
                        if self.show_prob - 1 != ic and \
                                self.show_prob - 1 != len(dist):
                            continue
                        position = -0.333 + ((ic+0.5)*0.666/len(dist))
                        if dsum < 1e-6:
                            continue
                        prob = a / dsum
                        # Skip degenerate probabilities (CI would be 0 / NaN).
                        if not 1e-6 < prob < 1 - 1e-6:
                            continue
                        ci = 1.96 * sqrt(prob * (1 - prob) / dsum)
                        item.tooltip += "\n%s: %.3f ± %.3f" % (cvar_values[ic], prob, ci)
                        mark = pg.ScatterPlotItem()
                        bar = pg.ErrorBarItem()
                        pen = QtGui.QPen(QtGui.QBrush(QtGui.QColor(0)), 1)
                        pen.setCosmetic(True)
                        # Clip the CI whiskers so they stay within [0, 1].
                        bar.setData(x=[i+position], y=[prob],
                                    bottom=min(numpy.array([ci]), prob),
                                    top=min(numpy.array([ci]), 1 - prob),
                                    beam=numpy.array([0.05]),
                                    brush=QtGui.QColor(1), pen=pen)
                        mark.setData([i+position], [prob], antialias=True, symbol="o",
                                     fillLevel=None, pxMode=True, size=10,
                                     brush=QtGui.QColor(colors[ic]), pen=pen)
                        self.plot_prob.addItem(bar)
                        self.plot_prob.addItem(mark)

        for color, name in zip(colors, cvar_values):
            self._legend.addItem(
                ScatterPlotItem(pen=color, brush=color, size=10, shape="s"),
                escape(name)
            )
        self._legend.show()

    def set_left_axis_name(self):
        """Label the left axis according to variable type and normalization."""
        leftaxis = self.ploti.getAxis("left")
        set_label = leftaxis.setLabel
        if self.var and self.var.is_continuous:
            set_label(["Density", "Relative density"]
                      [self.cvar is not None and self.relative_freq])
        else:
            set_label(["Frequency", "Relative frequency"]
                      [self.cvar is not None and self.relative_freq])
        leftaxis.resizeEvent()

    def enable_disable_rel_freq(self):
        """Grouping-dependent options only make sense with a group variable."""
        self.cb_prob.setDisabled(self.var is None or self.cvar is None)
        self.cb_rel_freq.setDisabled(
            self.var is None or self.cvar is None)

    def _on_variable_idx_changed(self):
        self.variable_idx = selected_index(self.varview)
        self._setup()

    def _on_groupvar_idx_changed(self):
        self._setup()

    def _on_set_smoothing(self):
        self._setup()

    def onDeleteWidget(self):
        self.plot.clear()
        super().onDeleteWidget()

    def get_widget_name_extension(self):
        """Used in window titles/reports: the currently shown variable."""
        if self.variable_idx >= 0:
            return self.varmodel[self.variable_idx]

    def send_report(self):
        """Compose the report: the plot plus a caption describing the shown
        variable, grouping and probability options."""
        if self.variable_idx < 0:
            return
        self.report_plot()
        text = "Distribution of '{}'".format(
            self.varmodel[self.variable_idx])
        if self.groupvar_idx:
            group_var = self.groupvarmodel[self.groupvar_idx]
            prob = self.cb_prob
            indiv_probs = 0 < prob.currentIndex() < prob.count() - 1
            if not indiv_probs or self.relative_freq:
                text += " grouped by '{}'".format(group_var)
                if self.relative_freq:
                    text += " (relative frequencies)"
            if indiv_probs:
                text += "; probabilites for '{}={}'".format(
                    group_var, prob.currentText())
        self.report_caption(text)
def dist_sum(D1, D2):
    """
    A sum of two continuous distributions.

    Each distribution is a (values, weights) pair; the result merges the
    value sets and adds the weights of identical values.
    """
    xs = numpy.r_[D1[0], D2[0]]
    ws = numpy.r_[D1[1], D2[1]]

    order = numpy.argsort(xs)
    xs, ws = xs[order], ws[order]

    uniq, first = numpy.unique(xs, return_index=True)
    bounds = numpy.r_[first, len(xs)]
    # Sum the weight run belonging to each distinct value.
    merged = numpy.array([numpy.sum(ws[lo:hi])
                          for lo, hi in zip(bounds[:-1], bounds[1:])])
    assert merged.shape[0] == uniq.shape[0]
    return uniq, merged
def ash_curve(dist, cont=None, bandwidth=None, m=3, smoothing_factor=1):
    """Average-shifted-histogram density estimate of a weighted sample.

    `dist` is a (values, weights) pair.  When `bandwidth` is None it is
    derived from the weighted spread of the sample; a zero spread falls
    back to the pooled contingency `cont`, and finally to a small constant.
    Returns (edges, hist).
    """
    values, counts = numpy.asarray(dist)
    if bandwidth is None:
        spread = weighted_std(values, weights=counts)
        n = values.size
        if spread == 0 and cont is not None:
            # Only one distinct value in this class: borrow the spread of
            # the full contingency across all classes.
            spread = weighted_std(cont.values,
                                  weights=numpy.sum(cont.counts, axis=0))
            n = cont.values.size
        if spread == 0:
            # Constant attribute, or no contingency available at all.
            spread = 0.1
            n = values.size
        bandwidth = 3.5 * spread * (n ** (-1 / 3))
    hist, edges = average_shifted_histogram(values, bandwidth, m,
                                            weights=counts,
                                            smoothing=smoothing_factor)
    return edges, hist
def average_shifted_histogram(a, h, m=3, weights=None, smoothing=1):
    """
    Compute the average shifted histogram.

    Parameters
    ----------
    a : array-like
        Input data.
    h : float
        Base bin width.
    m : int
        Number of shifted histograms.
    weights : array-like
        An array of weights of the same shape as `a`
    smoothing : float
        Extra multiplier on the bin width (larger = smoother).

    Returns
    -------
    (ash, edges) : the density values and the bin edges (len(edges) ==
    len(ash) + 1, as produced by numpy.histogram).
    """
    a = numpy.asarray(a)

    if weights is not None:
        weights = numpy.asarray(weights)
        if weights.shape != a.shape:
            raise ValueError("weights should have the same shape as a")
        weights = weights.ravel()

    a = a.ravel()

    amin, amax = a.min(), a.max()
    h = h * 0.5 * smoothing  # effective half-width of the kernel support
    delta = h / m            # width of one fine histogram bin
    wfac = 4 #extended windows for gaussian smoothing
    # Pad the range so the smoothing kernel does not clip at the data edges.
    offset = (wfac * m - 1) * delta
    nbins = max(numpy.ceil((amax - amin + 2 * offset) / delta), 2 * m * wfac - 1)

    bins = numpy.linspace(amin - offset, amax + offset, nbins + 1,
                          endpoint=True)
    hist, edges = numpy.histogram(a, bins, weights=weights, density=True)

    # Gaussian weights over the 2*wfac*m - 1 neighboring fine bins,
    # normalized to sum to 1.
    kernel = gaussian_kernel((numpy.arange(2 * wfac * m - 1) - (wfac * m - 1)) / (wfac * m), wfac)
    kernel = kernel / numpy.sum(kernel)
    ash = numpy.convolve(hist, kernel, mode="same")

    # Renormalize so the curve integrates to ~1 over the bin widths.
    ash = ash / numpy.diff(edges) / ash.sum()
    # assert abs((numpy.diff(edges) * ash).sum()) <= 1e-6
    return ash, edges
def triangular_kernel(x):
    """Triangular kernel: ``max(0, 1 - |x|)``, elementwise.

    Fixes the original ``numpy.clip(1, 0, 1 - abs(x))``, which clipped the
    constant 1 to the interval [0, 1 - |x|] (wrong argument order) and so
    returned negative values for |x| > 1 instead of 0.
    """
    return numpy.clip(1 - numpy.abs(x), 0, 1)
def gaussian_kernel(x, k):
    """Standard normal density evaluated at ``x * k``.

    Scaling by ``k`` fits k standard deviations into the space [-1 .. 1].
    """
    z = x * k
    return numpy.exp(-0.5 * z ** 2) / numpy.sqrt(2 * numpy.pi)
def weighted_std(a, axis=None, weights=None, ddof=0):
    """Weighted standard deviation of `a` along `axis`.

    ``ddof`` applies the delta-degrees-of-freedom correction using the
    total weight in place of the sample count.
    """
    center = numpy.average(a, axis=axis, weights=weights)
    if axis is not None:
        # Re-insert the reduced axes so the mean broadcasts against `a`.
        center = center.reshape(shape_reduce_keep_dims(a.shape, axis))
    deviations = numpy.power(a - center, 2)
    variance, total_weight = numpy.average(
        deviations, axis=axis, weights=weights, returned=True
    )
    if ddof != 0:
        variance *= total_weight / (total_weight - ddof)
    return numpy.sqrt(variance)
def weighted_quantiles(a, prob=(0.25, 0.5, 0.75), alphap=0.4, betap=0.4,
                       axis=None, weights=None):
    """Weighted quantiles of `a` at probabilities `prob`.

    Uses plotting positions
    ``pk = (k - alphap*w) / (n + 1 - alphap*w - betap*w)`` and linear
    interpolation between them; probabilities outside the covered range
    clamp to the extreme data values.

    Parameters
    ----------
    a : array-like
        Input data (sorted internally).
    prob : sequence of float
        Quantile probabilities in [0, 1].  The default is a tuple rather
        than the original mutable list (mutable default argument).
    alphap, betap : float
        Plotting-position parameters.
    axis : int or None
        Axis passed through to argsort/cumsum (only 1-D input is
        meaningful here).
    weights : array-like or None
        Per-sample weights; defaults to uniform weights.
    """
    a = numpy.asarray(a)
    prob = numpy.asarray(prob)
    sort_ind = numpy.argsort(a, axis)
    a = a[sort_ind]

    if weights is None:
        weights = numpy.ones_like(a)
    else:
        weights = numpy.asarray(weights)
        weights = weights[sort_ind]

    n = numpy.sum(weights)
    k = numpy.cumsum(weights, axis)
    # plotting positions for the known n knots
    pk = (k - alphap * weights) / (n + 1 - alphap * weights - betap * weights)
    return numpy.interp(prob, pk, a, left=a[0], right=a[-1])
def shape_reduce_keep_dims(shape, axis):
    """Return `shape` with the reduced `axis` (or axes) replaced by 1.

    Mirrors numpy's ``keepdims=True`` semantics for a plain shape tuple;
    a ``None`` shape yields the empty tuple.
    """
    if shape is None:
        return ()

    shape = list(shape)
    # collections.abc.Sequence: the bare ``collections.Sequence`` alias
    # used originally was removed in Python 3.10.
    if isinstance(axis, collections.abc.Sequence):
        for ax in axis:
            shape[ax] = 1
    else:
        shape[axis] = 1
    return tuple(shape)
def main(argv=None):
    """Stand-alone entry point: show OWDistributions on a sample dataset.

    The dataset name may be given as the first command-line argument;
    defaults to "heart_disease".  Returns the Qt exit status.
    """
    import gc

    argv = list(sys.argv if argv is None else argv)
    app = QtGui.QApplication(argv)

    widget = OWDistributions()
    widget.show()

    filename = argv[1] if len(argv) > 1 else "heart_disease"
    widget.set_data(Orange.data.Table(filename))
    widget.handleNewSignals()

    status = app.exec_()

    # Detach the data and dispose of the widget before returning so that
    # Qt resources are released deterministically.
    widget.set_data(None)
    widget.handleNewSignals()
    widget.deleteLater()
    del widget
    app.processEvents()
    gc.collect()
    return status
# Run the widget as a standalone application when executed directly.
if __name__ == "__main__":
    sys.exit(main())
|
kwikadi/orange3
|
Orange/widgets/visualize/owdistributions.py
|
Python
|
bsd-2-clause
| 27,981
|
[
"Gaussian"
] |
7fba339241c0ae183fe44ac44aeb86ae6115aedab25ac7d238bf157b33ec7667
|
"""0MQ Constants."""
#-----------------------------------------------------------------------------
# Copyright (c) 2013 Brian E. Granger & Min Ragan-Kelley
#
# This file is part of pyzmq
#
# Distributed under the terms of the New BSD License. The full license is in
# the file COPYING.BSD, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from .backend import constants
#-----------------------------------------------------------------------------
# Python module level constants
#-----------------------------------------------------------------------------
# Names re-exported via ``from zmq.sugar.constants import *``.  Extended at
# import time by ``_add_constant`` as backend constants are discovered.
__all__ = [
  'int_sockopts',
  'int64_sockopts',
  'bytes_sockopts',
  'ctx_opts',
  'ctx_opt_names',
]
# Registries of option *values*, grouped by C-level type; populated by the
# ``_add_constant`` loops at the bottom of this module.
int_sockopts = set()
int64_sockopts = set()
bytes_sockopts = set()
ctx_opts = set()
msg_opts = set()
# All plain constant names to resolve from the backend.  Each distinct name
# is listed exactly once: the original list repeated the whole errno section,
# which made ``_add_constant`` append duplicate entries to ``__all__``.
names = [
    # base
    'VERSION',
    'NOBLOCK',
    'DONTWAIT',
    'POLLIN',
    'POLLOUT',
    'POLLERR',
    'STREAMER',
    'FORWARDER',
    'QUEUE',
    'SNDMORE',
    # socktypes
    'PAIR',
    'PUB',
    'SUB',
    'REQ',
    'REP',
    'DEALER',
    'ROUTER',
    'PULL',
    'PUSH',
    'XPUB',
    'XSUB',
    # events
    'EVENT_CONNECTED',
    'EVENT_CONNECT_DELAYED',
    'EVENT_CONNECT_RETRIED',
    'EVENT_LISTENING',
    'EVENT_BIND_FAILED',
    'EVENT_ACCEPTED',
    'EVENT_ACCEPT_FAILED',
    'EVENT_CLOSED',
    'EVENT_CLOSE_FAILED',
    'EVENT_DISCONNECTED',
    ## ERRNO
    # Often used (these are also in errno.)
    'EAGAIN',
    'EINVAL',
    'EFAULT',
    'ENOMEM',
    'ENODEV',
    # For Windows compatibility
    'ENOTSUP',
    'EPROTONOSUPPORT',
    'ENOBUFS',
    'ENETDOWN',
    'EADDRINUSE',
    'EADDRNOTAVAIL',
    'ECONNREFUSED',
    'EINPROGRESS',
    'ENOTSOCK',
    # new errnos in zmq3
    'EMSGSIZE',
    'EAFNOSUPPORT',
    'ENETUNREACH',
    'ECONNABORTED',
    'ECONNRESET',
    'ENOTCONN',
    'ETIMEDOUT',
    'EHOSTUNREACH',
    'ENETRESET',
    # 0MQ Native
    'EFSM',
    'ENOCOMPATPROTO',
    'ETERM',
    'EMTHREAD',
]
# Socket options whose value is a 64-bit integer.
int64_sockopt_names = [
    'AFFINITY',
    'MAXMSGSIZE',
    # sockopts removed in 3.0.0
    'HWM',
    'SWAP',
    'MCAST_LOOP',
    'RECOVERY_IVL_MSEC',
]
# Socket options whose value is a byte string.
bytes_sockopt_names = [
    'IDENTITY',
    'SUBSCRIBE',
    'UNSUBSCRIBE',
    'LAST_ENDPOINT',
    'TCP_ACCEPT_FILTER',
]
# Socket options whose value is a plain C int.
int_sockopt_names = [
    # sockopts
    'RECONNECT_IVL_MAX',
    # sockopts new in 2.2.0
    'SNDTIMEO',
    'RCVTIMEO',
    # new in 3.x
    'SNDHWM',
    'RCVHWM',
    'MULTICAST_HOPS',
    'IPV4ONLY',
    'ROUTER_BEHAVIOR',
    'TCP_KEEPALIVE',
    'TCP_KEEPALIVE_CNT',
    'TCP_KEEPALIVE_IDLE',
    'TCP_KEEPALIVE_INTVL',
    'DELAY_ATTACH_ON_CONNECT',
    'XPUB_VERBOSE',
    'ROUTER_RAW',
    'FD',
    'EVENTS',
    'TYPE',
    'LINGER',
    'RECONNECT_IVL',
    'BACKLOG',
]
# Context options (int valued).
ctx_opt_names = [
    'IO_THREADS',
    'MAX_SOCKETS',
]
# Message options (int valued).
msg_opt_names = [
    'MORE',
]
# Options whose C-level type changed between libzmq 2.x (int64) and
# libzmq 3.x (int); filed under the category matching the linked version.
switched_names = [
    'RATE',
    'RECOVERY_IVL',
    'SNDBUF',
    'RCVBUF',
    'RCVMORE',
]
if constants.VERSION < 30000:
    int64_sockopt_names.extend(switched_names)
else:
    int_sockopt_names.extend(switched_names)
def _add_constant(name, container=None):
    """Resolve `name` from the backend constants module and export it.

    Adds the value to this module's globals and to ``__all__`` and, when
    `container` is given, to that set as well.  Returns the value, or
    None when the backend does not define the constant.
    """
    # Use a unique sentinel rather than the original -1 so that a constant
    # whose real value happens to be -1 is still exported.
    missing = object()
    c = getattr(constants, name, missing)
    if c is missing:
        return
    globals()[name] = c
    __all__.append(name)
    if container is not None:
        container.add(c)
    return c
# Populate module globals and the per-category registries from whatever
# constants the compiled backend actually defines.
for name in names:
    _add_constant(name)

for name in int_sockopt_names:
    _add_constant(name, int_sockopts)

for name in int64_sockopt_names:
    _add_constant(name, int64_sockopts)

for name in bytes_sockopt_names:
    _add_constant(name, bytes_sockopts)

for name in ctx_opt_names:
    _add_constant(name, ctx_opts)

for name in msg_opt_names:
    _add_constant(name, msg_opts)
|
IsCoolEntertainment/debpkg_python-pyzmq
|
zmq/sugar/constants.py
|
Python
|
lgpl-3.0
| 4,371
|
[
"Brian"
] |
1dbd396fb1baad7a794b5facf17120124c8191d70ccdc09e8fcc71f057161d9b
|
# Autodetecting setup.py script for building the Python extensions
#
import sys, os, importlib.machinery, re, optparse
from glob import glob
import importlib._bootstrap
import importlib.util
import sysconfig
from distutils import log
from distutils import text_file
from distutils.errors import *
from distutils.core import Extension, setup
from distutils.command.build_ext import build_ext
from distutils.command.install import install
from distutils.command.install_lib import install_lib
from distutils.command.build_scripts import build_scripts
from distutils.spawn import find_executable
# True when building for a platform other than the build host.
cross_compiling = "_PYTHON_HOST_PLATFORM" in os.environ

# Add special CFLAGS reserved for building the interpreter and the stdlib
# modules (Issue #21121).
cflags = sysconfig.get_config_var('CFLAGS')
py_cflags_nodist = sysconfig.get_config_var('PY_CFLAGS_NODIST')
# Mutating the cached config dict makes every later sysconfig query see
# the combined flags.
sysconfig.get_config_vars()['CFLAGS'] = cflags + ' ' + py_cflags_nodist
def get_platform():
    """Return the target platform string, honoring cross-compilation."""
    # Cross build: the host platform is dictated by the environment.
    host = os.environ.get("_PYTHON_HOST_PLATFORM")
    if host is not None:
        return host
    # All OSF/1 variants collapse to a single name.
    return 'osf1' if sys.platform.startswith('osf1') else sys.platform
# Cached target platform used by all the detection logic below.
host_platform = get_platform()

# Were we compiled --with-pydebug or with #define Py_DEBUG?
COMPILED_WITH_PYDEBUG = ('--with-pydebug' in sysconfig.get_config_var("CONFIG_ARGS"))

# This global variable is used to hold the list of modules to be disabled.
disabled_module_list = []
def add_dir_to_list(dirlist, dir):
    """Add the directory 'dir' to the list 'dirlist' (after any relative
    directories) if:

    1) 'dir' is not already in 'dirlist'
    2) 'dir' actually exists, and is a directory.
    """
    if dir is None or not os.path.isdir(dir) or dir in dirlist:
        return
    for position, entry in enumerate(dirlist):
        if not os.path.isabs(entry):
            # Slot the new directory right after the first relative entry
            # so relative paths keep their priority.
            dirlist.insert(position + 1, dir)
            return
    # Only absolute entries (or an empty list): new directory goes first.
    dirlist.insert(0, dir)
def macosx_sdk_root():
"""
Return the directory of the current OSX SDK,
or '/' if no SDK was specified.
"""
cflags = sysconfig.get_config_var('CFLAGS')
m = re.search(r'-isysroot\s+(\S+)', cflags)
if m is None:
sysroot = '/'
else:
sysroot = m.group(1)
return sysroot
def is_macosx_sdk_path(path):
    """
    Returns True if 'path' can be located in an OSX SDK
    """
    # System locations live under /usr (except /usr/local), /System
    # and /Library.
    if path.startswith('/usr/'):
        return not path.startswith('/usr/local')
    return path.startswith(('/System/', '/Library/'))
def find_file(filename, std_dirs, paths):
    """Searches for the directory where a given file is located,
    and returns a possibly-empty list of additional directories, or None
    if the file couldn't be found at all.

    'filename' is the name of a file, such as readline.h or libcrypto.a.
    'std_dirs' is the list of standard system directories; if the
    file is found in one of them, no additional directives are needed.
    'paths' is a list of additional locations to check; if the file is
    found in one of them, the resulting list will contain the directory.
    """
    # Honor the MacOSX SDK setting when one was specified.  An SDK is a
    # directory with the same structure as a real system, but with only
    # header files and libraries.
    sysroot = macosx_sdk_root() if host_platform == 'darwin' else None

    def candidate(directory):
        location = os.path.join(directory, filename)
        if sysroot is not None and is_macosx_sdk_path(directory):
            location = os.path.join(sysroot, directory[1:], filename)
        return location

    # A hit in a standard directory needs no extra directives.
    for directory in std_dirs:
        if os.path.exists(candidate(directory)):
            return []

    # A hit in an additional location reports that location.
    for directory in paths:
        if os.path.exists(candidate(directory)):
            return [directory]

    # Not found anywhere
    return None
def find_library_file(compiler, libname, std_dirs, paths):
    """Locate `libname` via the compiler and report which extra directory
    (if any) is needed: [] when it lives in a standard directory, [dir]
    when it lives in one of `paths`, or None when it is not found."""
    result = compiler.find_library_file(std_dirs + paths, libname)
    if result is None:
        return None

    # Map the found path back into SDK-relative terms on OS X.
    sysroot = macosx_sdk_root() if host_platform == 'darwin' else None
    dirname = os.path.dirname(result)

    def match_dir(entry):
        # Compare without a trailing separator; also accept the
        # SDK-prefixed twin of a system path.  Returns the normalized
        # entry on a match, else None.
        entry = entry.rstrip(os.sep)
        if sysroot is not None and is_macosx_sdk_path(entry):
            if os.path.join(sysroot, entry[1:]) == dirname:
                return entry
        if entry == dirname:
            return entry
        return None

    # Found in a standard directory: no extra search path required.
    for p in std_dirs:
        if match_dir(p) is not None:
            return []

    # Otherwise, it must have been in one of the additional directories,
    # so we have to figure out which one.
    for p in paths:
        normalized = match_dir(p)
        if normalized is not None:
            return [normalized]

    assert False, "Internal error: Path not found in std_dirs or paths"
def module_enabled(extlist, modname):
    """Returns whether the module 'modname' is present in the list
    of extensions 'extlist'."""
    # Count of matches; callers use the result as a boolean.
    return len([ext for ext in extlist if ext.name == modname])
def find_module_file(module, dirlist):
    """Find a module in a set of possible folders. If it is not found
    return the unadorned filename"""
    hits = find_file(module, [], dirlist)
    if not hits:
        return module
    if len(hits) > 1:
        # More than one copy on disk; the first one wins.
        log.info("WARNING: multiple copies of %s found" % module)
    return os.path.join(hits[0], module)
class PyBuildExt(build_ext):
    def __init__(self, dist):
        build_ext.__init__(self, dist)
        # Names of extensions whose build or import check failed; reported
        # in the summary printed by build_extensions().
        self.failed = []
    def build_extensions(self):
        """Autodetect buildable extension modules, filter out disabled or
        statically-built ones, delegate to distutils' build_ext, and print
        a summary of modules that were missing or failed to build."""

        # Detect which modules should be compiled
        missing = self.detect_modules()

        # Remove modules that are present on the disabled list
        extensions = [ext for ext in self.extensions
                      if ext.name not in disabled_module_list]
        # move ctypes to the end, it depends on other modules
        ext_map = dict((ext.name, i) for i, ext in enumerate(extensions))
        if "_ctypes" in ext_map:
            ctypes = extensions.pop(ext_map["_ctypes"])
            extensions.append(ctypes)
        self.extensions = extensions

        # Fix up the autodetected modules, prefixing all the source files
        # with Modules/.
        srcdir = sysconfig.get_config_var('srcdir')
        if not srcdir:
            # Maybe running on Windows but not using CYGWIN?
            raise ValueError("No source directory; cannot proceed.")
        srcdir = os.path.abspath(srcdir)
        moddirlist = [os.path.join(srcdir, 'Modules')]

        # Fix up the paths for scripts, too
        self.distribution.scripts = [os.path.join(srcdir, filename)
                                     for filename in self.distribution.scripts]

        # Python header files
        headers = [sysconfig.get_config_h_filename()]
        headers += glob(os.path.join(sysconfig.get_path('include'), "*.h"))

        # Iterate over a copy: entries may be removed while iterating.
        for ext in self.extensions[:]:
            ext.sources = [ find_module_file(filename, moddirlist)
                            for filename in ext.sources ]
            if ext.depends is not None:
                ext.depends = [find_module_file(filename, moddirlist)
                               for filename in ext.depends]
            else:
                ext.depends = []
            # re-compile extensions if a header file has been changed
            ext.depends.extend(headers)

            # If a module has already been built statically,
            # don't build it here
            if ext.name in sys.builtin_module_names:
                self.extensions.remove(ext)

        # Parse Modules/Setup and Modules/Setup.local to figure out which
        # modules are turned on in the file.
        remove_modules = []
        for filename in ('Modules/Setup', 'Modules/Setup.local'):
            input = text_file.TextFile(filename, join_lines=1)
            while 1:
                line = input.readline()
                if not line: break
                line = line.split()
                remove_modules.append(line[0])
            input.close()

        for ext in self.extensions[:]:
            if ext.name in remove_modules:
                self.extensions.remove(ext)

        # When you run "make CC=altcc" or something similar, you really want
        # those environment variables passed into the setup.py phase.  Here's
        # a small set of useful ones.
        compiler = os.environ.get('CC')
        args = {}
        # unfortunately, distutils doesn't let us provide separate C and C++
        # compilers
        if compiler is not None:
            (ccshared,cflags) = sysconfig.get_config_vars('CCSHARED','CFLAGS')
            args['compiler_so'] = compiler + ' ' + ccshared + ' ' + cflags
        self.compiler.set_executables(**args)

        build_ext.build_extensions(self)

        # Column width for the three-column report below.
        longest = max([len(e.name) for e in self.extensions])
        if self.failed:
            longest = max(longest, max([len(name) for name in self.failed]))

        def print_three_column(lst):
            lst.sort(key=str.lower)
            # guarantee zip() doesn't drop anything
            while len(lst) % 3:
                lst.append("")
            for e, f, g in zip(lst[::3], lst[1::3], lst[2::3]):
                print("%-*s   %-*s   %-*s" % (longest, e, longest, f,
                                              longest, g))

        if missing:
            print()
            print("Python build finished successfully!")
            print("The necessary bits to build these optional modules were not "
                  "found:")
            print_three_column(missing)
            print("To find the necessary bits, look in setup.py in"
                  " detect_modules() for the module's name.")
            print()

        if self.failed:
            failed = self.failed[:]
            print()
            print("Failed to build these modules:")
            print_three_column(failed)
            print()
    def build_extension(self, ext):
        """Build a single extension and then sanity-check it by importing
        it; modules that fail either step are recorded in self.failed and
        (on import failure) renamed with a "_failed" suffix on disk."""

        if ext.name == '_ctypes':
            if not self.configure_ctypes(ext):
                return

        try:
            build_ext.build_extension(self, ext)
        except (CCompilerError, DistutilsError) as why:
            self.announce('WARNING: building of extension "%s" failed: %s' %
                          (ext.name, sys.exc_info()[1]))
            self.failed.append(ext.name)
            return

        # Workaround for Mac OS X: The Carbon-based modules cannot be
        # reliably imported into a command-line Python
        if 'Carbon' in ext.extra_link_args:
            self.announce(
                'WARNING: skipping import check for Carbon-based "%s"' %
                ext.name)
            return

        if host_platform == 'darwin' and (
                sys.maxsize > 2**32 and '-arch' in ext.extra_link_args):
            # Don't bother doing an import check when an extension was
            # build with an explicit '-arch' flag on OSX. That's currently
            # only used to build 32-bit only extensions in a 4-way
            # universal build and loading 32-bit code into a 64-bit
            # process will fail.
            self.announce(
                'WARNING: skipping import check for "%s"' %
                ext.name)
            return

        # Workaround for Cygwin: Cygwin currently has fork issues when many
        # modules have been imported
        if host_platform == 'cygwin':
            self.announce('WARNING: skipping import check for Cygwin-based "%s"'
                % ext.name)
            return
        ext_filename = os.path.join(
            self.build_lib,
            self.get_ext_filename(self.get_ext_fullname(ext.name)))

        # If the build directory didn't exist when setup.py was
        # started, sys.path_importer_cache has a negative result
        # cached.  Clear that cache before trying to import.
        sys.path_importer_cache.clear()

        # Don't try to load extensions for cross builds
        if cross_compiling:
            return

        loader = importlib.machinery.ExtensionFileLoader(ext.name, ext_filename)
        spec = importlib.util.spec_from_file_location(ext.name, ext_filename,
                                                      loader=loader)
        try:
            importlib._bootstrap._SpecMethods(spec).load()
        except ImportError as why:
            self.failed.append(ext.name)
            self.announce('*** WARNING: renaming "%s" since importing it'
                          ' failed: %s' % (ext.name, why), level=3)
            assert not self.inplace
            # Rename the built file so a later import cannot pick up the
            # broken module by accident.
            basename, tail = os.path.splitext(ext_filename)
            newname = basename + "_failed" + tail
            if os.path.exists(newname):
                os.remove(newname)
            os.rename(ext_filename, newname)

            # XXX -- This relies on a Vile HACK in
            # distutils.command.build_ext.build_extension().  The
            # _built_objects attribute is stored there strictly for
            # use here.
            # If there is a failure, _built_objects may not be there,
            # so catch the AttributeError and move on.
            try:
                for filename in self._built_objects:
                    os.remove(filename)
            except AttributeError:
                self.announce('unable to remove files (ignored)')
        except:
            # Any other exception during the import check: report and
            # record the failure, but keep building the remaining modules.
            exc_type, why, tb = sys.exc_info()
            self.announce('*** WARNING: importing extension "%s" '
                          'failed with %s: %s' % (ext.name, exc_type, why),
                          level=3)
            self.failed.append(ext.name)
    def add_multiarch_paths(self):
        """Add Debian/Ubuntu multiarch library and include directories to
        the compiler search paths, asking the compiler first and falling
        back to dpkg-architecture."""
        # Debian/Ubuntu multiarch support.
        # https://wiki.ubuntu.com/MultiarchSpec
        cc = sysconfig.get_config_var('CC')
        tmpfile = os.path.join(self.build_temp, 'multiarch')
        if not os.path.exists(self.build_temp):
            os.makedirs(self.build_temp)
        ret = os.system(
            '%s -print-multiarch > %s 2> /dev/null' % (cc, tmpfile))
        multiarch_path_component = ''
        try:
            # ret >> 8 extracts the child's exit status from os.system().
            if ret >> 8 == 0:
                with open(tmpfile) as fp:
                    multiarch_path_component = fp.readline().strip()
        finally:
            os.unlink(tmpfile)

        if multiarch_path_component != '':
            add_dir_to_list(self.compiler.library_dirs,
                            '/usr/lib/' + multiarch_path_component)
            add_dir_to_list(self.compiler.include_dirs,
                            '/usr/include/' + multiarch_path_component)
            return

        # Fallback: derive the multiarch triplet via dpkg-architecture.
        if not find_executable('dpkg-architecture'):
            return
        opt = ''
        if cross_compiling:
            opt = '-t' + sysconfig.get_config_var('HOST_GNU_TYPE')
        tmpfile = os.path.join(self.build_temp, 'multiarch')
        if not os.path.exists(self.build_temp):
            os.makedirs(self.build_temp)
        ret = os.system(
            'dpkg-architecture %s -qDEB_HOST_MULTIARCH > %s 2> /dev/null' %
            (opt, tmpfile))
        try:
            if ret >> 8 == 0:
                with open(tmpfile) as fp:
                    multiarch_path_component = fp.readline().strip()
                add_dir_to_list(self.compiler.library_dirs,
                                '/usr/lib/' + multiarch_path_component)
                add_dir_to_list(self.compiler.include_dirs,
                                '/usr/include/' + multiarch_path_component)
        finally:
            os.unlink(tmpfile)
    def add_gcc_paths(self):
        """Parse `cc -E -v` output to recover gcc's own library and include
        search directories and add them to the compiler search paths
        (skipping gcc-internal '/gcc/' directories)."""
        gcc = sysconfig.get_config_var('CC')
        tmpfile = os.path.join(self.build_temp, 'gccpaths')
        if not os.path.exists(self.build_temp):
            os.makedirs(self.build_temp)
        ret = os.system('%s -E -v - </dev/null 2>%s 1>/dev/null' % (gcc, tmpfile))
        is_gcc = False
        in_incdirs = False
        inc_dirs = []
        lib_dirs = []
        try:
            # ret >> 8 extracts the child's exit status from os.system().
            if ret >> 8 == 0:
                with open(tmpfile) as fp:
                    for line in fp.readlines():
                        if line.startswith("gcc version"):
                            is_gcc = True
                        elif line.startswith("#include <...>"):
                            # Start of the system include search list.
                            in_incdirs = True
                        elif line.startswith("End of search list"):
                            in_incdirs = False
                        elif is_gcc and line.startswith("LIBRARY_PATH"):
                            for d in line.strip().split("=")[1].split(":"):
                                d = os.path.normpath(d)
                                if '/gcc/' not in d:
                                    add_dir_to_list(self.compiler.library_dirs,
                                                    d)
                        elif is_gcc and in_incdirs and '/gcc/' not in line:
                            add_dir_to_list(self.compiler.include_dirs,
                                            line.strip())
        finally:
            os.unlink(tmpfile)
def detect_modules(self):
# Ensure that /usr/local is always used, but the local build
# directories (i.e. '.' and 'Include') must be first. See issue
# 10520.
if not cross_compiling:
add_dir_to_list(self.compiler.library_dirs, '/usr/local/lib')
add_dir_to_list(self.compiler.include_dirs, '/usr/local/include')
# only change this for cross builds for 3.3, issues on Mageia
if cross_compiling:
self.add_gcc_paths()
self.add_multiarch_paths()
# Add paths specified in the environment variables LDFLAGS and
# CPPFLAGS for header and library files.
# We must get the values from the Makefile and not the environment
# directly since an inconsistently reproducible issue comes up where
# the environment variable is not set even though the value were passed
# into configure and stored in the Makefile (issue found on OS X 10.3).
for env_var, arg_name, dir_list in (
('LDFLAGS', '-R', self.compiler.runtime_library_dirs),
('LDFLAGS', '-L', self.compiler.library_dirs),
('CPPFLAGS', '-I', self.compiler.include_dirs)):
env_val = sysconfig.get_config_var(env_var)
if env_val:
# To prevent optparse from raising an exception about any
# options in env_val that it doesn't know about we strip out
# all double dashes and any dashes followed by a character
# that is not for the option we are dealing with.
#
# Please note that order of the regex is important! We must
# strip out double-dashes first so that we don't end up with
# substituting "--Long" to "-Long" and thus lead to "ong" being
# used for a library directory.
env_val = re.sub(r'(^|\s+)-(-|(?!%s))' % arg_name[1],
' ', env_val)
parser = optparse.OptionParser()
# Make sure that allowing args interspersed with options is
# allowed
parser.allow_interspersed_args = True
parser.error = lambda msg: None
parser.add_option(arg_name, dest="dirs", action="append")
options = parser.parse_args(env_val.split())[0]
if options.dirs:
for directory in reversed(options.dirs):
add_dir_to_list(dir_list, directory)
if os.path.normpath(sys.base_prefix) != '/usr' \
and not sysconfig.get_config_var('PYTHONFRAMEWORK'):
# OSX note: Don't add LIBDIR and INCLUDEDIR to building a framework
# (PYTHONFRAMEWORK is set) to avoid # linking problems when
# building a framework with different architectures than
# the one that is currently installed (issue #7473)
add_dir_to_list(self.compiler.library_dirs,
sysconfig.get_config_var("LIBDIR"))
add_dir_to_list(self.compiler.include_dirs,
sysconfig.get_config_var("INCLUDEDIR"))
# lib_dirs and inc_dirs are used to search for files;
# if a file is found in one of those directories, it can
# be assumed that no additional -I,-L directives are needed.
if not cross_compiling:
lib_dirs = self.compiler.library_dirs + [
'/lib64', '/usr/lib64',
'/lib', '/usr/lib',
]
inc_dirs = self.compiler.include_dirs + ['/usr/include']
else:
lib_dirs = self.compiler.library_dirs[:]
inc_dirs = self.compiler.include_dirs[:]
exts = []
missing = []
config_h = sysconfig.get_config_h_filename()
with open(config_h) as file:
config_h_vars = sysconfig.parse_config_h(file)
srcdir = sysconfig.get_config_var('srcdir')
# OSF/1 and Unixware have some stuff in /usr/ccs/lib (like -ldb)
if host_platform in ['osf1', 'unixware7', 'openunix8']:
lib_dirs += ['/usr/ccs/lib']
# HP-UX11iv3 keeps files in lib/hpux folders.
if host_platform == 'hp-ux11':
lib_dirs += ['/usr/lib/hpux64', '/usr/lib/hpux32']
if host_platform == 'darwin':
# This should work on any unixy platform ;-)
# If the user has bothered specifying additional -I and -L flags
# in OPT and LDFLAGS we might as well use them here.
#
# NOTE: using shlex.split would technically be more correct, but
# also gives a bootstrap problem. Let's hope nobody uses
# directories with whitespace in the name to store libraries.
cflags, ldflags = sysconfig.get_config_vars(
'CFLAGS', 'LDFLAGS')
for item in cflags.split():
if item.startswith('-I'):
inc_dirs.append(item[2:])
for item in ldflags.split():
if item.startswith('-L'):
lib_dirs.append(item[2:])
# Check for MacOS X, which doesn't need libm.a at all
math_libs = ['m']
if host_platform == 'darwin':
math_libs = []
# XXX Omitted modules: gl, pure, dl, SGI-specific modules
#
# The following modules are all pretty straightforward, and compile
# on pretty much any POSIXish platform.
#
# array objects
exts.append( Extension('array', ['arraymodule.c']) )
# complex math library functions
exts.append( Extension('cmath', ['cmathmodule.c', '_math.c'],
depends=['_math.h'],
libraries=math_libs) )
# math library functions, e.g. sin()
exts.append( Extension('math', ['mathmodule.c', '_math.c'],
depends=['_math.h'],
libraries=math_libs) )
# time libraries: librt may be needed for clock_gettime()
time_libs = []
lib = sysconfig.get_config_var('TIMEMODULE_LIB')
if lib:
time_libs.append(lib)
# time operations and variables
exts.append( Extension('time', ['timemodule.c'],
libraries=time_libs) )
exts.append( Extension('_datetime', ['_datetimemodule.c']) )
# random number generator implemented in C
exts.append( Extension("_random", ["_randommodule.c"]) )
# bisect
exts.append( Extension("_bisect", ["_bisectmodule.c"]) )
# heapq
exts.append( Extension("_heapq", ["_heapqmodule.c"]) )
# C-optimized pickle replacement
exts.append( Extension("_pickle", ["_pickle.c"]) )
# atexit
exts.append( Extension("atexit", ["atexitmodule.c"]) )
# _json speedups
exts.append( Extension("_json", ["_json.c"]) )
# Python C API test module
exts.append( Extension('_testcapi', ['_testcapimodule.c'],
depends=['testcapi_long.h']) )
# Python PEP-3118 (buffer protocol) test module
exts.append( Extension('_testbuffer', ['_testbuffer.c']) )
# Test loading multiple modules from one compiled file (http://bugs.python.org/issue16421)
exts.append( Extension('_testimportmultiple', ['_testimportmultiple.c']) )
# profiler (_lsprof is for cProfile.py)
exts.append( Extension('_lsprof', ['_lsprof.c', 'rotatingtree.c']) )
# static Unicode character database
exts.append( Extension('unicodedata', ['unicodedata.c']) )
# _opcode module
exts.append( Extension('_opcode', ['_opcode.c']) )
# Modules with some UNIX dependencies -- on by default:
# (If you have a really backward UNIX, select and socket may not be
# supported...)
# fcntl(2) and ioctl(2)
libs = []
if (config_h_vars.get('FLOCK_NEEDS_LIBBSD', False)):
# May be necessary on AIX for flock function
libs = ['bsd']
exts.append( Extension('fcntl', ['fcntlmodule.c'], libraries=libs) )
# pwd(3)
exts.append( Extension('pwd', ['pwdmodule.c']) )
# grp(3)
exts.append( Extension('grp', ['grpmodule.c']) )
# spwd, shadow passwords
if (config_h_vars.get('HAVE_GETSPNAM', False) or
config_h_vars.get('HAVE_GETSPENT', False)):
exts.append( Extension('spwd', ['spwdmodule.c']) )
else:
missing.append('spwd')
# select(2); not on ancient System V
exts.append( Extension('select', ['selectmodule.c']) )
# Fred Drake's interface to the Python parser
exts.append( Extension('parser', ['parsermodule.c']) )
# Memory-mapped files (also works on Win32).
exts.append( Extension('mmap', ['mmapmodule.c']) )
# Lance Ellinghaus's syslog module
# syslog daemon interface
exts.append( Extension('syslog', ['syslogmodule.c']) )
#
# Here ends the simple stuff. From here on, modules need certain
# libraries, are platform-specific, or present other surprises.
#
# Multimedia modules
# These don't work for 64-bit platforms!!!
# These represent audio samples or images as strings:
# Operations on audio samples
# According to #993173, this one should actually work fine on
# 64-bit platforms.
exts.append( Extension('audioop', ['audioop.c']) )
# readline
do_readline = self.compiler.find_library_file(lib_dirs, 'readline')
readline_termcap_library = ""
curses_library = ""
# Cannot use os.popen here in py3k.
tmpfile = os.path.join(self.build_temp, 'readline_termcap_lib')
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
# Determine if readline is already linked against curses or tinfo.
if do_readline:
if cross_compiling:
ret = os.system("%s -d %s | grep '(NEEDED)' > %s" \
% (sysconfig.get_config_var('READELF'),
do_readline, tmpfile))
elif find_executable('ldd'):
ret = os.system("ldd %s > %s" % (do_readline, tmpfile))
else:
ret = 256
if ret >> 8 == 0:
with open(tmpfile) as fp:
for ln in fp:
if 'curses' in ln:
readline_termcap_library = re.sub(
r'.*lib(n?cursesw?)\.so.*', r'\1', ln
).rstrip()
break
# termcap interface split out from ncurses
if 'tinfo' in ln:
readline_termcap_library = 'tinfo'
break
if os.path.exists(tmpfile):
os.unlink(tmpfile)
# Issue 7384: If readline is already linked against curses,
# use the same library for the readline and curses modules.
if 'curses' in readline_termcap_library:
curses_library = readline_termcap_library
elif self.compiler.find_library_file(lib_dirs, 'ncursesw'):
curses_library = 'ncursesw'
elif self.compiler.find_library_file(lib_dirs, 'ncurses'):
curses_library = 'ncurses'
elif self.compiler.find_library_file(lib_dirs, 'curses'):
curses_library = 'curses'
if host_platform == 'darwin':
os_release = int(os.uname()[2].split('.')[0])
dep_target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET')
if (dep_target and
(tuple(int(n) for n in dep_target.split('.')[0:2])
< (10, 5) ) ):
os_release = 8
if os_release < 9:
# MacOSX 10.4 has a broken readline. Don't try to build
# the readline module unless the user has installed a fixed
# readline package
if find_file('readline/rlconf.h', inc_dirs, []) is None:
do_readline = False
if do_readline:
if host_platform == 'darwin' and os_release < 9:
# In every directory on the search path search for a dynamic
# library and then a static library, instead of first looking
# for dynamic libraries on the entire path.
# This way a staticly linked custom readline gets picked up
# before the (possibly broken) dynamic library in /usr/lib.
readline_extra_link_args = ('-Wl,-search_paths_first',)
else:
readline_extra_link_args = ()
readline_libs = ['readline']
if readline_termcap_library:
pass # Issue 7384: Already linked against curses or tinfo.
elif curses_library:
readline_libs.append(curses_library)
elif self.compiler.find_library_file(lib_dirs +
['/usr/lib/termcap'],
'termcap'):
readline_libs.append('termcap')
exts.append( Extension('readline', ['readline.c'],
library_dirs=['/usr/lib/termcap'],
extra_link_args=readline_extra_link_args,
libraries=readline_libs) )
else:
missing.append('readline')
# crypt module.
if self.compiler.find_library_file(lib_dirs, 'crypt'):
libs = ['crypt']
else:
libs = []
exts.append( Extension('_crypt', ['_cryptmodule.c'], libraries=libs) )
# CSV files
exts.append( Extension('_csv', ['_csv.c']) )
# POSIX subprocess module helper.
exts.append( Extension('_posixsubprocess', ['_posixsubprocess.c']) )
# socket(2)
exts.append( Extension('_socket', ['socketmodule.c'],
depends = ['socketmodule.h']) )
# Detect SSL support for the socket module (via _ssl)
search_for_ssl_incs_in = [
os.getenv("KBE_ROOT") + '/kbe/src/lib/dependencies/openssl/include/',
os.getenv("KBE_ROOT") + 'kbe/src/lib/dependencies/openssl/include/',
os.getcwd()[0 : os.getcwd().find("kbe/src/lib") + len("kbe/src/lib")] + '/dependencies/openssl/include/',
]
ssl_incs = find_file('openssl/ssl.h', [],
search_for_ssl_incs_in
)
if ssl_incs is not None:
krb5_h = find_file('krb5.h', search_for_ssl_incs_in,
['/usr/kerberos/include'])
if krb5_h:
ssl_incs += krb5_h
ssl_libs = find_library_file(self.compiler, 'ssl',[],
[os.getenv("KBE_ROOT") + '/kbe/src/libs/',
os.getenv("KBE_ROOT") + 'kbe/src/libs/',
os.getcwd()[0 : os.getcwd().find("kbe/src/") + len("kbe/src/")] + 'libs/',
] )
if (ssl_incs is not None and
ssl_libs is not None):
exts.append( Extension('_ssl', ['_ssl.c'],
include_dirs = ssl_incs,
library_dirs = ssl_libs,
libraries = ['ssl', 'crypto'],
depends = ['socketmodule.h']), )
else:
missing.append('_ssl')
# find out which version of OpenSSL we have
openssl_ver = 0
openssl_ver_re = re.compile(
'^\s*#\s*define\s+OPENSSL_VERSION_NUMBER\s+(0x[0-9a-fA-F]+)' )
# look for the openssl version header on the compiler search path.
opensslv_h = find_file('openssl/opensslv.h', [],
search_for_ssl_incs_in)
if opensslv_h:
name = os.path.join(opensslv_h[0], 'openssl/opensslv.h')
if host_platform == 'darwin' and is_macosx_sdk_path(name):
name = os.path.join(macosx_sdk_root(), name[1:])
try:
with open(name, 'r') as incfile:
for line in incfile:
m = openssl_ver_re.match(line)
if m:
openssl_ver = int(m.group(1), 16)
break
except IOError as msg:
print("IOError while reading opensshv.h:", msg)
#print('openssl_ver = 0x%08x' % openssl_ver)
min_openssl_ver = 0x00907000
have_any_openssl = ssl_incs is not None and ssl_libs is not None
have_usable_openssl = (have_any_openssl and
openssl_ver >= min_openssl_ver)
if have_any_openssl:
if have_usable_openssl:
# The _hashlib module wraps optimized implementations
# of hash functions from the OpenSSL library.
exts.append( Extension('_hashlib', ['_hashopenssl.c'],
depends = ['hashlib.h'],
include_dirs = ssl_incs,
library_dirs = ssl_libs,
libraries = ['ssl', 'crypto']) )
else:
print("warning: openssl 0x%08x is too old for _hashlib" %
openssl_ver)
missing.append('_hashlib')
# We always compile these even when OpenSSL is available (issue #14693).
# It's harmless and the object code is tiny (40-50 KB per module,
# only loaded when actually used).
exts.append( Extension('_sha256', ['sha256module.c'],
depends=['hashlib.h']) )
exts.append( Extension('_sha512', ['sha512module.c'],
depends=['hashlib.h']) )
exts.append( Extension('_md5', ['md5module.c'],
depends=['hashlib.h']) )
exts.append( Extension('_sha1', ['sha1module.c'],
depends=['hashlib.h']) )
# Modules that provide persistent dictionary-like semantics. You will
# probably want to arrange for at least one of them to be available on
# your machine, though none are defined by default because of library
# dependencies. The Python module dbm/__init__.py provides an
# implementation independent wrapper for these; dbm/dumb.py provides
# similar functionality (but slower of course) implemented in Python.
# Sleepycat^WOracle Berkeley DB interface.
# http://www.oracle.com/database/berkeley-db/db/index.html
#
# This requires the Sleepycat^WOracle DB code. The supported versions
# are set below. Visit the URL above to download
# a release. Most open source OSes come with one or more
# versions of BerkeleyDB already installed.
max_db_ver = (5, 3)
min_db_ver = (3, 3)
db_setup_debug = False # verbose debug prints from this script?
def allow_db_ver(db_ver):
"""Returns a boolean if the given BerkeleyDB version is acceptable.
Args:
db_ver: A tuple of the version to verify.
"""
if not (min_db_ver <= db_ver <= max_db_ver):
return False
return True
def gen_db_minor_ver_nums(major):
if major == 4:
for x in range(max_db_ver[1]+1):
if allow_db_ver((4, x)):
yield x
elif major == 3:
for x in (3,):
if allow_db_ver((3, x)):
yield x
else:
raise ValueError("unknown major BerkeleyDB version", major)
# construct a list of paths to look for the header file in on
# top of the normal inc_dirs.
db_inc_paths = [
'/usr/include/db4',
'/usr/local/include/db4',
'/opt/sfw/include/db4',
'/usr/include/db3',
'/usr/local/include/db3',
'/opt/sfw/include/db3',
# Fink defaults (http://fink.sourceforge.net/)
'/sw/include/db4',
'/sw/include/db3',
]
# 4.x minor number specific paths
for x in gen_db_minor_ver_nums(4):
db_inc_paths.append('/usr/include/db4%d' % x)
db_inc_paths.append('/usr/include/db4.%d' % x)
db_inc_paths.append('/usr/local/BerkeleyDB.4.%d/include' % x)
db_inc_paths.append('/usr/local/include/db4%d' % x)
db_inc_paths.append('/pkg/db-4.%d/include' % x)
db_inc_paths.append('/opt/db-4.%d/include' % x)
# MacPorts default (http://www.macports.org/)
db_inc_paths.append('/opt/local/include/db4%d' % x)
# 3.x minor number specific paths
for x in gen_db_minor_ver_nums(3):
db_inc_paths.append('/usr/include/db3%d' % x)
db_inc_paths.append('/usr/local/BerkeleyDB.3.%d/include' % x)
db_inc_paths.append('/usr/local/include/db3%d' % x)
db_inc_paths.append('/pkg/db-3.%d/include' % x)
db_inc_paths.append('/opt/db-3.%d/include' % x)
if cross_compiling:
db_inc_paths = []
# Add some common subdirectories for Sleepycat DB to the list,
# based on the standard include directories. This way DB3/4 gets
# picked up when it is installed in a non-standard prefix and
# the user has added that prefix into inc_dirs.
std_variants = []
for dn in inc_dirs:
std_variants.append(os.path.join(dn, 'db3'))
std_variants.append(os.path.join(dn, 'db4'))
for x in gen_db_minor_ver_nums(4):
std_variants.append(os.path.join(dn, "db4%d"%x))
std_variants.append(os.path.join(dn, "db4.%d"%x))
for x in gen_db_minor_ver_nums(3):
std_variants.append(os.path.join(dn, "db3%d"%x))
std_variants.append(os.path.join(dn, "db3.%d"%x))
db_inc_paths = std_variants + db_inc_paths
db_inc_paths = [p for p in db_inc_paths if os.path.exists(p)]
db_ver_inc_map = {}
if host_platform == 'darwin':
sysroot = macosx_sdk_root()
class db_found(Exception): pass
try:
# See whether there is a Sleepycat header in the standard
# search path.
for d in inc_dirs + db_inc_paths:
f = os.path.join(d, "db.h")
if host_platform == 'darwin' and is_macosx_sdk_path(d):
f = os.path.join(sysroot, d[1:], "db.h")
if db_setup_debug: print("db: looking for db.h in", f)
if os.path.exists(f):
with open(f, 'rb') as file:
f = file.read()
m = re.search(br"#define\WDB_VERSION_MAJOR\W(\d+)", f)
if m:
db_major = int(m.group(1))
m = re.search(br"#define\WDB_VERSION_MINOR\W(\d+)", f)
db_minor = int(m.group(1))
db_ver = (db_major, db_minor)
# Avoid 4.6 prior to 4.6.21 due to a BerkeleyDB bug
if db_ver == (4, 6):
m = re.search(br"#define\WDB_VERSION_PATCH\W(\d+)", f)
db_patch = int(m.group(1))
if db_patch < 21:
print("db.h:", db_ver, "patch", db_patch,
"being ignored (4.6.x must be >= 4.6.21)")
continue
if ( (db_ver not in db_ver_inc_map) and
allow_db_ver(db_ver) ):
# save the include directory with the db.h version
# (first occurrence only)
db_ver_inc_map[db_ver] = d
if db_setup_debug:
print("db.h: found", db_ver, "in", d)
else:
# we already found a header for this library version
if db_setup_debug: print("db.h: ignoring", d)
else:
# ignore this header, it didn't contain a version number
if db_setup_debug:
print("db.h: no version number version in", d)
db_found_vers = list(db_ver_inc_map.keys())
db_found_vers.sort()
while db_found_vers:
db_ver = db_found_vers.pop()
db_incdir = db_ver_inc_map[db_ver]
# check lib directories parallel to the location of the header
db_dirs_to_check = [
db_incdir.replace("include", 'lib64'),
db_incdir.replace("include", 'lib'),
]
if host_platform != 'darwin':
db_dirs_to_check = list(filter(os.path.isdir, db_dirs_to_check))
else:
# Same as other branch, but takes OSX SDK into account
tmp = []
for dn in db_dirs_to_check:
if is_macosx_sdk_path(dn):
if os.path.isdir(os.path.join(sysroot, dn[1:])):
tmp.append(dn)
else:
if os.path.isdir(dn):
tmp.append(dn)
db_dirs_to_check = tmp
db_dirs_to_check = tmp
# Look for a version specific db-X.Y before an ambiguous dbX
# XXX should we -ever- look for a dbX name? Do any
# systems really not name their library by version and
# symlink to more general names?
for dblib in (('db-%d.%d' % db_ver),
('db%d%d' % db_ver),
('db%d' % db_ver[0])):
dblib_file = self.compiler.find_library_file(
db_dirs_to_check + lib_dirs, dblib )
if dblib_file:
dblib_dir = [ os.path.abspath(os.path.dirname(dblib_file)) ]
raise db_found
else:
if db_setup_debug: print("db lib: ", dblib, "not found")
except db_found:
if db_setup_debug:
print("bsddb using BerkeleyDB lib:", db_ver, dblib)
print("bsddb lib dir:", dblib_dir, " inc dir:", db_incdir)
dblibs = [dblib]
# Only add the found library and include directories if they aren't
# already being searched. This avoids an explicit runtime library
# dependency.
if db_incdir in inc_dirs:
db_incs = None
else:
db_incs = [db_incdir]
if dblib_dir[0] in lib_dirs:
dblib_dir = None
else:
if db_setup_debug: print("db: no appropriate library found")
db_incs = None
dblibs = []
dblib_dir = None
# The sqlite interface
sqlite_setup_debug = False # verbose debug prints from this script?
# We hunt for #define SQLITE_VERSION "n.n.n"
# We need to find >= sqlite version 3.0.8
sqlite_incdir = sqlite_libdir = None
sqlite_inc_paths = [ '/usr/include',
'/usr/include/sqlite',
'/usr/include/sqlite3',
'/usr/local/include',
'/usr/local/include/sqlite',
'/usr/local/include/sqlite3',
]
if cross_compiling:
sqlite_inc_paths = []
MIN_SQLITE_VERSION_NUMBER = (3, 0, 8)
MIN_SQLITE_VERSION = ".".join([str(x)
for x in MIN_SQLITE_VERSION_NUMBER])
# Scan the default include directories before the SQLite specific
# ones. This allows one to override the copy of sqlite on OSX,
# where /usr/include contains an old version of sqlite.
if host_platform == 'darwin':
sysroot = macosx_sdk_root()
for d_ in inc_dirs + sqlite_inc_paths:
d = d_
if host_platform == 'darwin' and is_macosx_sdk_path(d):
d = os.path.join(sysroot, d[1:])
f = os.path.join(d, "sqlite3.h")
if os.path.exists(f):
if sqlite_setup_debug: print("sqlite: found %s"%f)
with open(f) as file:
incf = file.read()
m = re.search(
r'\s*.*#\s*.*define\s.*SQLITE_VERSION\W*"([\d\.]*)"', incf)
if m:
sqlite_version = m.group(1)
sqlite_version_tuple = tuple([int(x)
for x in sqlite_version.split(".")])
if sqlite_version_tuple >= MIN_SQLITE_VERSION_NUMBER:
# we win!
if sqlite_setup_debug:
print("%s/sqlite3.h: version %s"%(d, sqlite_version))
sqlite_incdir = d
break
else:
if sqlite_setup_debug:
print("%s: version %d is too old, need >= %s"%(d,
sqlite_version, MIN_SQLITE_VERSION))
elif sqlite_setup_debug:
print("sqlite: %s had no SQLITE_VERSION"%(f,))
if sqlite_incdir:
sqlite_dirs_to_check = [
os.path.join(sqlite_incdir, '..', 'lib64'),
os.path.join(sqlite_incdir, '..', 'lib'),
os.path.join(sqlite_incdir, '..', '..', 'lib64'),
os.path.join(sqlite_incdir, '..', '..', 'lib'),
]
sqlite_libfile = self.compiler.find_library_file(
sqlite_dirs_to_check + lib_dirs, 'sqlite3')
if sqlite_libfile:
sqlite_libdir = [os.path.abspath(os.path.dirname(sqlite_libfile))]
if sqlite_incdir and sqlite_libdir:
sqlite_srcs = ['_sqlite/cache.c',
'_sqlite/connection.c',
'_sqlite/cursor.c',
'_sqlite/microprotocols.c',
'_sqlite/module.c',
'_sqlite/prepare_protocol.c',
'_sqlite/row.c',
'_sqlite/statement.c',
'_sqlite/util.c', ]
sqlite_defines = []
if host_platform != "win32":
sqlite_defines.append(('MODULE_NAME', '"sqlite3"'))
else:
sqlite_defines.append(('MODULE_NAME', '\\"sqlite3\\"'))
# Enable support for loadable extensions in the sqlite3 module
# if --enable-loadable-sqlite-extensions configure option is used.
if '--enable-loadable-sqlite-extensions' not in sysconfig.get_config_var("CONFIG_ARGS"):
sqlite_defines.append(("SQLITE_OMIT_LOAD_EXTENSION", "1"))
if host_platform == 'darwin':
# In every directory on the search path search for a dynamic
# library and then a static library, instead of first looking
# for dynamic libraries on the entire path.
# This way a statically linked custom sqlite gets picked up
# before the dynamic library in /usr/lib.
sqlite_extra_link_args = ('-Wl,-search_paths_first',)
else:
sqlite_extra_link_args = ()
include_dirs = ["Modules/_sqlite"]
# Only include the directory where sqlite was found if it does
# not already exist in set include directories, otherwise you
# can end up with a bad search path order.
if sqlite_incdir not in self.compiler.include_dirs:
include_dirs.append(sqlite_incdir)
# avoid a runtime library path for a system library dir
if sqlite_libdir and sqlite_libdir[0] in lib_dirs:
sqlite_libdir = None
exts.append(Extension('_sqlite3', sqlite_srcs,
define_macros=sqlite_defines,
include_dirs=include_dirs,
library_dirs=sqlite_libdir,
extra_link_args=sqlite_extra_link_args,
libraries=["sqlite3",]))
else:
missing.append('_sqlite3')
dbm_setup_debug = False # verbose debug prints from this script?
dbm_order = ['gdbm']
# The standard Unix dbm module:
if host_platform not in ['cygwin']:
config_args = [arg.strip("'")
for arg in sysconfig.get_config_var("CONFIG_ARGS").split()]
dbm_args = [arg for arg in config_args
if arg.startswith('--with-dbmliborder=')]
if dbm_args:
dbm_order = [arg.split('=')[-1] for arg in dbm_args][-1].split(":")
else:
dbm_order = "ndbm:gdbm:bdb".split(":")
dbmext = None
for cand in dbm_order:
if cand == "ndbm":
if find_file("ndbm.h", inc_dirs, []) is not None:
# Some systems have -lndbm, others have -lgdbm_compat,
# others don't have either
if self.compiler.find_library_file(lib_dirs,
'ndbm'):
ndbm_libs = ['ndbm']
elif self.compiler.find_library_file(lib_dirs,
'gdbm_compat'):
ndbm_libs = ['gdbm_compat']
else:
ndbm_libs = []
if dbm_setup_debug: print("building dbm using ndbm")
dbmext = Extension('_dbm', ['_dbmmodule.c'],
define_macros=[
('HAVE_NDBM_H',None),
],
libraries=ndbm_libs)
break
elif cand == "gdbm":
if self.compiler.find_library_file(lib_dirs, 'gdbm'):
gdbm_libs = ['gdbm']
if self.compiler.find_library_file(lib_dirs,
'gdbm_compat'):
gdbm_libs.append('gdbm_compat')
if find_file("gdbm/ndbm.h", inc_dirs, []) is not None:
if dbm_setup_debug: print("building dbm using gdbm")
dbmext = Extension(
'_dbm', ['_dbmmodule.c'],
define_macros=[
('HAVE_GDBM_NDBM_H', None),
],
libraries = gdbm_libs)
break
if find_file("gdbm-ndbm.h", inc_dirs, []) is not None:
if dbm_setup_debug: print("building dbm using gdbm")
dbmext = Extension(
'_dbm', ['_dbmmodule.c'],
define_macros=[
('HAVE_GDBM_DASH_NDBM_H', None),
],
libraries = gdbm_libs)
break
elif cand == "bdb":
if dblibs:
if dbm_setup_debug: print("building dbm using bdb")
dbmext = Extension('_dbm', ['_dbmmodule.c'],
library_dirs=dblib_dir,
runtime_library_dirs=dblib_dir,
include_dirs=db_incs,
define_macros=[
('HAVE_BERKDB_H', None),
('DB_DBM_HSEARCH', None),
],
libraries=dblibs)
break
if dbmext is not None:
exts.append(dbmext)
else:
missing.append('_dbm')
# Anthony Baxter's gdbm module. GNU dbm(3) will require -lgdbm:
if ('gdbm' in dbm_order and
self.compiler.find_library_file(lib_dirs, 'gdbm')):
exts.append( Extension('_gdbm', ['_gdbmmodule.c'],
libraries = ['gdbm'] ) )
else:
missing.append('_gdbm')
# Unix-only modules
if host_platform != 'win32':
# Steen Lumholt's termios module
exts.append( Extension('termios', ['termios.c']) )
# Jeremy Hylton's rlimit interface
exts.append( Extension('resource', ['resource.c']) )
# Sun yellow pages. Some systems have the functions in libc.
if (host_platform not in ['cygwin', 'qnx6'] and
find_file('rpcsvc/yp_prot.h', inc_dirs, []) is not None):
if (self.compiler.find_library_file(lib_dirs, 'nsl')):
libs = ['nsl']
else:
libs = []
exts.append( Extension('nis', ['nismodule.c'],
libraries = libs) )
else:
missing.append('nis')
else:
missing.extend(['nis', 'resource', 'termios'])
# Curses support, requiring the System V version of curses, often
# provided by the ncurses library.
curses_defines = []
curses_includes = []
panel_library = 'panel'
if curses_library == 'ncursesw':
curses_defines.append(('HAVE_NCURSESW', '1'))
curses_includes.append('/usr/include/ncursesw')
# Bug 1464056: If _curses.so links with ncursesw,
# _curses_panel.so must link with panelw.
panel_library = 'panelw'
if host_platform == 'darwin':
# On OS X, there is no separate /usr/lib/libncursesw nor
# libpanelw. If we are here, we found a locally-supplied
# version of libncursesw. There should be also be a
# libpanelw. _XOPEN_SOURCE defines are usually excluded
# for OS X but we need _XOPEN_SOURCE_EXTENDED here for
# ncurses wide char support
curses_defines.append(('_XOPEN_SOURCE_EXTENDED', '1'))
elif host_platform == 'darwin' and curses_library == 'ncurses':
# Building with the system-suppied combined libncurses/libpanel
curses_defines.append(('HAVE_NCURSESW', '1'))
curses_defines.append(('_XOPEN_SOURCE_EXTENDED', '1'))
if curses_library.startswith('ncurses'):
curses_libs = [curses_library]
exts.append( Extension('_curses', ['_cursesmodule.c'],
include_dirs=curses_includes,
define_macros=curses_defines,
libraries = curses_libs) )
elif curses_library == 'curses' and host_platform != 'darwin':
# OSX has an old Berkeley curses, not good enough for
# the _curses module.
if (self.compiler.find_library_file(lib_dirs, 'terminfo')):
curses_libs = ['curses', 'terminfo']
elif (self.compiler.find_library_file(lib_dirs, 'termcap')):
curses_libs = ['curses', 'termcap']
else:
curses_libs = ['curses']
exts.append( Extension('_curses', ['_cursesmodule.c'],
define_macros=curses_defines,
libraries = curses_libs) )
else:
missing.append('_curses')
# If the curses module is enabled, check for the panel module
if (module_enabled(exts, '_curses') and
self.compiler.find_library_file(lib_dirs, panel_library)):
exts.append( Extension('_curses_panel', ['_curses_panel.c'],
include_dirs=curses_includes,
define_macros=curses_defines,
libraries = [panel_library] + curses_libs) )
else:
missing.append('_curses_panel')
# Andrew Kuchling's zlib module. Note that some versions of zlib
# 1.1.3 have security problems. See CERT Advisory CA-2002-07:
# http://www.cert.org/advisories/CA-2002-07.html
#
# zlib 1.1.4 is fixed, but at least one vendor (RedHat) has decided to
# patch its zlib 1.1.3 package instead of upgrading to 1.1.4. For
# now, we still accept 1.1.3, because we think it's difficult to
# exploit this in Python, and we'd rather make it RedHat's problem
# than our problem <wink>.
#
# You can upgrade zlib to version 1.1.4 yourself by going to
# http://www.gzip.org/zlib/
zlib_inc = find_file('zlib.h', [], inc_dirs)
have_zlib = False
if zlib_inc is not None:
zlib_h = zlib_inc[0] + '/zlib.h'
version = '"0.0.0"'
version_req = '"1.1.3"'
if host_platform == 'darwin' and is_macosx_sdk_path(zlib_h):
zlib_h = os.path.join(macosx_sdk_root(), zlib_h[1:])
with open(zlib_h) as fp:
while 1:
line = fp.readline()
if not line:
break
if line.startswith('#define ZLIB_VERSION'):
version = line.split()[2]
break
if version >= version_req:
if (self.compiler.find_library_file(lib_dirs, 'z')):
if host_platform == "darwin":
zlib_extra_link_args = ('-Wl,-search_paths_first',)
else:
zlib_extra_link_args = ()
exts.append( Extension('zlib', ['zlibmodule.c'],
libraries = ['z'],
extra_link_args = zlib_extra_link_args))
have_zlib = True
else:
missing.append('zlib')
else:
missing.append('zlib')
else:
missing.append('zlib')
# Helper module for various ascii-encoders. Uses zlib for an optimized
# crc32 if we have it. Otherwise binascii uses its own.
if have_zlib:
extra_compile_args = ['-DUSE_ZLIB_CRC32']
libraries = ['z']
extra_link_args = zlib_extra_link_args
else:
extra_compile_args = []
libraries = []
extra_link_args = []
exts.append( Extension('binascii', ['binascii.c'],
extra_compile_args = extra_compile_args,
libraries = libraries,
extra_link_args = extra_link_args) )
# Gustavo Niemeyer's bz2 module.
if (self.compiler.find_library_file(lib_dirs, 'bz2')):
if host_platform == "darwin":
bz2_extra_link_args = ('-Wl,-search_paths_first',)
else:
bz2_extra_link_args = ()
exts.append( Extension('_bz2', ['_bz2module.c'],
libraries = ['bz2'],
extra_link_args = bz2_extra_link_args) )
else:
missing.append('_bz2')
# LZMA compression support.
if self.compiler.find_library_file(lib_dirs, 'lzma'):
exts.append( Extension('_lzma', ['_lzmamodule.c'],
libraries = ['lzma']) )
else:
missing.append('_lzma')
# Interface to the Expat XML parser
#
# Expat was written by James Clark and is now maintained by a group of
# developers on SourceForge; see www.libexpat.org for more information.
# The pyexpat module was written by Paul Prescod after a prototype by
# Jack Jansen. The Expat source is included in Modules/expat/. Usage
# of a system shared libexpat.so is possible with --with-system-expat
# configure option.
#
# More information on Expat can be found at www.libexpat.org.
#
if '--with-system-expat' in sysconfig.get_config_var("CONFIG_ARGS"):
expat_inc = []
define_macros = []
expat_lib = ['expat']
expat_sources = []
expat_depends = []
else:
expat_inc = [os.path.join(os.getcwd(), srcdir, 'Modules', 'expat')]
define_macros = [
('HAVE_EXPAT_CONFIG_H', '1'),
]
expat_lib = []
expat_sources = ['expat/xmlparse.c',
'expat/xmlrole.c',
'expat/xmltok.c']
expat_depends = ['expat/ascii.h',
'expat/asciitab.h',
'expat/expat.h',
'expat/expat_config.h',
'expat/expat_external.h',
'expat/internal.h',
'expat/latin1tab.h',
'expat/utf8tab.h',
'expat/xmlrole.h',
'expat/xmltok.h',
'expat/xmltok_impl.h'
]
exts.append(Extension('pyexpat',
define_macros = define_macros,
include_dirs = expat_inc,
libraries = expat_lib,
sources = ['pyexpat.c'] + expat_sources,
depends = expat_depends,
))
# Fredrik Lundh's cElementTree module. Note that this also
# uses expat (via the CAPI hook in pyexpat).
if os.path.isfile(os.path.join(srcdir, 'Modules', '_elementtree.c')):
define_macros.append(('USE_PYEXPAT_CAPI', None))
exts.append(Extension('_elementtree',
define_macros = define_macros,
include_dirs = expat_inc,
libraries = expat_lib,
sources = ['_elementtree.c'],
depends = ['pyexpat.c'] + expat_sources +
expat_depends,
))
else:
missing.append('_elementtree')
# Hye-Shik Chang's CJKCodecs modules.
exts.append(Extension('_multibytecodec',
['cjkcodecs/multibytecodec.c']))
for loc in ('kr', 'jp', 'cn', 'tw', 'hk', 'iso2022'):
exts.append(Extension('_codecs_%s' % loc,
['cjkcodecs/_codecs_%s.c' % loc]))
# Stefan Krah's _decimal module
exts.append(self._decimal_ext())
# Thomas Heller's _ctypes module
self.detect_ctypes(inc_dirs, lib_dirs)
# Richard Oudkerk's multiprocessing module
if host_platform == 'win32': # Windows
macros = dict()
libraries = ['ws2_32']
elif host_platform == 'darwin': # Mac OSX
macros = dict()
libraries = []
elif host_platform == 'cygwin': # Cygwin
macros = dict()
libraries = []
elif host_platform in ('freebsd4', 'freebsd5', 'freebsd6', 'freebsd7', 'freebsd8'):
# FreeBSD's P1003.1b semaphore support is very experimental
# and has many known problems. (as of June 2008)
macros = dict()
libraries = []
elif host_platform.startswith('openbsd'):
macros = dict()
libraries = []
elif host_platform.startswith('netbsd'):
macros = dict()
libraries = []
else: # Linux and other unices
macros = dict()
libraries = ['rt']
if host_platform == 'win32':
multiprocessing_srcs = [ '_multiprocessing/multiprocessing.c',
'_multiprocessing/semaphore.c',
]
else:
multiprocessing_srcs = [ '_multiprocessing/multiprocessing.c',
]
if (sysconfig.get_config_var('HAVE_SEM_OPEN') and not
sysconfig.get_config_var('POSIX_SEMAPHORES_NOT_ENABLED')):
multiprocessing_srcs.append('_multiprocessing/semaphore.c')
if sysconfig.get_config_var('WITH_THREAD'):
exts.append ( Extension('_multiprocessing', multiprocessing_srcs,
define_macros=list(macros.items()),
include_dirs=["Modules/_multiprocessing"]))
else:
missing.append('_multiprocessing')
# End multiprocessing
# Platform-specific libraries
if host_platform.startswith(('linux', 'freebsd', 'gnukfreebsd')):
exts.append( Extension('ossaudiodev', ['ossaudiodev.c']) )
else:
missing.append('ossaudiodev')
if host_platform == 'darwin':
exts.append(
Extension('_scproxy', ['_scproxy.c'],
extra_link_args=[
'-framework', 'SystemConfiguration',
'-framework', 'CoreFoundation',
]))
self.extensions.extend(exts)
# Call the method for detecting whether _tkinter can be compiled
self.detect_tkinter(inc_dirs, lib_dirs)
if '_tkinter' not in [e.name for e in self.extensions]:
missing.append('_tkinter')
## # Uncomment these lines if you want to play with xxmodule.c
## ext = Extension('xx', ['xxmodule.c'])
## self.extensions.append(ext)
if 'd' not in sys.abiflags:
ext = Extension('xxlimited', ['xxlimited.c'],
define_macros=[('Py_LIMITED_API', '0x03040000')])
self.extensions.append(ext)
return missing
def detect_tkinter_explicitly(self):
# Build _tkinter using explicit locations for Tcl/Tk.
#
# This is enabled when both arguments are given to ./configure:
#
# --with-tcltk-includes="-I/path/to/tclincludes \
# -I/path/to/tkincludes"
# --with-tcltk-libs="-L/path/to/tcllibs -ltclm.n \
# -L/path/to/tklibs -ltkm.n"
#
# These values can also be specified or overriden via make:
# make TCLTK_INCLUDES="..." TCLTK_LIBS="..."
#
# This can be useful for building and testing tkinter with multiple
# versions of Tcl/Tk. Note that a build of Tk depends on a particular
# build of Tcl so you need to specify both arguments and use care when
# overriding.
# The _TCLTK variables are created in the Makefile sharedmods target.
tcltk_includes = os.environ.get('_TCLTK_INCLUDES')
tcltk_libs = os.environ.get('_TCLTK_LIBS')
if not (tcltk_includes and tcltk_libs):
# Resume default configuration search.
return 0
extra_compile_args = tcltk_includes.split()
extra_link_args = tcltk_libs.split()
ext = Extension('_tkinter', ['_tkinter.c', 'tkappinit.c'],
define_macros=[('WITH_APPINIT', 1)],
extra_compile_args = extra_compile_args,
extra_link_args = extra_link_args,
)
self.extensions.append(ext)
return 1
def detect_tkinter_darwin(self, inc_dirs, lib_dirs):
# The _tkinter module, using frameworks. Since frameworks are quite
# different the UNIX search logic is not sharable.
from os.path import join, exists
framework_dirs = [
'/Library/Frameworks',
'/System/Library/Frameworks/',
join(os.getenv('HOME'), '/Library/Frameworks')
]
sysroot = macosx_sdk_root()
# Find the directory that contains the Tcl.framework and Tk.framework
# bundles.
# XXX distutils should support -F!
for F in framework_dirs:
# both Tcl.framework and Tk.framework should be present
for fw in 'Tcl', 'Tk':
if is_macosx_sdk_path(F):
if not exists(join(sysroot, F[1:], fw + '.framework')):
break
else:
if not exists(join(F, fw + '.framework')):
break
else:
# ok, F is now directory with both frameworks. Continure
# building
break
else:
# Tk and Tcl frameworks not found. Normal "unix" tkinter search
# will now resume.
return 0
# For 8.4a2, we must add -I options that point inside the Tcl and Tk
# frameworks. In later release we should hopefully be able to pass
# the -F option to gcc, which specifies a framework lookup path.
#
include_dirs = [
join(F, fw + '.framework', H)
for fw in ('Tcl', 'Tk')
for H in ('Headers', 'Versions/Current/PrivateHeaders')
]
# For 8.4a2, the X11 headers are not included. Rather than include a
# complicated search, this is a hard-coded path. It could bail out
# if X11 libs are not found...
include_dirs.append('/usr/X11R6/include')
frameworks = ['-framework', 'Tcl', '-framework', 'Tk']
# All existing framework builds of Tcl/Tk don't support 64-bit
# architectures.
cflags = sysconfig.get_config_vars('CFLAGS')[0]
archs = re.findall('-arch\s+(\w+)', cflags)
tmpfile = os.path.join(self.build_temp, 'tk.arch')
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
# Note: cannot use os.popen or subprocess here, that
# requires extensions that are not available here.
if is_macosx_sdk_path(F):
os.system("file %s/Tk.framework/Tk | grep 'for architecture' > %s"%(os.path.join(sysroot, F[1:]), tmpfile))
else:
os.system("file %s/Tk.framework/Tk | grep 'for architecture' > %s"%(F, tmpfile))
with open(tmpfile) as fp:
detected_archs = []
for ln in fp:
a = ln.split()[-1]
if a in archs:
detected_archs.append(ln.split()[-1])
os.unlink(tmpfile)
for a in detected_archs:
frameworks.append('-arch')
frameworks.append(a)
ext = Extension('_tkinter', ['_tkinter.c', 'tkappinit.c'],
define_macros=[('WITH_APPINIT', 1)],
include_dirs = include_dirs,
libraries = [],
extra_compile_args = frameworks[2:],
extra_link_args = frameworks,
)
self.extensions.append(ext)
return 1
    def detect_tkinter(self, inc_dirs, lib_dirs):
        """Locate Tcl/Tk and register the _tkinter extension.

        Tries, in order: explicit --with-tcltk-* values, the Darwin
        framework build, and finally a conventional search of lib_dirs /
        inc_dirs for versioned tcl/tk libraries and matching headers.
        Appends the configured Extension to self.extensions on success;
        on failure it returns without registering anything (the caller
        then reports _tkinter as missing).
        """
        # The _tkinter module.
        # Check whether --with-tcltk-includes and --with-tcltk-libs were
        # configured or passed into the make target. If so, use these values
        # to build tkinter and bypass the searches for Tcl and TK in standard
        # locations.
        if self.detect_tkinter_explicitly():
            return
        # Rather than complicate the code below, detecting and building
        # AquaTk is a separate method. Only one Tkinter will be built on
        # Darwin - either AquaTk, if it is found, or X11 based Tk.
        if (host_platform == 'darwin' and
            self.detect_tkinter_darwin(inc_dirs, lib_dirs)):
            return
        # Assume we haven't found any of the libraries or include files
        # The versions with dots are used on Unix, and the versions without
        # dots on Windows, for detection by cygwin.
        tcllib = tklib = tcl_includes = tk_includes = None
        # Newest version first; stop at the first version for which BOTH
        # libraries are present.
        for version in ['8.6', '86', '8.5', '85', '8.4', '84', '8.3', '83',
                        '8.2', '82', '8.1', '81', '8.0', '80']:
            tklib = self.compiler.find_library_file(lib_dirs,
                                                        'tk' + version)
            tcllib = self.compiler.find_library_file(lib_dirs,
                                                         'tcl' + version)
            if tklib and tcllib:
                # Exit the loop when we've found the Tcl/Tk libraries
                break
        # Now check for the header files
        if tklib and tcllib:
            # Check for the include files on Debian and {Free,Open}BSD, where
            # they're put in /usr/include/{tcl,tk}X.Y
            # NOTE: 'version' is the loop variable left over from the search
            # above; the rest of this method depends on that leakage.
            dotversion = version
            if '.' not in dotversion and "bsd" in host_platform.lower():
                # OpenBSD and FreeBSD use Tcl/Tk library names like libtcl83.a,
                # but the include subdirs are named like .../include/tcl8.3.
                dotversion = dotversion[:-1] + '.' + dotversion[-1]
            tcl_include_sub = []
            tk_include_sub = []
            for dir in inc_dirs:
                tcl_include_sub += [dir + os.sep + "tcl" + dotversion]
                tk_include_sub += [dir + os.sep + "tk" + dotversion]
            # tk.h may also live in the tcl include directories.
            tk_include_sub += tcl_include_sub
            tcl_includes = find_file('tcl.h', inc_dirs, tcl_include_sub)
            tk_includes = find_file('tk.h', inc_dirs, tk_include_sub)
        if (tcllib is None or tklib is None or
            tcl_includes is None or tk_includes is None):
            self.announce("INFO: Can't locate Tcl/Tk libs and/or headers", 2)
            return
        # OK... everything seems to be present for Tcl/Tk.
        include_dirs = [] ; libs = [] ; defs = [] ; added_lib_dirs = []
        for dir in tcl_includes + tk_includes:
            if dir not in include_dirs:
                include_dirs.append(dir)
        # Check for various platform-specific directories
        if host_platform == 'sunos5':
            include_dirs.append('/usr/openwin/include')
            added_lib_dirs.append('/usr/openwin/lib')
        elif os.path.exists('/usr/X11R6/include'):
            include_dirs.append('/usr/X11R6/include')
            added_lib_dirs.append('/usr/X11R6/lib64')
            added_lib_dirs.append('/usr/X11R6/lib')
        elif os.path.exists('/usr/X11R5/include'):
            include_dirs.append('/usr/X11R5/include')
            added_lib_dirs.append('/usr/X11R5/lib')
        else:
            # Assume default location for X11
            include_dirs.append('/usr/X11/include')
            added_lib_dirs.append('/usr/X11/lib')
        # If Cygwin, then verify that X is installed before proceeding
        if host_platform == 'cygwin':
            x11_inc = find_file('X11/Xlib.h', [], include_dirs)
            if x11_inc is None:
                # X11 headers missing -- give up silently; caller reports
                # _tkinter as missing.
                return
        # Check for BLT extension
        if self.compiler.find_library_file(lib_dirs + added_lib_dirs,
                                               'BLT8.0'):
            defs.append( ('WITH_BLT', 1) )
            libs.append('BLT8.0')
        elif self.compiler.find_library_file(lib_dirs + added_lib_dirs,
                                                'BLT'):
            defs.append( ('WITH_BLT', 1) )
            libs.append('BLT')
        # Add the Tcl/Tk libraries
        libs.append('tk'+ version)
        libs.append('tcl'+ version)
        if host_platform in ['aix3', 'aix4']:
            # AIX needs the dynamic-load helper library.
            libs.append('ld')
        # Finally, link with the X11 libraries (not appropriate on cygwin)
        if host_platform != "cygwin":
            libs.append('X11')
        ext = Extension('_tkinter', ['_tkinter.c', 'tkappinit.c'],
                        define_macros=[('WITH_APPINIT', 1)] + defs,
                        include_dirs = include_dirs,
                        libraries = libs,
                        library_dirs = added_lib_dirs,
                        )
        self.extensions.append(ext)
        # XXX handle these, but how to detect?
        # *** Uncomment and edit for PIL (TkImaging) extension only:
        #     -DWITH_PIL -I../Extensions/Imaging/libImaging  tkImaging.c \
        # *** Uncomment and edit for TOGL extension only:
        #     -DWITH_TOGL togl.c \
        # *** Uncomment these for TOGL extension only:
        #     -lGL -lGLU -lXext -lXmu \
def configure_ctypes_darwin(self, ext):
# Darwin (OS X) uses preconfigured files, in
# the Modules/_ctypes/libffi_osx directory.
srcdir = sysconfig.get_config_var('srcdir')
ffi_srcdir = os.path.abspath(os.path.join(srcdir, 'Modules',
'_ctypes', 'libffi_osx'))
sources = [os.path.join(ffi_srcdir, p)
for p in ['ffi.c',
'x86/darwin64.S',
'x86/x86-darwin.S',
'x86/x86-ffi_darwin.c',
'x86/x86-ffi64.c',
'powerpc/ppc-darwin.S',
'powerpc/ppc-darwin_closure.S',
'powerpc/ppc-ffi_darwin.c',
'powerpc/ppc64-darwin_closure.S',
]]
# Add .S (preprocessed assembly) to C compiler source extensions.
self.compiler.src_extensions.append('.S')
include_dirs = [os.path.join(ffi_srcdir, 'include'),
os.path.join(ffi_srcdir, 'powerpc')]
ext.include_dirs.extend(include_dirs)
ext.sources.extend(sources)
return True
    def configure_ctypes(self, ext):
        """Configure the bundled libffi for the _ctypes extension.

        When self.use_system_libffi is set this is a no-op (returns True
        and leaves *ext* untouched).  Otherwise it delegates to the
        Darwin-specific path on OS X, or runs libffi's own ``configure``
        script into self.build_temp/libffi, loads the generated
        fficonfig.py, and extends *ext* with the resulting sources,
        include dirs, and compile flags.

        Returns True on success, False if the configure step failed.
        """
        if not self.use_system_libffi:
            if host_platform == 'darwin':
                return self.configure_ctypes_darwin(ext)
            srcdir = sysconfig.get_config_var('srcdir')
            ffi_builddir = os.path.join(self.build_temp, 'libffi')
            ffi_srcdir = os.path.abspath(os.path.join(srcdir, 'Modules',
                                         '_ctypes', 'libffi'))
            ffi_configfile = os.path.join(ffi_builddir, 'fficonfig.py')
            from distutils.dep_util import newer_group
            config_sources = [os.path.join(ffi_srcdir, fname)
                              for fname in os.listdir(ffi_srcdir)
                              if os.path.isfile(os.path.join(ffi_srcdir, fname))]
            # Re-run configure only when forced or when any libffi source is
            # newer than the previously generated fficonfig.py.
            if self.force or newer_group(config_sources,
                                         ffi_configfile):
                from distutils.dir_util import mkpath
                mkpath(ffi_builddir)
                # Forward only the cross-compilation-relevant configure args.
                config_args = [arg for arg in sysconfig.get_config_var("CONFIG_ARGS").split()
                               if (('--host=' in arg) or ('--build=' in arg))]
                if not self.verbose:
                    config_args.append("-q")
                # Pass empty CFLAGS because we'll just append the resulting
                # CFLAGS to Python's; -g or -O2 is to be avoided.
                cmd = "cd %s && env CFLAGS='' '%s/configure' %s" \
                      % (ffi_builddir, ffi_srcdir, " ".join(config_args))
                res = os.system(cmd)
                if res or not os.path.exists(ffi_configfile):
                    print("Failed to configure _ctypes module")
                    return False
            # fficonfig.py is build-generated; exec'ing it populates the
            # fficonfig dict with ffi_sources / ffi_cflags.
            # NOTE(review): exec of a generated file is trusted here because
            # we just produced it ourselves in build_temp.
            fficonfig = {}
            with open(ffi_configfile) as f:
                exec(f.read(), globals(), fficonfig)
            # Add .S (preprocessed assembly) to C compiler source extensions.
            self.compiler.src_extensions.append('.S')
            include_dirs = [os.path.join(ffi_builddir, 'include'),
                            ffi_builddir,
                            os.path.join(ffi_srcdir, 'src')]
            extra_compile_args = fficonfig['ffi_cflags'].split()
            ext.sources.extend(os.path.join(ffi_srcdir, f) for f in
                               fficonfig['ffi_sources'])
            ext.include_dirs.extend(include_dirs)
            ext.extra_compile_args.extend(extra_compile_args)
        return True
def detect_ctypes(self, inc_dirs, lib_dirs):
    """Register the _ctypes and _ctypes_test extension modules.

    Builds the extension against the bundled libffi by default; if
    Python was configured with --with-system-ffi and a usable system
    ffi.h / libffi pair is found via *inc_dirs* / *lib_dirs*, the
    extension is switched to the system libffi and
    self.use_system_libffi is set to True.
    """
    self.use_system_libffi = False
    include_dirs = []
    extra_compile_args = []
    extra_link_args = []
    sources = ['_ctypes/_ctypes.c',
               '_ctypes/callbacks.c',
               '_ctypes/callproc.c',
               '_ctypes/stgdict.c',
               '_ctypes/cfield.c']
    depends = ['_ctypes/ctypes.h']

    if host_platform == 'darwin':
        sources.append('_ctypes/malloc_closure.c')
        sources.append('_ctypes/darwin/dlfcn_simple.c')
        extra_compile_args.append('-DMACOSX')
        include_dirs.append('_ctypes/darwin')
        # XXX Is this still needed?
        ## extra_link_args.extend(['-read_only_relocs', 'warning'])

    elif host_platform == 'sunos5':
        # XXX This shouldn't be necessary; it appears that some
        # of the assembler code is non-PIC (i.e. it has relocations
        # when it shouldn't. The proper fix would be to rewrite
        # the assembler code to be PIC.
        # This only works with GCC; the Sun compiler likely refuses
        # this option. If you want to compile ctypes with the Sun
        # compiler, please research a proper solution, instead of
        # finding some -z option for the Sun compiler.
        extra_link_args.append('-mimpure-text')

    elif host_platform.startswith('hp-ux'):
        extra_link_args.append('-fPIC')

    ext = Extension('_ctypes',
                    include_dirs=include_dirs,
                    extra_compile_args=extra_compile_args,
                    extra_link_args=extra_link_args,
                    libraries=[],
                    sources=sources,
                    depends=depends)
    ext_test = Extension('_ctypes_test',
                         sources=['_ctypes/_ctypes_test.c'])
    self.extensions.extend([ext, ext_test])

    # Everything below only applies when --with-system-ffi was given.
    if not '--with-system-ffi' in sysconfig.get_config_var("CONFIG_ARGS"):
        return

    if host_platform == 'darwin':
        # OS X 10.5 comes with libffi.dylib; the include files are
        # in /usr/include/ffi
        inc_dirs.append('/usr/include/ffi')

    ffi_inc = [sysconfig.get_config_var("LIBFFI_INCLUDEDIR")]
    if not ffi_inc or ffi_inc[0] == '':
        ffi_inc = find_file('ffi.h', [], inc_dirs)
    if ffi_inc is not None:
        ffi_h = ffi_inc[0] + '/ffi.h'
        # Sanity-check the header is really libffi's ffi.h by scanning
        # for its include guard; reject the directory otherwise.
        with open(ffi_h) as fp:
            while 1:
                line = fp.readline()
                if not line:
                    ffi_inc = None
                    break
                if line.startswith('#define LIBFFI_H'):
                    break
    ffi_lib = None
    if ffi_inc is not None:
        # Prefer the convenience/PIC variants over plain libffi.
        for lib_name in ('ffi_convenience', 'ffi_pic', 'ffi'):
            if (self.compiler.find_library_file(lib_dirs, lib_name)):
                ffi_lib = lib_name
                break

    if ffi_inc and ffi_lib:
        ext.include_dirs.extend(ffi_inc)
        ext.libraries.append(ffi_lib)
        self.use_system_libffi = True
def _decimal_ext(self):
    """Build and return the Extension object for the _decimal module.

    Uses the system libmpdec when Python was configured with
    --with-system-libmpdec; otherwise compiles the bundled libmpdec
    sources.  The libmpdec machine configuration (64-bit asm, uint128,
    ANSI fallback, x87 'ppro', etc.) is chosen from sysconfig values,
    overridable via the PYTHON_DECIMAL_WITH_MACHINE env var for testing.

    Raises DistutilsError for architectures with no matching config.
    """
    extra_compile_args = []
    undef_macros = []
    if '--with-system-libmpdec' in sysconfig.get_config_var("CONFIG_ARGS"):
        include_dirs = []
        # NOTE(review): ':libmpdec.so.2' links by exact soname (GNU ld
        # syntax) rather than by -lmpdec.
        libraries = [':libmpdec.so.2']
        sources = ['_decimal/_decimal.c']
        depends = ['_decimal/docstrings.h']
    else:
        srcdir = sysconfig.get_config_var('srcdir')
        include_dirs = [os.path.abspath(os.path.join(srcdir,
                                                     'Modules',
                                                     '_decimal',
                                                     'libmpdec'))]
        libraries = []
        sources = [
          '_decimal/_decimal.c',
          '_decimal/libmpdec/basearith.c',
          '_decimal/libmpdec/constants.c',
          '_decimal/libmpdec/context.c',
          '_decimal/libmpdec/convolute.c',
          '_decimal/libmpdec/crt.c',
          '_decimal/libmpdec/difradix2.c',
          '_decimal/libmpdec/fnt.c',
          '_decimal/libmpdec/fourstep.c',
          '_decimal/libmpdec/io.c',
          '_decimal/libmpdec/memory.c',
          '_decimal/libmpdec/mpdecimal.c',
          '_decimal/libmpdec/numbertheory.c',
          '_decimal/libmpdec/sixstep.c',
          '_decimal/libmpdec/transpose.c',
          ]
        depends = [
          '_decimal/docstrings.h',
          '_decimal/libmpdec/basearith.h',
          '_decimal/libmpdec/bits.h',
          '_decimal/libmpdec/constants.h',
          '_decimal/libmpdec/convolute.h',
          '_decimal/libmpdec/crt.h',
          '_decimal/libmpdec/difradix2.h',
          '_decimal/libmpdec/fnt.h',
          '_decimal/libmpdec/fourstep.h',
          '_decimal/libmpdec/io.h',
          '_decimal/libmpdec/memory.h',
          '_decimal/libmpdec/mpdecimal.h',
          '_decimal/libmpdec/numbertheory.h',
          '_decimal/libmpdec/sixstep.h',
          '_decimal/libmpdec/transpose.h',
          '_decimal/libmpdec/typearith.h',
          '_decimal/libmpdec/umodarith.h',
          ]

    # Candidate libmpdec machine configurations (macro sets).
    config = {
      'x64':     [('CONFIG_64','1'), ('ASM','1')],
      'uint128': [('CONFIG_64','1'), ('ANSI','1'), ('HAVE_UINT128_T','1')],
      'ansi64':  [('CONFIG_64','1'), ('ANSI','1')],
      'ppro':    [('CONFIG_32','1'), ('PPRO','1'), ('ASM','1')],
      'ansi32':  [('CONFIG_32','1'), ('ANSI','1')],
      'ansi-legacy': [('CONFIG_32','1'), ('ANSI','1'),
                      ('LEGACY_COMPILER','1')],
      'universal':   [('UNIVERSAL','1')]
    }

    cc = sysconfig.get_config_var('CC')
    sizeof_size_t = sysconfig.get_config_var('SIZEOF_SIZE_T')
    machine = os.environ.get('PYTHON_DECIMAL_WITH_MACHINE')

    if machine:
        # Override automatic configuration to facilitate testing.
        define_macros = config[machine]
    elif host_platform == 'darwin':
        # Universal here means: build with the same options Python
        # was built with.
        define_macros = config['universal']
    elif sizeof_size_t == 8:
        if sysconfig.get_config_var('HAVE_GCC_ASM_FOR_X64'):
            define_macros = config['x64']
        elif sysconfig.get_config_var('HAVE_GCC_UINT128_T'):
            define_macros = config['uint128']
        else:
            define_macros = config['ansi64']
    elif sizeof_size_t == 4:
        ppro = sysconfig.get_config_var('HAVE_GCC_ASM_FOR_X87')
        if ppro and ('gcc' in cc or 'clang' in cc) and \
           not 'sunos' in host_platform:
            # solaris: problems with register allocation.
            # icc >= 11.0 works as well.
            define_macros = config['ppro']
            extra_compile_args.append('-Wno-unknown-pragmas')
        else:
            define_macros = config['ansi32']
    else:
        raise DistutilsError("_decimal: unsupported architecture")

    # Workarounds for toolchain bugs:
    if sysconfig.get_config_var('HAVE_IPA_PURE_CONST_BUG'):
        # Some versions of gcc miscompile inline asm:
        # http://gcc.gnu.org/bugzilla/show_bug.cgi?id=46491
        # http://gcc.gnu.org/ml/gcc/2010-11/msg00366.html
        extra_compile_args.append('-fno-ipa-pure-const')
    if sysconfig.get_config_var('HAVE_GLIBC_MEMMOVE_BUG'):
        # _FORTIFY_SOURCE wrappers for memmove and bcopy are incorrect:
        # http://sourceware.org/ml/libc-alpha/2010-12/msg00009.html
        undef_macros.append('_FORTIFY_SOURCE')

    # Faster version without thread local contexts:
    if not sysconfig.get_config_var('WITH_THREAD'):
        define_macros.append(('WITHOUT_THREADS', 1))

    # Increase warning level for gcc:
    if 'gcc' in cc:
        # Probe whether the compiler accepts these warning flags by
        # running it on empty input.
        cmd = ("echo '' | %s -Wextra -Wno-missing-field-initializers -E - "
               "> /dev/null 2>&1" % cc)
        ret = os.system(cmd)
        if ret >> 8 == 0:
            extra_compile_args.extend(['-Wextra',
                                       '-Wno-missing-field-initializers'])

    # Uncomment for extra functionality:
    #define_macros.append(('EXTRA_FUNCTIONALITY', 1))
    ext = Extension (
        '_decimal',
        include_dirs=include_dirs,
        libraries=libraries,
        define_macros=define_macros,
        undef_macros=undef_macros,
        extra_compile_args=extra_compile_args,
        sources=sources,
        depends=depends
    )
    return ext
class PyBuildInstall(install):
    """``install`` command variant used when installing Python itself."""

    def initialize_options(self):
        """Initialize options, silencing the lib_dynload warning.

        lib_dynload is not on sys.path while Python is being installed,
        so distutils' "installing into a directory not on sys.path"
        warning would be spurious here.
        """
        install.initialize_options(self)
        self.warn_dir = 0

    # Same sub-commands as distutils' install, minus install_egg_info:
    # Python itself should not ship an egg-info file.
    sub_commands = [
        ('install_lib', install.has_lib),
        ('install_headers', install.has_headers),
        ('install_scripts', install.has_scripts),
        ('install_data', install.has_data),
    ]
class PyBuildInstallLib(install_lib):
    """``install_lib`` variant that normalizes installed access modes.

    Behaves exactly like install_lib, then chmods the results: files
    get 0o644, except shared libraries — anything ending in
    SHLIB_SUFFIX, which also covers EXT_SUFFIX — which get 0o755;
    installed directories get 0o755.
    """

    shlib_suffix = sysconfig.get_config_var("SHLIB_SUFFIX")

    def install(self):
        outfiles = install_lib.install(self)
        self.set_file_modes(outfiles, 0o644, 0o755)
        self.set_dir_modes(self.install_dir, 0o755)
        return outfiles

    def set_file_modes(self, files, defaultMode, sharedLibMode):
        """chmod each installed file; shared libraries get sharedLibMode."""
        if not files:
            return
        if not self.is_chmod_supported():
            return
        for filename in files:
            # Leave symlinks alone; chmod would follow them.
            if os.path.islink(filename):
                continue
            shared = filename.endswith(self.shlib_suffix)
            mode = sharedLibMode if shared else defaultMode
            log.info("changing mode of %s to %o", filename, mode)
            if not self.dry_run:
                os.chmod(filename, mode)

    def set_dir_modes(self, dirname, mode):
        """Recursively chmod every directory under *dirname*."""
        if not self.is_chmod_supported():
            return
        for dirpath, _dirnames, _fnames in os.walk(dirname):
            if os.path.islink(dirpath):
                continue
            log.info("changing mode of %s to %o", dirpath, mode)
            if not self.dry_run:
                os.chmod(dirpath, mode)

    def is_chmod_supported(self):
        """Return True when the platform's os module provides chmod."""
        return hasattr(os, 'chmod')
class PyBuildScripts(build_scripts):
    """``build_scripts`` variant that version-suffixes installed scripts.

    '2to3' and 'pyvenv' get the full '-X.Y' suffix; everything else
    (pydoc3, idle3) gets just '.Y' appended.
    """

    def copy_scripts(self):
        outfiles, updated_files = build_scripts.copy_scripts(self)
        fullversion = '-{0[0]}.{0[1]}'.format(sys.version_info)
        minoronly = '.{0[1]}'.format(sys.version_info)
        newoutfiles = []
        newupdated_files = []
        for filename in outfiles:
            if filename.endswith(('2to3', 'pyvenv')):
                suffix = fullversion
            else:
                suffix = minoronly
            newfilename = filename + suffix
            log.info('renaming {} to {}'.format(filename, newfilename))
            os.rename(filename, newfilename)
            newoutfiles.append(newfilename)
            if filename in updated_files:
                newupdated_files.append(newfilename)
        return newoutfiles, newupdated_files
# Long description shown on PyPI; passed to setup() below (stripped).
SUMMARY = """
Python is an interpreted, interactive, object-oriented programming
language. It is often compared to Tcl, Perl, Scheme or Java.
Python combines remarkable power with very clear syntax. It has
modules, classes, exceptions, very high level dynamic data types, and
dynamic typing. There are interfaces to many system calls and
libraries, as well as to various windowing systems (X11, Motif, Tk,
Mac, MFC). New built-in modules are easily written in C or C++. Python
is also usable as an extension language for applications that need a
programmable interface.
The Python implementation is portable: it runs on many brands of UNIX,
on Windows, DOS, Mac, Amiga... If your favorite system isn't
listed here, it may still be supported, if there's a C compiler for
it. Ask around on comp.lang.python -- or just try compiling Python
yourself.
"""

# Trove classifiers for the PyPI metadata; split into a list in main().
CLASSIFIERS = """
Development Status :: 6 - Mature
License :: OSI Approved :: Python Software Foundation License
Natural Language :: English
Programming Language :: C
Programming Language :: Python
Topic :: Software Development
"""
def main():
    """Run the Python build/install via distutils setup()."""
    # Deprecated modules are expected during the build; silence them.
    import warnings
    warnings.filterwarnings("ignore", category=DeprecationWarning)

    setup(
        # PyPI Metadata (PEP 301)
        name="Python",
        version=sys.version.split()[0],
        url="http://www.python.org/%s" % sys.version[:3],
        maintainer="Guido van Rossum and the Python community",
        maintainer_email="python-dev@python.org",
        description="A high-level object-oriented programming language",
        long_description=SUMMARY.strip(),
        license="PSF license",
        classifiers=[line for line in CLASSIFIERS.split("\n") if line],
        platforms=["Many"],
        # Build info
        cmdclass={
            'build_ext': PyBuildExt,
            'build_scripts': PyBuildScripts,
            'install': PyBuildInstall,
            'install_lib': PyBuildInstallLib,
        },
        # The struct module is defined here, because build_ext won't be
        # called unless there's at least one extension module defined.
        ext_modules=[Extension('_struct', ['_struct.c'])],
        # If you change the scripts installed here, you also need to
        # check the PyBuildScripts command above, and change the links
        # created by the bininstall target in Makefile.pre.in
        scripts=["Tools/scripts/pydoc3", "Tools/scripts/idle3",
                 "Tools/scripts/2to3", "Tools/scripts/pyvenv"],
    )
# --install-platlib

# Script entry point: build/install Python when run directly.
if __name__ == '__main__':
    main()
|
Orav/kbengine
|
kbe/src/lib/python/setup.py
|
Python
|
lgpl-3.0
| 100,060
|
[
"VisIt"
] |
3cfc575ad69c7418e4cea6dbca4e79aaa8c9e290a5dafab3a222f471fe8a6afb
|
# This new verion (6/11/10) hasn't been tested yet.
# Can't run unit tests on my MacPro w/o Nio.
#---------------------------------------------------
# S.D. Peckham
# Sept 2014 (new version to use netCDF4)
# May, June 2010
import os
import sys
import time
import numpy as np
import file_utils
import netCDF4 as nc
#-------------------------------------------------------------------
# This class is for I/O of time series data to netCDF files.
#-------------------------------------------------------------------
#
# unit_test1()
# unit_test2()
# save_as_text() # (not ready yet)
#
# class ncts_file():
#
# import_netCDF4()
# open_file()
# get_dtype_map()
# open_new_file()
# update_time_index()
#-----------------------------
# add_value()
# get_value()
#-----------------------------
# values_at_IDs()
# add_values_at_IDs()
#-----------------------------
# add_series()
# get_series()
#-----------------------------
# close_file()
# close()
#
#-------------------------------------------------------------------
def unit_test1(n_values=10, VERBOSE=False,
file_name="NCTS_Series_Test.nc"):
#--------------------------------------------------------
# Notes: This test uses add_value() and get_value() to
# add and retrieve a time series to/from a file,
# one value at a time.
#--------------------------------------------------------
print ' '
print 'Running unit_test1()...'
#-------------------------------------
# Make instance of ncts_file() class
#-------------------------------------
ncts = ncts_file()
var_names = ['depth']
OK = ncts.open_new_file( file_name,
var_names=var_names,
long_names=["depth of water"],
units_names=["meters"],
dtypes=['float32'],
comment="Created by TopoFlow 3.0.")
## time_long_name='time',
## time_units_name="minutes")
###########################################################
# WHAT ABOUT UNITS AND LONG_NAME for the TIME VALUES ??
###########################################################
if not(OK):
print 'ERROR during open_new_file().'
return
series = np.sqrt(np.arange( n_values, dtype='Float32'))
times = np.arange( n_values, dtype='Float32') * 60.0
#--------------------------
# Add time series to file
#--------------------------
print 'Writing values to NCTS file...'
for time_index in xrange(n_values):
time = times[ time_index ]
value = series[ time_index ]
ncts.add_value( value, var_names[0], time )
#----------------------------------------
ncts.update_time_index()
if (VERBOSE):
print self.ncts_unit # (print a summary)
ncts.close_file()
print 'Finished writing ncts file: ' + file_name
print ' '
#--------------------------------------------
# Re-open the file and read the time series
#--------------------------------------------
OK = ncts.open_file( file_name )
if not(OK): return
print 'Reading values from ncts file: '
for time_index in xrange(n_values):
value, time = ncts.get_value(var_names[0], time_index)
ti_str = str(time_index)
t_str = 'time[' + ti_str + '], '
v_str = 'value[' + ti_str + '] = '
print (t_str + v_str), time, value
## print '-----------------------------------------------'
#-----------------
# Close the file
#-----------------
ncts.close_file()
print 'Finished reading ncts file: ' + file_name
print ' '
# unit_test1()
#-------------------------------------------------------------------
def unit_test2(n_values=10, VERBOSE=False,
file_name="NCTS_Series_Test.nc"):
#--------------------------------------------------------
# Notes: This test uses add_series() and get_series() to
# add and retrieve a time series to/from a file,
# all values at once.
#--------------------------------------------------------
print ' '
print 'Running unit_test2()...'
#-------------------------------------
# Make instance of ncts_file() class
#-------------------------------------
ncts = ncts_file()
var_name = "depth"
OK = ncts.open_new_file( file_name,
var_names=[var_name],
long_names=["depth of water"],
units_names=["meters"],
dtypes=['float32'],
time_units='minutes',
comment="Created by TopoFlow 3.0.")
###############################################
# WHAT ABOUT LONG_NAME for the TIME VALUES ??
###############################################
if not(OK):
print 'ERROR during open_new_file().'
return
series = np.sqrt(np.arange( n_values, dtype='Float32'))
times = np.arange( n_values, dtype='Float32') * 60.0
#--------------------------
# Add time series to file
#--------------------------
print 'Writing values to NCTS file...'
ncts.add_series( series, var_names[0], times )
#--------------------------------------------
ncts.update_time_index( step=n_values )
if (VERBOSE):
print self.ncts_unit # (print a summary)
ncts.close_file()
print 'Finished writing ncts file: ' + file_name
print ' '
#--------------------------------------------
# Re-open the file and read the time series
#--------------------------------------------
OK = ncts.open_file( file_name )
if not(OK): return
print 'Reading values from ncts file: '
series, times = ncts.get_series( var_names[0] )
for n in xrange(n_values):
time = times[n]
value = series[n]
ti_str = str(n)
t_str = 'time[' + ti_str + '], '
v_str = 'value[' + ti_str + '] = '
print (t_str + v_str), time, value
## print '-----------------------------------------------'
#-----------------
# Close the file
#-----------------
ncts.close_file()
print 'Finished reading ncts file: ' + file_name
print ' '
# unit_test2()
#-------------------------------------------------------------------
def save_as_text(ncts_file_name=None, text_file_name=None):
ncts = ncts_file()
OK = ncts.open_file( ncts_file_name )
if not(OK): return
var_name = 'H'
data = ncts.get_series( var_name )
ncts.close()
data = np.array( data )
print 'min(data), max(data) =', data.min(), data.max()
text_unit = open( text_file_name, 'w' )
data.tofile( unit ) ###### CHECK THIS #######
text_unit.close()
# save_as_text()
#-------------------------------------------------------------------
class ncts_file():
    """Reader/writer for netCDF time series (NCTS) files.

    Wraps a netCDF4.Dataset holding one unlimited "time" dimension plus
    one 1-D variable per time series.  Typical write sequence:
    open_new_file(), repeated add_value()/add_values_at_IDs() calls
    interleaved with update_time_index(), then close_file().
    """
    #----------------------------------------------------------
    # Note: ncts = NetCDF Time Series (used by CSDMS)
    #----------------------------------------------------------
    def import_netCDF4(self):
        """Return the netCDF4 module, or False if it cannot be imported."""
        try:
            import netCDF4
            # print 'Imported netCDF4 version: ' + netCDF4.__version__
            return netCDF4
        # NOTE(review): bare except also hides non-import errors;
        # consider narrowing to "except ImportError".
        except:
            ## print ' '
            ## print 'SORRY, Cannot write netCDF files because'
            ## print 'the "netCDF4" package cannot be imported.'
            ## print ' '
            ## python_version = sys.version[:3]
            ## if (python_version != '2.6'):
            ##     print 'Note that "PyNIO" is only installed for'
            ##     print 'Python version 2.6 on "beach".'
            ##     print 'The current Python version is:', python_version
            ##     print ' '
            return False

    #   import_netCDF4()
    #----------------------------------------------------------
    def open_file(self, file_name):
        """Open an existing netCDF file read-only.

        Stores the dataset on self.ncts_unit and returns True on
        success, False on any failure (file missing, bad format, ...).
        """
        #-------------------------
        # Open file to read only
        #-------------------------
        try:
            ncts_unit = nc.Dataset(file_name, mode='r')
            self.ncts_unit = ncts_unit
            ### return ncts_unit
            return True
        except:
            return False

    #   open_file()
    #----------------------------------------------------------
    def get_dtype_map(self):
        """Return a dict mapping numpy-style dtype names to netCDF4 codes."""
        #----------------------------------------
        # Possible settings for "dtype_code"
        #----------------------------------------------------
        # These two-char codes are used for netCDF4 package
        #----------------------------------------------------
        # See: http://unidata.github.io/netcdf4-python/
        #----------------------------------------------------
        dtype_map = {'float64':'f8', 'float32':'f4',
                     'int64':'i8', 'int32':'i4',
                     'int16':'i2', 'int8':'i1',
                     'S|100':'S1'}  # ( ????? )

        #-------------------------------------------------
        # These one-char codes are used for Nio in PyNIO
        #-------------------------------------------------
        # dtype_code = "d"  # (double, Float64)
        # dtype_code = "f"  # (float,  Float32)
        # dtype_code = "l"  # (long,   Int64)
        # dtype_code = "i"  # (int,    Int32)
        # dtype_code = "h"  # (short,  Int16)
        # dtype_code = "b"  # (byte,   Int8)
        # dtype_code = "S1" # (char)
        #-------------------------------------------
        # dtype_map = {'float64':'d', 'float32':'f',
        #              'int64':'l', 'int32':'i',
        #              'int16':'s', 'int8':'b',
        #              'S|100':'S1'}  # (check last entry)

        return dtype_map

    #   get_dtype_map()
    #----------------------------------------------------------
    def open_new_file(self, file_name,
                      var_names=['X'],
                      long_names=[None],
                      units_names=['None'],
                      dtypes=['float32'],
                      ### dtypes=['float64'],
                      time_units='minutes',
                      comment=''):
        """Create a new netCDF time-series file for writing.

        Creates an unlimited "time" dimension, a float64 'time'
        variable, and one variable per entry of var_names.  If dtypes
        has fewer entries than var_names, its first entry is used for
        all variables.  Returns True on success, False otherwise.

        NOTE(review): the list default arguments are shared across
        calls (standard Python pitfall); harmless here only as long as
        callers never mutate them.
        """
        #----------------------------
        # Does file already exist ?
        #----------------------------
        file_name = file_utils.check_overwrite( file_name )

        #---------------------------------------
        # Check and store the time series info
        #---------------------------------------
        self.format     = 'ncts'
        self.file_name  = file_name
        self.time_index = 0
        if (long_names[0] is None):
            long_names = var_names
        #-------------------------------------------
        # We may not need to save these in self.
        # I don't think they're used anywhere yet.
        #-------------------------------------------
        self.var_names   = var_names
        self.long_names  = long_names
        self.units_names = units_names
        self.time_units  = time_units
        self.dtypes      = dtypes

        #---------------------------------------------
        # Create array of dtype codes from dtypes
        # for multiple time series (i.e. columns).
        #---------------------------------------------
        dtype_map   = self.get_dtype_map()
        dtype_codes = []
        if (len(dtypes) == len(var_names)):
            for dtype in dtypes:
                dtype_code = dtype_map[ dtype.lower() ]
                dtype_codes.append( dtype_code )
        else:
            # One dtype given: apply it to every variable.
            dtype = dtypes[0]
            dtype_code = dtype_map[ dtype.lower() ]
            for k in xrange(len(var_names)):
                dtype_codes.append( dtype_code )
        self.dtype_codes = dtype_codes

        #-------------------------------------
        # Open a new netCDF file for writing
        #--------------------------------------------------------------
        # Format options are: NETCDF3_CLASSIC, NETCDF3_64BIT_OFFSET,
        # NETCDF3_64BIT_DATA, NETCDF4_CLASSIC, and NETCDF4
        #-----------------------------------------------------------------
        # NETCDF3_CLASSIC results in MUCH SMALLER filesizes than using
        # NETCDF4_CLASSIC or NETCDF4.
        # NETCDF3_CLASSIC, June_20_67_0D-Q.nc,  5200 bytes
        # NETCDF4_CLASSIC, June_20_67_0D-Q.nc,  4217537 Bytes
        # The 2nd one is 811 TIMES BIGGER, even after setting chunksize.
        #-----------------------------------------------------------------
        # For more info see: http://unidata.github.io/netcdf4-python/
        #-----------------------------------------------------------------
        # The "nccopy" utility can convert between these formats.
        #-----------------------------------------------------------------
        try:
            format = 'NETCDF3_CLASSIC'
            ### format = 'NETCDF4'
            ### format = 'NETCDF4_CLASSIC'  # (before 2/19/17)
            ncts_unit = nc.Dataset(file_name, mode='w', format=format)
            OK = True
        except:
            OK = False
            return OK

        #------------------------------------------------------------
        # Option to pre-fill with fill values
        # Set fill_value for a var with "var._Fill_Value = number"
        # For Nio was:  opt.PreFill = False # (for efficiency)
        #------------------------------------------------------------
        ncts_unit.set_fill_off()
        # ncts_unit.set_fill_on()

        #-------------------------------------
        # Prepare and save a history string
        #-------------------------------------
        # Sample output from time.asctime():
        #     "Thu Oct 8 17:10:18 2009"
        #-------------------------------------
        history = "Created using netCDF4 " + nc.__version__ + " on "
        history = history + time.asctime() + ". "
        history = history + comment
        ncts_unit.history = history

        #------------------------------------------------
        # Create an unlimited time dimension (via None)
        #------------------------------------------------
        # Without using "int()" for length, we get this:
        #     TypeError: size must be None or integer
        #------------------------------------------------
        ncts_unit.createDimension("time", None)

        #-------------------------
        # Create a time variable
        #---------------------------------------------------
        #('f' = float32; must match in add_values_at_IDs()
        #---------------------------------------------------
        # NB! Can't use "time" vs. "tvar" here unless we
        #     add "import time" inside this function.
        #---------------------------------------------------
        tvar = ncts_unit.createVariable('time', 'f8', ("time",))
        ncts_unit.variables['time'].units = time_units

        #-----------------------------------
        # Create variables using var_names
        #-----------------------------------
        # Returns "var" as a PyNIO object
        #----------------------------------------------------------
        # NB! The 3rd argument here (dimension), must be a tuple.
        #     If there is only one dimension, then we need to add a
        #     comma, as shown.
        #-----------------------------------------------------------
        # (2/19/17) For "0D" netCDF files created by TF, the files
        # are much too large with the default chunksize.  By using
        # chunksizes=[1], filesize for Treynor is reduced by a
        # factor of 6.9 (4.25 MB vs. 29.38 MB).
        #-----------------------------------------------------------
        # But even this is 287.9 times bigger than the TXT file!
        #-----------------------------------------------------------
        # Default chunksize in NetCDF 4.4.1.1 = 4MB.
        #-----------------------------------------------------------
        n_vars = len( var_names )
        for k in xrange( n_vars ):
            var_name = var_names[k]
            ## var = ncts_unit.createVariable(var_name, dtype_codes[k], ("time",))
            ## var = ncts_unit.createVariable(var_name, dtype_codes[k], ("time",), chunksizes=[512])
            ## var = ncts_unit.createVariable(var_name, dtype_codes[k], ("time",), chunksizes=[1])
            ## var = ncts_unit.createVariable(var_name, dtype_codes[k], ("time",), chunksizes=[4000])
            var = ncts_unit.createVariable(var_name, dtype_codes[k], ("time",), chunksizes=[n_vars])

            #------------------------------------
            # Create attributes of the variable
            #------------------------------------
            ncts_unit.variables[var_name].long_name = long_names[k]
            ncts_unit.variables[var_name].units     = units_names[k]

            #----------------------------------
            # Specify a "nodata" fill value ?
            #----------------------------------
            # var._Fill_Value = -9999.0    ## Used for pre-fill above ?

        self.ncts_unit = ncts_unit
        return OK

    #   open_new_file()
    #----------------------------------------------------------
    def update_time_index(self, step=1):
        """Advance the internal time index by *step*."""
        #---------------------------------------------------
        # We shouldn't update clock in every add_value()
        # call because different values (for same time)
        # may be written with different add_value() calls.
        #---------------------------------------------------

        #------------------------------------
        # Increment the internal time index
        #------------------------------------
        self.time_index += step

    #   update_time_index()
    #----------------------------------------------------------
    def add_value(self, value, var_name, time=None,
                  time_index=-1):
        """Write one (time, value) pair for *var_name*.

        time_index == -1 (default) appends at the current internal
        index; any other value inserts/overwrites at that position.
        If *time* is None the index itself is used as the time.
        Does NOT advance the internal time index (see
        update_time_index()).
        """
        #---------------------------------------------------
        # Note: "time_index" allows insertion/overwrite
        #       of a value at a particular location.
        #---------------------------------------------------
        # This syntax works for scalars and grids
        # nc_unit.variables[var_name].assign_value( value )
        #---------------------------------------------------

        #-------------------------------------
        # Can use time_index to overwrite an
        # existing grid vs. simple append.
        #-------------------------------------
        if (time_index == -1):
            time_index = self.time_index
        if (time is None):
            time = np.float64( time_index )

        #---------------------------------------
        # Write a time to existing netCDF file
        #---------------------------------------
        times = self.ncts_unit.variables[ 'time' ]
        times[ time_index ] = time

        #---------------------------------------------
        # Write a data value to existing netCDF file
        #---------------------------------------------
        values = self.ncts_unit.variables[ var_name ]
        values[ time_index ] = value

        ####################################################
        # We shouldn't update clock in every add_value()
        # call because different values (for same time)
        # may be written with different add_value() calls.
        ####################################################
        #------------------------------------
        # Increment the internal time index
        #------------------------------------
        # self.time_index += 1

        #-------------------------------------------------
        # 12/2/09: netCDF is supposed to take care of
        # byteorder transparently.  However, we need to
        # make sure we don't byteswap in the function
        # "model_output.save_value_to_file()" when the
        # output format is netCDF.
        #-------------------------------------------------
        ## if (sys.byteorder == 'big'):
        ##     var[time_index] = value
        ## else:
        ##     value2 = value.copy()
        ##     var[time_index] = value2.byteswap()
        ## self.time_index += 1

    #   add_value()
    #----------------------------------------------------------
    def get_value(self, var_name, time_index):
        """Return (value, time) for *var_name* at *time_index*."""
        values = self.ncts_unit.variables[ var_name ]
        times  = self.ncts_unit.variables[ 'time' ]
        return (values[ time_index ], times[ time_index ])

    #   get_value()
    #-------------------------------------------------------------------
    def values_at_IDs(self, var, IDs):
        """Return a float32 1-D array of var's values at grid IDs.

        If *var* is a grid, it is indexed by IDs; if it is a scalar,
        the scalar is repeated once per ID.
        """
        #----------------------------------------------------------
        # Notes: If "var" is a grid, subscript with self.IDs to
        #        get a 1D array of values.  If "var" is scalar,
        #        return a vector with the scalar value repeated
        #        once for each ID in self.IDs.
        #----------------------------------------------------------

        #---------------------------------
        # Is variable a grid or scalar ?
        #---------------------------------
        if (np.ndim(var) > 0):
            return np.float32( var[ IDs ] )
        else:
            #-----------------------------------------------------
            # (3/16/07) Bug fix.  This gets used in case of q0,
            # which is a scalar when INFIL_ALL_SCALARS is true.
            # Without this, don't get a value for every ID.
            #-----------------------------------------------------
            n_IDs  = np.size(IDs[0])
            vector = np.zeros( n_IDs, dtype='Float32')
            return (vector + np.float32(var))

    #   values_at_IDs()
    #-------------------------------------------------------------------
    def add_values_at_IDs(self, time, var, var_name, IDs,
                          time_index=-1):
        """Write grid values at (row, col) IDs as separate series.

        For each ID, writes to the variable named
        "<var_name>_<row>_<col>" (which must have been declared via
        open_new_file()).  Unlike add_value(), this DOES advance the
        internal time index by one.
        """
        #---------------------------------------------------
        # Note: Here "var" is typically a grid and IDs are
        #       (row,col) subscripts into the grid.  A set
        #       of variable names are constructed from the
        #       actual "var_name" (e.g. "Q") and the
        #       row and column.  Note that we must have
        #       called open_new_file() with these same
        #       var_names.
        #---------------------------------------------------
        # Note: "time_index" allows insertion/overwrite
        #       of a value at a particular location.
        #---------------------------------------------------
        # This syntax works for scalars and grids
        # nc_unit.variables[var_name].assign_value( value )
        #---------------------------------------------------

        #-------------------------------------
        # Can use time_index to overwrite an
        # existing grid vs. simple append.
        #-------------------------------------
        if (time_index == -1):
            time_index = self.time_index

        #---------------------------------------------
        # Write current time to existing netCDF file
        #---------------------------------------------
        times = self.ncts_unit.variables[ 'time' ]
        times[ time_index ] = time

        #--------------------------------------------
        # Write data values to existing netCDF file
        #--------------------------------------------
        vals  = self.values_at_IDs( var, IDs )
        rows  = IDs[0]
        cols  = IDs[1]
        n_IDs = np.size(rows)
        for k in xrange(n_IDs):
            #----------------------------------------
            # Construct var_name of form:  Q[24,32]
            # or, if necessary, Q_24_32
            #----------------------------------------
            row_str  = '_' + str(rows[k])
            col_str  = '_' + str(cols[k])
            #--------------------------------------------------
            # Must match with model_output.open_new_ts_file()
            #--------------------------------------------------
            ## row_str = '[' + str(rows[k]) + ','
            ## col_str = str(cols[k]) + ']'

            vname  = var_name + row_str + col_str
            values = self.ncts_unit.variables[ vname ]
            values[ time_index ] = vals[k]

        #---------------------------
        # Increment the time index
        #---------------------------
        self.time_index += 1

    #   add_values_at_IDs()
    #-------------------------------------------------------------------
    def add_series(self, values, var_name, times,
                   time_index=-1):
        """Write an entire series of values to *var_name* at once.

        NOTE(review): despite the "times" parameter and the time_index
        bookkeeping, this method only assigns *values* (series[:] =
        values); the 'time' variable is NOT written here — confirm
        against callers before relying on it.
        """
        #-----------------------------------------------------
        # Note: "time_index" allows insertion/overwrite
        #       of a time series at a particular location.
        #-----------------------------------------------------
        # This syntax works for scalars and grids
        # nc_unit.variables[var_name].assign_value( values )
        #-----------------------------------------------------

        #-------------------------------------
        # Can use time_index to overwrite an
        # existing grid vs. simple append.
        #-------------------------------------
        if (time_index == -1):
            time_index = self.time_index

        #---------------------------------------------
        # Write a data value to existing netCDF file
        #---------------------------------------------
        series = self.ncts_unit.variables[ var_name ]
        series[:] = values

        ######################################################
        # WE SHOULDN'T update clock in every add_value()
        # call because different vars (e.g. the time)
        # must be written with different add_value() calls.
        ######################################################
        #------------------------------------
        # Increment the internal time index
        #------------------------------------
        # self.time_index += np.size(values)

    #   add_series()
    #----------------------------------------------------------
    def get_series(self, var_name):
        """Return (series, times) netCDF variables for *var_name*.

        These are live netCDF4 variable objects; they become invalid
        once the dataset is closed.
        """
        series = self.ncts_unit.variables[ var_name ]
        times  = self.ncts_unit.variables[ 'time' ]
        return (series, times)

    #   get_series()
    #-------------------------------------------------------------------
    def close_file(self):
        """Close the underlying netCDF dataset."""
        # self.ncts_unit.sync()  ## (netCDF4 has no "flush")
        self.ncts_unit.close()

    #   close_file()
    #-------------------------------------------------------------------
    def close(self):
        """Alias for close_file()."""
        # self.ncts_unit.sync()  ## (netCDF4 has no "flush")
        self.ncts_unit.close()

    #   close()
    #-------------------------------------------------------------------
|
peckhams/topoflow
|
topoflow/utils/ncts_files.py
|
Python
|
mit
| 27,069
|
[
"NetCDF"
] |
22e06176749ed8fa18ac89e25499af145799e93d3e5785c4f39c3315a94800a8
|
"""
"""
# Test-harness bootstrap: load the shared unit-test utility module by path
# (`imp` is deprecated in Python 3; kept as-is for this legacy harness),
# set up a per-module file logger, and add the galaxy lib directory to
# sys.path so the module under test can be imported.
import os
import imp
import unittest
utility = imp.load_source( 'utility', os.path.join( os.path.dirname( __file__ ), '../../util/utility.py' ) )
log = utility.set_up_filelogger( __name__ + '.log' )
utility.add_galaxy_lib_to_path( 'test/unit/web/base' )
from galaxy.web.base.pluginframework import PageServingPluginManager
import mock
# ----------------------------------------------------------------------------- globals
# Minimal Mako template body used by the template-rendering test below.
contents1 = """${what} ${you} ${say}"""
# -----------------------------------------------------------------------------
class PageServingPluginManager_TestCase( unittest.TestCase ):
    """Unit tests for PageServingPluginManager: plugin discovery,
    static-URL/path mapping, and Mako template rendering.
    """

    def test_plugin_load( self ):
        """should attempt load if criteria met"""
        # A plugin is any subdirectory containing templates/ and/or static/;
        # plain files and empty dirs must be ignored.
        mock_app_dir = mock.MockDir({
            'plugins'   : {
                'plugin1'       : {
                    'templates' : {},
                    'static'    : {}
                },
                'plugin2'       : {
                    'static'    : {}
                },
                'plugin3'       : {
                    'templates' : {}
                },
                'not_a_plugin1' : 'blerbler',
                'not_a_plugin2' : {},
            }
        })
        mock_app = mock.MockApp( mock_app_dir.root_path )
        plugin_mgr = PageServingPluginManager( mock_app, 'test', directories_setting='plugins' )
        app_path = mock_app_dir.root_path
        expected_plugins_path = os.path.join( app_path, 'plugins' )
        self.assertEqual( plugin_mgr.base_url, 'test' )
        self.assertItemsEqual( plugin_mgr.directories, [ expected_plugins_path ] )
        self.assertItemsEqual( plugin_mgr.plugins.keys(), [ 'plugin1', 'plugin2', 'plugin3' ] )
        # plugin1 has both templates/ and static/
        plugin1 = plugin_mgr.plugins[ 'plugin1' ]
        self.assertEqual( plugin1.name, 'plugin1' )
        self.assertEqual( plugin1.path, os.path.join( expected_plugins_path, 'plugin1' ) )
        self.assertEqual( plugin1.base_url, '/'.join([ plugin_mgr.base_url, plugin1.name ]) )
        self.assertTrue( plugin1.serves_static )
        self.assertEqual( plugin1.static_path, os.path.join( plugin1.path, 'static' ) )
        self.assertEqual( plugin1.static_url, '/'.join([ plugin1.base_url, 'static' ]) )
        self.assertTrue( plugin1.serves_templates )
        self.assertEqual( plugin1.template_path, os.path.join( plugin1.path, 'templates' ) )
        self.assertEqual( plugin1.template_lookup.__class__.__name__, 'TemplateLookup' )
        # plugin2 has only static/
        plugin2 = plugin_mgr.plugins[ 'plugin2' ]
        self.assertEqual( plugin2.name, 'plugin2' )
        self.assertEqual( plugin2.path, os.path.join( expected_plugins_path, 'plugin2' ) )
        self.assertEqual( plugin2.base_url, '/'.join([ plugin_mgr.base_url, plugin2.name ]) )
        self.assertTrue( plugin2.serves_static )
        self.assertEqual( plugin2.static_path, os.path.join( plugin2.path, 'static' ) )
        self.assertEqual( plugin2.static_url, '/'.join([ plugin2.base_url, 'static' ]) )
        self.assertFalse( plugin2.serves_templates )
        # plugin3 has only templates/
        plugin3 = plugin_mgr.plugins[ 'plugin3' ]
        self.assertEqual( plugin3.name, 'plugin3' )
        self.assertEqual( plugin3.path, os.path.join( expected_plugins_path, 'plugin3' ) )
        self.assertEqual( plugin3.base_url, '/'.join([ plugin_mgr.base_url, plugin3.name ]) )
        self.assertFalse( plugin3.serves_static )
        self.assertTrue( plugin3.serves_templates )
        # BUG FIX: the next two assertions previously re-checked plugin1
        # (copy-paste error); they must verify plugin3's template setup.
        self.assertEqual( plugin3.template_path, os.path.join( plugin3.path, 'templates' ) )
        self.assertEqual( plugin3.template_lookup.__class__.__name__, 'TemplateLookup' )
        mock_app_dir.remove()

    def test_plugin_static_map( self ):
        """should return a (static_url, static_path) pair per static-serving plugin"""
        mock_app_dir = mock.MockDir({
            'plugins'   : {
                'plugin1'   : {
                    'templates' : {},
                    'static'    : {}
                }
            }
        })
        mock_app = mock.MockApp( mock_app_dir.root_path )
        plugin_mgr = PageServingPluginManager( mock_app, 'test', directories_setting='plugins' )
        app_path = mock_app_dir.root_path
        expected_plugins_path = os.path.join( app_path, 'plugins' )
        self.assertItemsEqual( plugin_mgr.plugins.keys(), [ 'plugin1' ] )
        plugin = plugin_mgr.plugins[ 'plugin1' ]
        self.assertEqual( plugin_mgr.get_static_urls_and_paths(), [( plugin.static_url, plugin.static_path )] )
        mock_app_dir.remove()

    def test_plugin_templates( self ):
        """should render a plugin template with the given keyword context"""
        mock_app_dir = mock.MockDir({
            'plugins'   : {
                'plugin1'   : {
                    'templates' : {
                        'test.mako' : contents1
                    },
                }
            }
        })
        mock_app = mock.MockApp( mock_app_dir.root_path )
        plugin_mgr = PageServingPluginManager( mock_app, 'test', directories_setting='plugins' )
        app_path = mock_app_dir.root_path
        expected_plugins_path = os.path.join( app_path, 'plugins' )
        self.assertItemsEqual( plugin_mgr.plugins.keys(), [ 'plugin1' ] )
        plugin = plugin_mgr.plugins[ 'plugin1' ]
        rendered = plugin_mgr.fill_template( mock.MockTrans(), plugin, 'test.mako',
            what='Hey', you='Ho', say='HeyHey HoHo' )
        self.assertEqual( rendered, 'Hey Ho HeyHey HoHo' )
        mock_app_dir.remove()
if __name__ == '__main__':
unittest.main()
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/test/unit/web/base/test_PageServingPluginManager.py
|
Python
|
gpl-3.0
| 5,382
|
[
"Galaxy"
] |
3019f54ad2320a538666a46b0e93e9b713e2cfa87e88303d5f60706d211ce2c2
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import libcst as cst
import pathlib
import sys
from typing import (Any, Callable, Dict, List, Sequence, Tuple)
def partition(
    predicate: Callable[[Any], bool],
    iterator: Sequence[Any]
) -> Tuple[List[Any], List[Any]]:
    """A stable, out-of-place partition.

    Returns (matching, non_matching): items for which `predicate` is truthy,
    then the rest, each preserving input order.
    """
    matching: List[Any] = []
    non_matching: List[Any] = []
    for item in iterator:
        bucket = matching if predicate(item) else non_matching
        bucket.append(item)
    return matching, non_matching
class gkehubCallTransformer(cst.CSTTransformer):
    """LibCST transformer that rewrites flattened gkehub client method calls
    into the request-object form: positional/keyword API arguments are folded
    into a single ``request`` dict, while retry/timeout/metadata stay as real
    keyword arguments.
    """
    # Control-plane parameters that must NOT be folded into the request dict.
    CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata')
    # Ordered flattened parameter names each API method accepted before the
    # request-object change; order matters for mapping positional args.
    METHOD_TO_PARAMS: Dict[str, Tuple[str]] = {
        'create_feature': ('parent', 'feature_id', 'resource', 'request_id', ),
        'create_membership': ('parent', 'membership_id', 'resource', 'request_id', ),
        'delete_feature': ('name', 'force', 'request_id', ),
        'delete_membership': ('name', 'request_id', ),
        'generate_connect_manifest': ('name', 'namespace', 'proxy', 'version', 'is_upgrade', 'registry', 'image_pull_secret_content', ),
        'get_feature': ('name', ),
        'get_membership': ('name', ),
        'list_features': ('parent', 'page_size', 'page_token', 'filter', 'order_by', ),
        'list_memberships': ('parent', 'page_size', 'page_token', 'filter', 'order_by', ),
        'update_feature': ('name', 'update_mask', 'resource', 'request_id', ),
        'update_membership': ('name', 'update_mask', 'resource', 'request_id', ),
    }

    def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
        """Rewrite a single call node, or return it unchanged when it is not
        (recognizably) a gkehub API method call."""
        try:
            key = original.func.attr.value
            kword_params = self.METHOD_TO_PARAMS[key]
        except (AttributeError, KeyError):
            # Either not a method from the API or too convoluted to be sure.
            return updated
        # If the existing code is valid, keyword args come after positional args.
        # Therefore, all positional args must map to the first parameters.
        args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
        if any(k.keyword.value == "request" for k in kwargs):
            # We've already fixed this file, don't fix it again.
            return updated
        # Separate the control keywords (retry/timeout/metadata) from the
        # API keywords that belong inside the request dict.
        kwargs, ctrl_kwargs = partition(
            lambda a: a.keyword.value not in self.CTRL_PARAMS,
            kwargs
        )
        # Positional args beyond the known parameter list can only be the
        # control params passed positionally; convert them to keywords.
        args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
        ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
                           for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))
        # Build the request dict literal from the remaining API arguments.
        request_arg = cst.Arg(
            value=cst.Dict([
                cst.DictElement(
                    cst.SimpleString("'{}'".format(name)),
                    cst.Element(value=arg.value)
                )
                # Note: the args + kwargs looks silly, but keep in mind that
                # the control parameters had to be stripped out, and that
                # those could have been passed positionally or by keyword.
                for name, arg in zip(kword_params, args + kwargs)]),
            keyword=cst.Name("request")
        )
        return updated.with_changes(
            args=[request_arg] + ctrl_kwargs
        )
def fix_files(
    in_dir: pathlib.Path,
    out_dir: pathlib.Path,
    *,
    transformer=gkehubCallTransformer(),
):
    """Duplicate the input dir to the output dir, fixing file method calls.

    Preconditions:
    * in_dir is a real directory
    * out_dir is a real, empty directory
    """
    for root, _, filenames in os.walk(in_dir):
        for filename in filenames:
            # Only python sources are rewritten; everything else is skipped.
            if os.path.splitext(filename)[1] != ".py":
                continue
            src_path = pathlib.Path(os.path.join(root, filename))
            with open(src_path, 'r') as handle:
                source = handle.read()
            # Parse the code and insert method call fixes.
            module = cst.parse_module(source)
            rewritten = module.visit(transformer)
            # Mirror the directory structure under out_dir.
            target = out_dir.joinpath(src_path.relative_to(in_dir))
            target.parent.mkdir(parents=True, exist_ok=True)
            # Generate the updated source file at the corresponding path.
            with open(target, 'w') as handle:
                handle.write(rewritten.code)
if __name__ == '__main__':
    # CLI driver: validate the two directory arguments, then run fix_files.
    parser = argparse.ArgumentParser(
        description="""Fix up source that uses the gkehub client library.
The existing sources are NOT overwritten but are copied to output_dir with changes made.
Note: This tool operates at a best-effort level at converting positional
parameters in client method calls to keyword based parameters.
Cases where it WILL FAIL include
A) * or ** expansion in a method call.
B) Calls via function or method alias (includes free function calls)
C) Indirect or dispatched calls (e.g. the method is looked up dynamically)
These all constitute false negatives. The tool will also detect false
positives when an API method shares a name with another method.
""")
    parser.add_argument(
        '-d',
        '--input-directory',
        required=True,
        dest='input_dir',
        help='the input directory to walk for python files to fix up',
    )
    parser.add_argument(
        '-o',
        '--output-directory',
        required=True,
        dest='output_dir',
        help='the directory to output files fixed via un-flattening',
    )
    args = parser.parse_args()
    input_dir = pathlib.Path(args.input_dir)
    output_dir = pathlib.Path(args.output_dir)
    # Fail fast (exit code -1) on any precondition violation: input must be
    # an existing directory; output must be an existing, EMPTY directory.
    if not input_dir.is_dir():
        print(
            f"input directory '{input_dir}' does not exist or is not a directory",
            file=sys.stderr,
        )
        sys.exit(-1)
    if not output_dir.is_dir():
        print(
            f"output directory '{output_dir}' does not exist or is not a directory",
            file=sys.stderr,
        )
        sys.exit(-1)
    if os.listdir(output_dir):
        print(
            f"output directory '{output_dir}' is not empty",
            file=sys.stderr,
        )
        sys.exit(-1)
    fix_files(input_dir, output_dir)
|
googleapis/python-gke-hub
|
scripts/fixup_gkehub_v1_keywords.py
|
Python
|
apache-2.0
| 6,730
|
[
"VisIt"
] |
51ff8bee2c87eea5124de48945ba80cb6ed0e94a0866d79bf2d6bba614f3261c
|
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers for handling quantum state vectors."""
import abc
from typing import Dict, List, Optional, Tuple, TYPE_CHECKING, Sequence
import numpy as np
from cirq import linalg, qis, value
from cirq.sim import simulator
if TYPE_CHECKING:
import cirq
# For backwards compatibility and to make mypy happy:
class StateVectorMixin:
    """A mixin that provide methods for objects that have a state vector."""

    def __init__(self, qubit_map: Optional[Dict['cirq.Qid', int]] = None, *args, **kwargs):
        """Inits StateVectorMixin.

        Args:
            qubit_map: A map from the Qubits in the Circuit to the index
                of this qubit for a canonical ordering. This canonical ordering
                is used to define the state (see the state_vector() method).
            *args: Passed on to the class that this is mixed in with.
            **kwargs: Passed on to the class that this is mixed in with.
        """
        # Reason for 'type: ignore': https://github.com/python/mypy/issues/5887
        super().__init__(*args, **kwargs)  # type: ignore
        self._qubit_map = qubit_map or {}
        # Keep the shape as None when no qubit_map was given, so that
        # _qid_shape_ can report NotImplemented below.
        qid_shape = simulator._qubit_map_to_shape(self._qubit_map)
        self._qid_shape = None if qubit_map is None else qid_shape

    @property
    def qubit_map(self) -> Dict['cirq.Qid', int]:
        # Map from qubit to its index in the canonical ordering.
        return self._qubit_map

    def _qid_shape_(self) -> Tuple[int, ...]:
        if self._qid_shape is None:
            return NotImplemented
        return self._qid_shape

    @abc.abstractmethod
    def state_vector(self) -> np.ndarray:
        """Return the state vector (wave function).

        The vector is returned in the computational basis with these basis
        states defined by the `qubit_map`. In particular the value in the
        `qubit_map` is the index of the qubit, and these are translated into
        binary vectors where the last qubit is the 1s bit of the index, the
        second-to-last is the 2s bit of the index, and so forth (i.e. big
        endian ordering).

        Example:
             qubit_map: {QubitA: 0, QubitB: 1, QubitC: 2}
             Then the returned vector will have indices mapped to qubit basis
             states like the following table

                |     | QubitA | QubitB | QubitC |
                | :-: | :----: | :----: | :----: |
                |  0  |   0    |   0    |   0    |
                |  1  |   0    |   0    |   1    |
                |  2  |   0    |   1    |   0    |
                |  3  |   0    |   1    |   1    |
                |  4  |   1    |   0    |   0    |
                |  5  |   1    |   0    |   1    |
                |  6  |   1    |   1    |   0    |
                |  7  |   1    |   1    |   1    |
        """
        raise NotImplementedError()

    def dirac_notation(self, decimals: int = 2) -> str:
        """Returns the state vector as a string in Dirac notation.

        Args:
            decimals: How many decimals to include in the pretty print.

        Returns:
            A pretty string consisting of a sum of computational basis kets
            and non-zero floats of the specified accuracy."""
        return qis.dirac_notation(self.state_vector(), decimals, qid_shape=self._qid_shape)

    def density_matrix_of(self, qubits: Optional[List['cirq.Qid']] = None) -> np.ndarray:
        r"""Returns the density matrix of the state.

        Calculate the density matrix for the system on the list, qubits.
        Any qubits not in the list that are present in self.state_vector() will
        be traced out. If qubits is None the full density matrix for
        self.state_vector() is returned, given self.state_vector() follows
        standard Kronecker convention of numpy.kron.

        For example:
            self.state_vector() = np.array([1/np.sqrt(2), 1/np.sqrt(2)],
                dtype=np.complex64)
            qubits = None
            gives us
                $$
                \rho = \begin{bmatrix}
                            0.5 & 0.5 \\
                            0.5 & 0.5
                        \end{bmatrix}
                $$

        Args:
            qubits: list containing qubit IDs that you would like
                to include in the density matrix (i.e.) qubits that WON'T
                be traced out.

        Returns:
            A numpy array representing the density matrix.

        Raises:
            ValueError: if the size of the state represents more than 25 qubits.
            IndexError: if the indices are out of range for the number of qubits
                corresponding to the state.
        """
        # Translate qubits to indices via the canonical qubit_map; None means
        # "keep everything" (no trace-out).
        return qis.density_matrix_from_state_vector(
            self.state_vector(),
            [self.qubit_map[q] for q in qubits] if qubits is not None else None,
            qid_shape=self._qid_shape,
        )

    def bloch_vector_of(self, qubit: 'cirq.Qid') -> np.ndarray:
        """Returns the bloch vector of a qubit in the state.

        Calculates the bloch vector of the given qubit
        in the state given by self.state_vector(), given that
        self.state_vector() follows the standard Kronecker convention of
        numpy.kron.

        Args:
            qubit: qubit who's bloch vector we want to find.

        Returns:
            A length 3 numpy array representing the qubit's bloch vector.

        Raises:
            ValueError: if the size of the state represents more than 25 qubits.
            IndexError: if index is out of range for the number of qubits
                corresponding to the state.
        """
        return qis.bloch_vector_from_state_vector(
            self.state_vector(), self.qubit_map[qubit], qid_shape=self._qid_shape
        )
def sample_state_vector(
    state_vector: np.ndarray,
    indices: List[int],
    *,  # Force keyword args
    qid_shape: Optional[Tuple[int, ...]] = None,
    repetitions: int = 1,
    seed: 'cirq.RANDOM_STATE_OR_SEED_LIKE' = None,
) -> np.ndarray:
    """Samples repeatedly from measurements in the computational basis.

    Note that this does not modify the passed in state.

    Args:
        state_vector: The multi-qubit state vector to be sampled. This is an
            array of 2 to the power of the number of qubit complex numbers, and
            so state must be of size ``2**integer``.  The `state_vector` can be
            a vector of size ``2**integer`` or a tensor of shape
            ``(2, 2, ..., 2)``.
        indices: Which qubits are measured. The `state_vector` is assumed to be
            supplied in big endian order. That is the xth index of v, when
            expressed as a bitstring, has its largest values in the 0th index.
        qid_shape: The qid shape of the `state_vector`.  Specify this argument
            when using qudits.
        repetitions: The number of times to sample.
        seed: A seed for the pseudorandom number generator.

    Returns:
        Measurement results with True corresponding to the ``|1⟩`` state.
        The outer list is for repetitions, and the inner corresponds to
        measurements ordered by the supplied qubits. These lists
        are wrapped as a numpy ndarray.

    Raises:
        ValueError: ``repetitions`` is negative or size of `state_vector`
           is not a power of 2.
        IndexError: An index from ``indices`` is out of range, given the number
            of qubits corresponding to the state.
    """
    # Note: repetitions == 0 is allowed (returns an empty result below);
    # only negative values are rejected.
    if repetitions < 0:
        raise ValueError(f'Number of repetitions cannot be negative. Was {repetitions}')
    shape = qis.validate_qid_shape(state_vector, qid_shape)
    num_qubits = len(shape)
    qis.validate_indices(num_qubits, indices)
    # Nothing to sample: return an empty (repetitions x 0) / (0 x k) result.
    if repetitions == 0 or len(indices) == 0:
        return np.zeros(shape=(repetitions, len(indices)), dtype=np.uint8)
    prng = value.parse_random_state(seed)
    # Calculate the measurement probabilities.
    probs = _probs(state_vector, indices, shape)
    # We now have the probability vector, correctly ordered, so sample over
    # it. Note that we use ints here, since numpy's choice does not allow for
    # choosing from a list of tuples or list of lists.
    result = prng.choice(len(probs), size=repetitions, p=probs)
    # Convert to individual qudit measurements.
    meas_shape = tuple(shape[i] for i in indices)
    return np.array(
        [value.big_endian_int_to_digits(result[i], base=meas_shape) for i in range(len(result))],
        dtype=np.uint8,
    )
def measure_state_vector(
    state_vector: np.ndarray,
    indices: Sequence[int],
    *,  # Force keyword args
    qid_shape: Optional[Tuple[int, ...]] = None,
    out: Optional[np.ndarray] = None,
    seed: 'cirq.RANDOM_STATE_OR_SEED_LIKE' = None,
) -> Tuple[List[int], np.ndarray]:
    """Performs a measurement of the state in the computational basis.

    This does not modify `state` unless the optional `out` is `state`.

    Args:
        state_vector: The state to be measured. This state vector is assumed to
            be normalized. The state vector must be of size 2 ** integer.  The
            state vector can be of shape (2 ** integer) or (2, 2, ..., 2).
        indices: Which qubits are measured. The `state_vector` is assumed to be
            supplied in big endian order. That is the xth index of v, when
            expressed as a bitstring, has the largest values in the 0th index.
        qid_shape: The qid shape of the `state_vector`.  Specify this argument
            when using qudits.
        out: An optional place to store the result. If `out` is the same as
            the `state_vector` parameter, then `state_vector` will be modified
            inline. If `out` is not None, then the result is put into `out`.
            If `out` is None a new value will be allocated. In all of these
            case out will be the same as the returned ndarray of the method.
            The shape and dtype of `out` will match that of `state_vector` if
            `out` is None, otherwise it will match the shape and dtype of `out`.
        seed: A seed for the pseudorandom number generator.

    Returns:
        A tuple of a list and a numpy array. The list is an array of booleans
        corresponding to the measurement values (ordered by the indices). The
        numpy array is the post measurement state vector. This state vector has
        the same shape and dtype as the input `state_vector`.

    Raises:
        ValueError: if the size of state is not a power of 2.
        IndexError: if the indices are out of range for the number of qubits
            corresponding to the state.
    """
    shape = qis.validate_qid_shape(state_vector, qid_shape)
    num_qubits = len(shape)
    qis.validate_indices(num_qubits, indices)
    # Measuring no qubits: empty result, state unchanged (but still copied
    # into `out` per the documented contract).
    if len(indices) == 0:
        if out is None:
            out = np.copy(state_vector)
        elif out is not state_vector:
            np.copyto(dst=out, src=state_vector)
        # Final else: if out is state then state will be modified in place.
        return ([], out)
    prng = value.parse_random_state(seed)
    # Cache initial shape.
    initial_shape = state_vector.shape
    # Calculate the measurement probabilities and then make the measurement.
    probs = _probs(state_vector, indices, shape)
    result = prng.choice(len(probs), p=probs)
    ###measurement_bits = [(1 & (result >> i)) for i in range(len(indices))]
    # Convert to individual qudit measurements.
    meas_shape = tuple(shape[i] for i in indices)
    measurement_bits = value.big_endian_int_to_digits(result, base=meas_shape)
    # Calculate the slice for the measurement result.
    result_slice = linalg.slice_for_qubits_equal_to(
        indices, big_endian_qureg_value=result, qid_shape=shape
    )
    # Create a mask which is False for only the slice.
    mask = np.ones(shape, dtype=bool)
    mask[result_slice] = False
    if out is None:
        out = np.copy(state_vector)
    elif out is not state_vector:
        np.copyto(dst=out, src=state_vector)
    # Final else: if out is state then state will be modified in place.
    # Potentially reshape to tensor, and then set masked values to 0.
    out.shape = shape
    out[mask] = 0
    # Restore original shape (if necessary) and renormalize.
    out.shape = initial_shape
    out /= np.sqrt(probs[result])
    return measurement_bits, out
def _probs(state: np.ndarray, indices: Sequence[int], qid_shape: Tuple[int, ...]) -> np.ndarray:
"""Returns the probabilities for a measurement on the given indices."""
tensor = np.reshape(state, qid_shape)
# Calculate the probabilities for measuring the particular results.
if len(indices) == len(qid_shape):
# We're measuring every qudit, so no need for fancy indexing
probs = np.abs(tensor) ** 2
probs = np.transpose(probs, indices)
probs = np.reshape(probs, np.prod(probs.shape, dtype=np.int64))
else:
# Fancy indexing required
meas_shape = tuple(qid_shape[i] for i in indices)
probs = (
np.abs(
[
tensor[
linalg.slice_for_qubits_equal_to(
indices, big_endian_qureg_value=b, qid_shape=qid_shape
)
]
for b in range(np.prod(meas_shape, dtype=np.int64))
]
)
** 2
)
probs = np.sum(probs, axis=tuple(range(1, len(probs.shape))))
# To deal with rounding issues, ensure that the probabilities sum to 1.
probs /= np.sum(probs)
return probs
|
quantumlib/Cirq
|
cirq-core/cirq/sim/state_vector.py
|
Python
|
apache-2.0
| 14,082
|
[
"DIRAC"
] |
90748fe1953eb15f0f9c309a1241acd1a9a937ac363c76712f1bfe30ac96d5cc
|
""" WebApp handler for Downtimes WebApp
"""
import json
from datetime import datetime
from DIRAC import gLogger
from DIRAC.ResourceStatusSystem.Client.PublisherClient import PublisherClient
from WebAppDIRAC.Lib.WebHandler import _WebHandler as WebHandler, WErr
class DowntimesHandler(WebHandler):
    """Serves selection metadata and downtime records to the Downtimes WebApp."""

    DEFAULT_AUTHORIZATION = "authenticated"
    # Publisher client (shared class-wide; created once in initializeHandler)
    pubClient = None

    @classmethod
    def initializeHandler(cls, serviceInfo):
        """This may be overwritten when you write a DIRAC service handler
        And it must be a class method. This method is called only one time,
        at the first request

        :param dict serviceInfo: infos about services
        """
        cls.pubClient = PublisherClient()

    def web_getSelectionData(self):
        """Get selection data

        :return: dict
        """
        # NOTE(review): the dict is seeded with key "sites", but the lookup
        # below stores the site list under "site" -- so "sites" is never
        # populated and ends up as just [["All"]].  Confirm which key the
        # frontend actually reads before changing either.
        callback = {"name": set(), "severity": set(), "sites": set()}
        downtimes = self.pubClient.getCachedDowntimes(None, None, None, None)
        if downtimes["OK"]:
            # Re-assemble rows as dicts keyed by the returned column names.
            dtList = [dict(zip(downtimes["Columns"], dt)) for dt in downtimes["Value"]]
            for dt in dtList:
                callback["name"].add(dt["Name"])
                callback["severity"].add(dt["Severity"])
        sites = self.pubClient.getSites()
        if sites["OK"]:
            callback["site"] = sites["Value"]
        # Wrap every entry as [[item], ...] and prepend the "All" choice,
        # as expected by the selector widgets.
        for key, value in callback.items():
            callback[key] = [[item] for item in list(value)]
            callback[key] = [["All"]] + callback[key]
        callback["view"] = [["tabular"], ["availability"]]
        return callback

    def web_getDowntimesData(
        self, name=None, severity=None, site=None, startDate=None, startTime=None, endDate=None, endTime=None
    ):
        """Get downtimes data

        :param str name: name
        :param str severity: severity
        :param str site: site
        :param str startDate: start date
        :param str startTime: start time
        :param str endDate: end date
        :param str endTime: end time

        :return: dict
        """
        # Each filter argument arrives as a JSON-encoded list (or None).
        requestParams = {
            "name": list(json.loads(name)) if name else [],
            "site": list(json.loads(site)) if site else [],
            "severity": list(json.loads(severity)) if severity else [],
        }
        # NOTE(review): datetime.utcnow() returns a naive datetime and is
        # deprecated in Python 3.12+; presumably all times here are UTC --
        # confirm against the publisher service.
        requestParams["startDate"] = datetime.utcnow()
        if startDate and startTime:
            requestParams["startDate"] = datetime.strptime("%s %s" % (startDate, startTime), "%Y-%m-%d %H:%M")
        requestParams["endDate"] = datetime.utcnow()
        if endDate and endTime:
            requestParams["endDate"] = datetime.strptime("%s %s" % (endDate, endTime), "%Y-%m-%d %H:%M")
        gLogger.info("Request parameters:", requestParams)
        retVal = self.pubClient.getSitesResources(requestParams["site"])
        if not retVal["OK"]:
            raise WErr.fromSERROR(retVal)
        sitesResources = retVal["Value"]
        # When sites were requested, restrict the downtime query to the CEs
        # and SEs belonging to those sites.
        names = []
        if requestParams["site"]:
            for _site, resources in list(sitesResources.items()):
                names += resources["ces"]
                names += resources["ses"]
        downtimes = self.pubClient.getCachedDowntimes(None, None, names, requestParams["severity"])
        if not downtimes["OK"]:
            raise WErr.fromSERROR(downtimes)
        dtList = [dict(zip(downtimes["Columns"], dt)) for dt in downtimes["Value"]]
        # Attach the owning site to each downtime record by reverse lookup.
        for dt in dtList:
            dt["Site"] = "Unknown"
            for site, resources in list(sitesResources.items()):
                if dt["Name"] in resources["ces"] + resources["ses"]:
                    dt["Site"] = site
                    break
            dt["StartDate"] = str(dt["StartDate"])
            dt["EndDate"] = str(dt["EndDate"])
        return {"success": "true", "result": dtList, "total": len(dtList)}
|
DIRACGrid/WebAppDIRAC
|
src/WebAppDIRAC/WebApp/handler/DowntimesHandler.py
|
Python
|
gpl-3.0
| 3,845
|
[
"DIRAC"
] |
2ab763f3a70ce369ddebb45974a803f6044b8bf2f908c6fe962c0c92d79bf90b
|
# encoding: utf-8
# py3k support
from __future__ import print_function
#import setuptools # for bdist_egg and console_scripts entry point
from setuptools import setup,Extension
#import distutils.command.install_scripts
#import distutils.command.sdist
#import distutils.command.build_ext
import os.path, os, shutil, re, subprocess, sys, codecs
from glob import glob
from os.path import sep,join,basename,dirname
DISTBUILD=None # set to None if building locally, or to a string when dist-building on a bot
if 'DEB_BUILD_ARCH' in os.environ: DISTBUILD='debian'
WIN=(sys.platform=='win32')
PY3K=(sys.version_info[0]==3)
if not DISTBUILD: # don't do parallel at buildbot
# monkey-patch for parallel compilation
def parallelCCompile(self, sources, output_dir=None, macros=None, include_dirs=None, debug=0, extra_preargs=None, extra_postargs=None, depends=None):
# those lines are copied from distutils.ccompiler.CCompiler directly
macros, objects, extra_postargs, pp_opts, build = self._setup_compile(output_dir, macros, include_dirs, sources, depends, extra_postargs)
cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
# parallel code
N=4 # number of parallel compilations by default
import multiprocessing.pool
def _single_compile(obj):
try: src, ext = build[obj]
except KeyError: return
print(obj)
self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
# convert to list, imap is evaluated on-demand
list(multiprocessing.pool.ThreadPool(N).imap(_single_compile,objects))
return objects
import distutils.ccompiler
distutils.ccompiler.CCompiler.compile=parallelCCompile
pathSourceTree=join('build-src-tree')
pathSources=join(pathSourceTree,'src')
pathScripts=join(pathSourceTree,'scripts')
pathHeaders=join(pathSourceTree,'woo')
## get version info
# Resolution order: debian changelog (dist build) -> git -> bzr -> fallback.
version=None
revno=None
# on debian, get version from changelog
if DISTBUILD=='debian':
    version=re.match(r'^[^(]* \(([^)]+)\).*$',codecs.open('debian/changelog','r','utf-8').readlines()[0]).group(1)
    print('Debian version from changelog: ',version)
    revno='debian'
# get version from queryling local bzr repo
if not version:
    revno='na'
    if os.path.exists('.git'):
        # NOTE(review): bare except silently falls back to revno='na' on ANY
        # failure (missing git binary, empty output, ...); consider narrowing.
        try:
            r0=os.popen("git rev-list HEAD --count 2>"+("NUL" if WIN else "/dev/null")).readlines()[0][:-1]
            r1=os.popen("git log -1 --format='%h'").readlines()[0][:-1]
            revno=r0+'+git.'+r1
        except: pass
    elif os.path.exists('.bzr'):
        # NOTE(review): bare except, same caveat as above.
        try:
            # http://stackoverflow.com/questions/3630893/determining-the-bazaar-version-number-from-python-without-calling-bzr
            from bzrlib.branch import BzrBranch
            branch = BzrBranch.open_containing('.')[0]
            revno=str(branch.last_revision_info()[0])+'+bzr'
        except: pass
    else:
        print('WARN: unable to determine revision number (no .git or .bzr here, or getting revision failed).')
        revno='0+na'
    version='1.0.'+revno
##
## build options
##
features=['qt4','vtk','opengl','gts','openmp']
if 'CC' in os.environ and os.environ['CC'].endswith('clang'): features.remove('openmp')
flavor='' #('' if WIN else 'distutils')
if PY3K: flavor+=('-' if flavor else '')+'py3'
debug=False
chunkSize=1 # (1 if WIN else 10)
hotCxx=[] # plugins to be compiled separately despite chunkSize>1
# XXX
chunkSize=1
# features+=['noxml']
## arch-specific optimizations
march='corei7' if WIN else 'native'
# lower, but at least some machine-specific optimizations
# FIXME: code will fail to execute on older CPUs
if DISTBUILD: march='core2'
##
## end build options
##
if DISTBUILD=='debian':
chunkSize=1 # be nice to the builder at launchpad
# build with XML even for Debian (should be OK RAM-wise now)
# features+=['noxml'] # this should cut to half RAM used by boost::serialization templates at compile-time
cxxFlavor=('_'+re.sub('[^a-zA-Z0-9_]','_',flavor) if flavor else '')
execFlavor=('-'+flavor) if flavor else ''
cxxInternalModule='_cxxInternal%s%s'%(cxxFlavor,'_debug' if debug else '')
if 'opengl' in features and 'qt4' not in features: raise ValueError("The 'opengl' features is only meaningful in conjunction with 'qt4'.")
#
# install headers and source (in chunks)
#
def wooPrepareHeaders():
    """Copy headers into the build-src-tree/woo/ subdirectory, preserving
    their relative paths."""
    if not os.path.exists(pathHeaders):
        os.makedirs(pathHeaders)
    # NOTE: 'pkg/*/*.hpp' appears twice in the original pattern list; the
    # duplicate copies are harmless and kept for identical behavior.
    patterns = ('lib/*/*.hpp', 'lib/multimethods/loki/*.h', 'core/*.hpp', 'pkg/*/*.hpp', 'pkg/*/*.hpp')
    headers = []
    for pattern in patterns:
        headers.extend(glob(pattern))
    for header in headers:
        target_dir = join(pathHeaders, dirname(header))
        if not os.path.exists(target_dir):
            os.makedirs(target_dir)
        #print(header, target_dir)
        shutil.copyfile(header, join(pathHeaders, header))
def wooPrepareChunks():
    'Make chunks from sources, and install those files to build-src-tree'
    # make chunks from sources; chunking concatenates several .cpp files into
    # one translation unit (via #include) to speed up full builds
    global chunkSize
    if chunkSize<0: chunkSize=10000  # negative chunkSize means "everything in one chunk"
    srcs=[glob('lib/*/*.cpp'),glob('py/*.cpp'),glob('py/*/*.cpp')]
    if WIN: srcs=[[s] for s in sum(srcs,[])] # compile each file separately even amongst base files
    if 'opengl' in features: srcs+=[glob('gui/qt4/*.cpp')+glob('gui/qt4/*.cc')]
    if 'gts' in features: srcs+=[[f] for f in glob('py/3rd-party/pygts-0.3.1/*.cpp')]
    pkg=glob('pkg/*.cpp')+glob('pkg/*/*.cpp')+glob('pkg/*/*/*.cpp')+glob('core/*.cpp')
    # print(srcs,pkg)
    # split the plugin sources into chunkSize-sized groups
    for i in range(0,len(pkg),chunkSize): srcs.append(pkg[i:i+chunkSize])
    # pull sources listed in hotCxx out of their chunks so they compile alone
    hot=[]
    for i in range(len(srcs)):
        hot+=[s for s in srcs[i] if basename(s)[:-4] in hotCxx]
        srcs[i]=[s for s in srcs[i] if basename(s)[:-4] not in hotCxx]
    srcs+=[[h] for h in hot] # add as single files
    #print(srcs)
    # check hash: the chunk layout is fingerprinted with sha1 of its repr,
    # stored as an empty marker file inside pathSources
    import hashlib; h=hashlib.new('sha1'); h.update(str(srcs).encode('utf-8'))
    # exactly the same configuration does not have to be repeated again
    chunksSame=os.path.exists(join(pathSources,h.hexdigest()))
    if not chunksSame and os.path.exists(pathSources): shutil.rmtree(pathSources)
    if not os.path.exists(pathSources):
        os.mkdir(pathSources)
        open(join(pathSources,h.hexdigest()),'w')  # create the fingerprint marker
    #print(srcs)
    for i,src in enumerate(srcs):
        if len(src)==0: continue
        # ext=('c' if src[0].split('.')[-1]=='c' else 'cpp')
        ext='cpp' # FORCE the .cpp extension so that we don't have to pass -xc++ to the compiler with clang (which chokes at plain c with -std=c++11)
        # single-file chunks keep the source basename in the chunk filename
        nameNoExt='' if len(src)>1 else '-'+basename(src[0][:-len(src[0].split('.')[-1])-1])
        chunkPath=join(pathSources,('chunk-%02d%s.%s'%(i,nameNoExt,ext)))
        if not chunksSame:
            f=open(chunkPath,'w')
            for s in src:
                f.write('#include"../%s"\n'%s) # build-src-tree
        else:
            # update timestamp to the newest include so make-style rebuilds trigger
            if not os.path.exists(chunkPath): raise RuntimeError('Chunk configuration identical, but chunk %s not found; delete the build directory to recreate everything.'%chunkPath)
            last=max([os.path.getmtime(s) for s in src])
            #for s in src: print(s,os.path.getmtime(s))
            if last>os.path.getmtime(chunkPath):
                print('Updating timestamp of %s (%s -> %s)'%(chunkPath,os.path.getmtime(chunkPath),last+10))
                os.utime(chunkPath,(last+10,last+10))
def wooPrepareQt4():
    """Generate Qt4 files (normally handled by scons); only needed with Qt/OpenGL.

    Runs pyrcc4/pyuic4 (plus moc/rcc when the 'opengl' feature is enabled)
    over the gui/qt4 inputs, regenerating outputs that are stale or that were
    generated by a different python version. Raises RuntimeError when a tool
    fails or produces no output file.
    """
    global features
    if 'qt4' not in features: return
    rccInOut=[('gui/qt4/img.qrc','gui/qt4/img_rc.py')]
    uicInOut=[('gui/qt4/controller.ui','gui/qt4/ui_controller.py')]
    mocInOut=[
        ('gui/qt4/GLViewer.hpp','gui/qt4/moc_GLViewer.cc'),
        ('gui/qt4/OpenGLManager.hpp','gui/qt4/moc_OpenGLManager.cc')
    ]
    cxxRccInOut=[('gui/qt4/GLViewer.qrc','gui/qt4/qrc_GLViewer.cc')]
    # stamp for python version, so that files are re-created even if time-stamp is OK but python version is different
    # this is encountered when building debian package for py2 and py3 one after another
    stamp='_pyversion__by_setup.py_'
    currver=str(sys.version_info[:2]) # e.g (2, 7)
    sameVer=os.path.exists(stamp) and (open(stamp,'r').read()==currver)
    if not sameVer: open(stamp,'w').write(currver)
    if WIN:
        # this is ugly
        # pyuic is a batch file, which is not runnable from mingw shell directly
        # find the real executable then
        import PyQt4.uic
        pyuic4=['python',PyQt4.uic.__file__[:-12]+'pyuic.py']
    else:
        pyuic4=['pyuic4']
    for tool,opts,inOut,enabled in [(['pyrcc4'],['-py3' if PY3K else '-py2'],rccInOut,True),(pyuic4,[],uicInOut,True),(['moc'],['-DWOO_OPENGL','-DWOO_QT4'],mocInOut,('opengl' in features)),(['rcc'],['-name','GLViewer'],cxxRccInOut,('opengl' in features))]:
        if not enabled: continue
        for fIn,fOut in inOut:
            cmd=tool+opts+[fIn,'-o',fOut]
            # no need to recreate, since source is older
            if sameVer and os.path.exists(fOut) and os.path.getmtime(fIn)<os.path.getmtime(fOut): continue
            print(' '.join(cmd))
            status=subprocess.call(cmd)
            if status: raise RuntimeError("Error %d returned when running %s"%(status,' '.join(cmd)))
            # BUGFIX: the exception was constructed but never raised in the original
            if not os.path.exists(fOut): raise RuntimeError("No output file (though exit status was zero): %s"%(' '.join(cmd)))
def pkgconfig(packages):
    """Query pkg-config for *packages* and return distutils-style keyword args.

    Returns a dict with 'include_dirs' (-I), 'library_dirs' (-L) and
    'libraries' (-l); any other token goes into 'extra_link_args'.
    Duplicates are removed (order is not preserved).
    """
    flag_to_key = {'-I': 'include_dirs', '-L': 'library_dirs', '-l': 'libraries'}
    result = {'library_dirs': [], 'include_dirs': [], 'libraries': []}
    command = "pkg-config --libs --cflags %s" % ' '.join(packages)
    output = subprocess.check_output(command, shell=True).decode('utf-8')
    for token in output.split():
        prefix = token[:2]
        if prefix in flag_to_key:
            result.setdefault(flag_to_key[prefix], []).append(token[2:])
        else:
            # throw others to extra_link_args
            result.setdefault('extra_link_args', []).append(token)
    # remove duplicates
    for key in result:
        result[key] = list(set(result[key]))
    return result
# if the following file is missing, we are being run from sdist, which has tree already prepared
# otherwise, install headers, chunks and scripts where they should be
if os.path.exists('examples'):
wooPrepareQt4()
wooPrepareHeaders()
wooPrepareChunks()
# files are in chunks
cxxSrcs=['py/config.cxx']+glob(join(pathSources,'*.cpp'))+glob(join(pathSources,'*.c'))
###
### preprocessor, compiler, linker flags
###
cppDirs,cppDef,cxxFlags,cxxLibs,linkFlags,libDirs=[],[],[],[],[],[]
##
## general
##
cppDef+=[
('WOO_REVISION',revno),
('WOO_VERSION',version),
('WOO_SOURCE_ROOT','' if DISTBUILD else dirname(os.path.abspath(__file__)).replace('\\','/')),
('WOO_BUILD_ROOT',os.path.abspath(pathSourceTree).replace('\\','/')),
('WOO_FLAVOR',flavor),
('WOO_CXX_FLAVOR',cxxFlavor),
]
cppDef+=[('WOO_'+feature.upper().replace('-','_'),None) for feature in features]
cppDirs+=[pathSourceTree]
cxxStd='c++11'
## this is needed for packaging on Ubuntu 12.04, where gcc 4.6 is the default
if DISTBUILD=='debian':
# c++0x for gcc == 4.6
gccVer=bytes(subprocess.check_output(['g++','--version'])).split(b'\n')[0].split()[-1]
print('GCC version is',gccVer)
if gccVer.startswith(b'4.6'):
cxxStd='c++0x'
print('Compiling with gcc 4.6 (%s), using -std=%s. Adding -pedantic.'%(gccVer,cxxStd))
cxxFlags+=['-pedantic'] # work around for http://gcc.gnu.org/bugzilla/show_bug.cgi?id=50478
cxxFlags+=['-Wall','-fvisibility=hidden','-std='+cxxStd,'-pipe']
cxxLibs+=['m',
'boost_python-py3%d'%(sys.version_info[1]) if PY3K else 'boost_python' ,
'boost_system',
'boost_thread',
'boost_date_time',
'boost_filesystem',
'boost_iostreams',
'boost_regex',
'boost_serialization',
'boost_chrono']
##
## Platform-specific
##
if DISTBUILD:
# this would be nice, but gcc at launchpad ICEs with that
# cxxFlags+=['-ftime-report','-fmem-report','-fpre-ipa-mem-report','-fpost-ipa-mem-report']
pass
if WIN:
cppDirs+=['c:/MinGW64/include','c:/MinGW64/include/eigen3','c:/MinGW64/include/boost-1_51']
# avoid warnings from other headers
# avoid hitting section limit by inlining
cxxFlags+=['-Wno-strict-aliasing','-Wno-attributes','-finline-functions']
boostTag='-mgw47-mt-1_51'
cxxLibs=[(lib+boostTag if lib.startswith('boost_') else lib) for lib in cxxLibs]
else:
cppDirs+=['/usr/include/eigen3']
# we want to use gold with gcc under Linux
# binutils now require us to select gold explicitly (see https://launchpad.net/ubuntu/saucy/+source/binutils/+changelog)
linkFlags+=['-fuse-ld=gold']
##
## Debug-specific
##
if debug:
cppDef+=[('WOO_DEBUG',None),]
cxxFlags+=['-Os']
else:
cppDef+=[('NDEBUG',None),]
cxxFlags+=['-g0','-O3']
if march: cxxFlags+=['-march=%s'%march]
linkFlags+=['-Wl,--strip-all']
##
## Feature-specific
##
if 'openmp' in features:
cxxLibs.append('gomp')
cxxFlags.append('-fopenmp')
if 'opengl' in features:
if WIN: cxxLibs+=['opengl32','glu32','glut','gle','QGLViewer2']
# XXX: this will fail in Ubuntu >= 13.10 which calls this lib QGLViewer
else:
cxxLibs+=['GL','GLU','glut','gle']
# now older Ubuntu calls the library qglviewer-qt4, newer QGLViewer
# we check that by asking the compiler what it can find and what not
# this assumes gcc can be run
try:
# this will for sure fail - either the lib is not found (the first error reported), or we get "undefined reference to main" when the lib is there
subprocess.check_output(['gcc','-lqglviewer-qt4'],stderr=subprocess.STDOUT)
except (subprocess.CalledProcessError,) as e:
#print(20*'=','output from gcc -lqglviewer-qt4',20*'=')
#print(e.output)
#print(60*'=')
if ' -lqglviewer-qt4' in e.output.decode('utf-8').split('\n')[0]:
print('info: library check: qglviewer-qt4 not found, using QGLViewer instead')
cxxLibs+=['QGLViewer']
else:
print('info: library check: qglviewer-qt4 found')
cxxLibs+=['qglviewer-qt4']
# qt4 without OpenGL is pure python and needs no additional compile options
if ('qt4' in features):
cppDef+=[('QT_CORE_LIB',None),('QT_GUI_LIB',None),('QT_OPENGL_LIB',None),('QT_SHARED',None)]
if WIN:
cppDirs+=['c:/MinGW64/include/'+component for component in ('QtCore','QtGui','QtOpenGL','QtXml')]
cxxLibs+=['QtCore4','QtGui4','QtOpenGL4','QtXml4']
else: cppDirs+=['/usr/include/qt4']+['/usr/include/qt4/'+component for component in ('QtCore','QtGui','QtOpenGL','QtXml')]
# cxxLibs+=['QtGui4','QtCore4','QtOpenGL4','
if 'vtk' in features:
vtks=(glob('/usr/include/vtk-*') if not WIN else glob('c:/MinGW64/include/vtk-*'))
if not vtks: raise ValueError("No header directory for VTK detected.")
elif len(vtks)>1: raise ValueError("Multiple header directories for VTK detected: "%','.join(vtks))
cppDirs+=[vtks[0]]
# find VTK version from include directory ending in -x.y
m=re.match(r'.*-(\d)\.(\d+)$',vtks[0])
if not m: raise ValueError("VTK include directory %s not matching numbers ...-x.y, unable to guess VTK version."%vtks[0])
vtkMajor,vtkMinor=int(m.group(1)),int(m.group(2))
if vtkMajor==5:
cxxLibs+=['vtkCommon','vtkHybrid','vtkRendering','vtkIO','vtkFiltering']
elif vtkMajor==6:
suff='-%d.%d'%(vtkMajor,vtkMinor) # library suffix used on Debian, perhaps not used elsewhere?!
cxxLibs+=['vtkCommonCore'+suff,'vtkCommonDataModel'+suff,'vtkIOXML'+suff]
else: raise ValueError('Unsupported VTK version %d.x'%vtkMajor)
if WIN:
if vtkMajor==6: raise ValueError("VTK6.x not supported under Windows (yet).")
libDirs+=glob('c:/MinGW64/lib/vtk-*')
cxxLibs+=['vtksys']
if 'gts' in features:
c=pkgconfig(['gts'])
cxxLibs+=['gts']+c['libraries']
cppDirs+=c['include_dirs']
libDirs+=c['library_dirs']
wooModules=['woo.'+basename(py)[:-3] for py in glob('py/*.py') if basename(py)!='__init__.py']
# compiler-specific flags, if ever needed:
# http://stackoverflow.com/a/5192738/761090
#class WooBuildExt(distutils.command.build_ext.build_ext):
# def build_extensions(self):
# c=self.compiler.compiler_type
# if re.match(r'.*(gcc|g\+\+)^'):
# for e in self.extensions:
# e.extra_compile_args=['-fopenmp']
# e.extra_link_args=['-lgomp']
setup(name='woo',
version=version,
author='Václav Šmilauer',
author_email='eu@doxos.eu',
url='http://www.woodem.org',
description='Discrete dynamic compuations, especially granular mechanics.',
long_description='''Extesible and portable framework primarily for mechanics
granular materials. Computation parts are written in c++ parallelized using
OpenMP, fully accessible and modifiable from python (ipython console or
scripts). Arbitrarily complex scene can be scripted. Qt-based user interface
is provided, featuring flexible OpenGL display, inspection of all objects
and runtime modification. Parametric preprocessors can be written in pure
python, and batch system can be used to drive parametric studies. New
material models and particle shapes can be added (in c++) without modifying
existing classes.
Woo is an evolution of the Yade package
(http://www.launchpad.net/yade), aiming at more flexibility, extensibility,
tighter integration with python and user-friendliness.
''',
classifiers=[
'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',
'Programming Language :: C++',
'Programming Language :: Python',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Topic :: Scientific/Engineering :: Mathematics',
'Intended Audience :: Science/Research',
'Development Status :: 4 - Beta'
],
# '' must use join(...) to use native separator
# otherwise scripts don't get installed!
# http://stackoverflow.com/questions/13271085/console-scripts-entry-point-ignored
package_dir={'woo':'py','':join('core','main'),'woo.qt':'gui/qt4','woo.pre':'py/pre','woo.gts':'py/3rd-party/pygts-0.3.1'},
packages=(
['woo','woo._monkey','woo.tests','woo.pre']
+(['woo.qt'] if 'qt4' in features else [])
+(['woo.gts'] if 'gts' in features else [])
),
# unfortunately, package_data must be in the same directory as the module
# they belong to; therefore, they were moved to py/resources
package_data={'woo':['data/*']},
py_modules=wooModules+['wooMain'],
ext_modules=[
Extension('woo.'+cxxInternalModule,
sources=cxxSrcs,
include_dirs=cppDirs,
define_macros=cppDef,
extra_compile_args=cxxFlags,
libraries=cxxLibs,
library_dirs=libDirs,
extra_link_args=linkFlags,
),
],
# works under windows as well now
# http://stackoverflow.com/questions/13271085/console-scripts-entry-point-ignored
entry_points={
'console_scripts':[
# wwoo on Windows, woo on Linux
'%swoo%s = wooMain:main'%('w' if WIN else '',execFlavor),
# wwoo_batch on windows
# woo-batch on Linux
'%swoo%s%sbatch = wooMain:batch'%('w' if WIN else '',execFlavor,'_' if WIN else '-'),
],
},
# woo.__init__ makes symlinks to _cxxInternal, which would not be possible if zipped
# see http://stackoverflow.com/a/10618900/761090
zip_safe=False,
# py3k support
use_2to3=True,
)
|
gladk/woodem
|
setup.py
|
Python
|
gpl-2.0
| 18,140
|
[
"VTK"
] |
089d20606c79f78219b748151e0c21c67ea9f72c7efbc7404ff6fbdbda0cffdc
|
#!/usr/bin/env python
"""Script for checking the ABINIT automatic tests."""
from __future__ import print_function, division, absolute_import #, unicode_literals
import sys
import os
from pprint import pprint
from optparse import OptionParser
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
#try:
# import tests
#except ImportError:
# Add the directory [...]/abinit/tests to $PYTHONPATH
pack_dir, tail = os.path.split(os.path.abspath(__file__))
pack_dir, tail = os.path.split(pack_dir)
sys.path.insert(0, pack_dir)
import tests
from tests import abitests
abenv = tests.abenv
__version__ = "0.1"
__author__ = "Matteo Giantomassi"
def check_authors(suite):
    """Collect the set of author second names found in *suite*.

    Each element of *suite* either has an ``authors`` attribute (list of
    "F. Second" strings) or is itself an iterable of sub-tests that do.
    Prints a warning for every author string whose first name is missing
    (no "." separator) unless the name is "Unknown".
    """
    def split_name(fullname):
        # "M. Giantomassi" -> ("M", "Giantomassi"); no dot -> ("", whole string)
        dot = fullname.rfind(".")
        if dot == -1:
            return "", fullname.strip()
        return fullname[:dot].strip(), fullname[dot + 1:].strip()

    collected = []
    for test in suite:
        if hasattr(test, "authors"):
            author_list = test.authors
        else:
            # container of sub-tests: merge and deduplicate their author lists
            merged = []
            for sub in test:
                merged.extend(sub.authors)
            author_list = set(merged)
        for entry in author_list:
            first, second = split_name(entry)
            if not first and second and second != "Unknown":
                print("author(s) first name is missing in file %s, string = %s " %(test.full_id, second))
            collected.append(second)
        #print(test.id, first_second_name(test.authors[0])[1])
    return set(collected)
def get_allowed_cpp_vars():
    """
    Inspect the libpaw header file, the autoconf macros and config.ac
    Extract and return the set of allowed CPP options, used to check
    the exclude_cpp_vars TEST_INFO section for possible typos.
    Based on ~abinit/abichecks/scripts/check-cpp-options.
    """
    import re
    # file-type and macro-definition matchers
    re_m4file = re.compile("\.m4$")
    re_hdrfile = re.compile("\.h$")
    re_acdef = re.compile("AC_DEFINE\\(")
    re_cppdef = re.compile("^([ ]?)+#([ ]?)+define [0-9A-Z_]*")
    # assumes the script runs from a directory one level below the abinit root -- TODO confirm
    abidir = os.path.abspath("../")
    # Extract CPP options from the libPAW header files
    cpp_libpaw = set()
    for root, dirs, files in os.walk(os.path.join(abidir, "src/39_libpaw")):
        for src in files:
            if not re_hdrfile.search(src): continue
            with open(os.path.join(root, src), "rt") as fh:
                for line in fh:
                    if not re_cppdef.search(line): continue
                    # keep only the macro name from "#define NAME ..."
                    tmp_def = re.sub("^[# ]*define[ ]*([0-9A-Z_]*).*","\\1", line).strip()
                    cpp_libpaw.add(tmp_def)
    # Extract CPP options from the build system (m4 macros + configure.ac)
    cpp_buildsys = set()
    for root, dirs, files in os.walk(os.path.join(abidir, "config/m4")):
        for src in files:
            if not re_m4file.search(src): continue
            with open(os.path.join(root, src), "rt") as fh:
                for line in fh:
                    if not re_acdef.search(line): continue
                    # keep only the symbol from "AC_DEFINE([NAME], ...)"
                    tmp_def = re.sub(".*AC_DEFINE\\([\\[]?([^\\],]*).*","\\1",line).strip()
                    cpp_buildsys.add(tmp_def)
    with open(os.path.join(abidir, "configure.ac"), "rt") as fh:
        for line in fh:
            if not re_acdef.search(line): continue
            tmp_def = re.sub(".*AC_DEFINE\\([\\[]?([^\\],]*).*","\\1",line).strip()
            cpp_buildsys.add(tmp_def)
    return cpp_buildsys.union(cpp_libpaw)
def main():
    """Run all consistency checks over the full ABINIT test database.

    Returns the number of problems found (0 means every check passed);
    the return value is used as the process exit code.
    """
    usage = "usage: %prog [suite_name] [options] [-h|--help] for help)"
    version = "%prog "+ str(__version__)
    parser = OptionParser(usage=usage, version=version)
    parser.add_option("-v", "--verbose", dest="verbose", action="store_true", default=False,
                      help="verbose mode")
    options, args = parser.parse_args()
    # Get the full database.
    # TODO should use with_disabled=True
    full_database = abitests.build_database(with_disabled=False)
    retcode = 0
    # Check 1: reference files that are stale or missing
    print("Stale or lost reference files... ", end="")
    err = full_database.find_stale_or_lost_refs()
    if err:
        retcode += len(err)
        print("FAILED")
        sys.stderr.write(err)
    else:
        print("OK")
    # Check 2: input files that are stale or missing
    print("Stale or lost inputs... ", end="")
    err = full_database.find_stale_or_lost_inputs()
    if err:
        retcode += len(err)
        print("FAILED")
        sys.stderr.write(err)
    else:
        print("OK")
    # Check 3: keyword sanity (undocumented keywords, keywords with spaces)
    unknowns, wrong = full_database.find_unknown_wrong_keywords()
    print("Unknown keywords... ", end="")
    if unknowns:
        retcode += len(unknowns)
        print("FAILED")
        print("The following keywords are not documented:\n\t%s" % unknowns)
        print("ACTION: Add the corresponding documentation to the KNOWN_KEYWORDS dictionary defined in tests/__init__.py")
    else:
        print("OK")
    print("Wrong keywords ... ", end="")
    if wrong:
        retcode += len(wrong)
        print("FAILED")
        print("The following keywords contain blank spaces:\n\t%s" % wrong)
        print("ACTION: Replace blank spaces with underscores")
    else:
        print("OK")
    # Check 4: mandatory TEST_INFO entries
    print("Testing whether important TEST_INFO entries are present... ", end="")
    errstr = full_database.check_testinfo_options()
    if errstr:
        retcode += 1
        print("FAILED")
        print(errstr)
    else:
        print("OK")
    # Check authors. (disabled)
    #print("Testing whether authors are defined... ", end="")
    #second_names = set()
    #for suite_name, suite in full_database.items():
    #    second_names = second_names.union(check_authors(suite))
    #if second_names:
    #    retcode += len(second_names)
    #    print("FAILED")
    #    pprint(second_names)
    #else:
    #    print("OK")
    # Check 5: CPP variables referenced in tests must be known to the build system
    # (note: typos found here are only printed, they do not affect retcode)
    allowed_cpp_vars = get_allowed_cpp_vars()
    #print(allowed_cpp_vars)
    for suite_name, suite in full_database.items():
        for test in suite:
            # Remove ! from string e.g. !HAVE_MPI
            tvars = set(v[1:] if v.startswith("!") else v for v in test.need_cpp_vars)
            diff = tvars.difference(allowed_cpp_vars)
            if diff:
                print("in test: ", test)
                print("diff", diff)
    return retcode


if __name__ == "__main__":
    sys.exit(main())
|
abinit/abinit
|
tests/check_testsuite.py
|
Python
|
gpl-3.0
| 6,283
|
[
"ABINIT"
] |
090964362c5a882a79798c31c5bc30b9fffc8922e53f162665cee810532c07ad
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# Proxy Agent - Agent enabling secured ingoing traffic via a MiG proxy
# without opening services to anything other than localhost.
#
# Can be used as either a library or a command-line client.
#
# @author Simon Andreas Frimann Lund
#
# Copyright (C) 2003-2009 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
import time, socket, sys, os, logging
from struct import unpack, pack
from threading import Thread
from OpenSSL import SSL
from proxy.plumber import *
from proxy import mip
logging.basicConfig(filename='proxyagent.log',level=logging.DEBUG)
# Proxy agent
def verify_cb(conn, cert, errnum, depth, ok):
    """OpenSSL certificate-verification callback.

    Logs the certificate subject together with the verification result and
    returns the result unchanged, i.e. it neither accepts nor rejects
    anything beyond what OpenSSL already decided.
    """
    subject = cert.get_subject()
    logging.debug('Proxy certificate: %s %s' % (subject, ok))
    return ok
control_socket = None # Life-line to the proxy
connections = [] # List of connections to close and cleanup gracefully
buffer_size = 4096 # Must be mod 2, 4096 might be too big for some...
# but it is much faster if it's supported
def connect(host, port, identity, tls=True):
    """Connect to the proxy, identify this agent, then serve SETUP_REQUEST
    messages from the control connection until it breaks.

    :param host: proxy host name or address
    :param port: proxy control port
    :param identity: identifier sent to the proxy during the handshake
    :param tls: use TLS for the control connection and for tunnels
    """
    # Connect to proxy and identify (opens the module-global control_socket).
    # BUGFIX: the tls flag was previously not forwarded to handshake(), so the
    # control connection always negotiated TLS even when the caller asked for
    # a plain socket (e.g. mipclient.py invoked without the 'SSL' argument).
    handshake(host, port, identity, tls)
    # Handle Setup request forever
    while 1:
        try:
            data = control_socket.recv(1)  # Get the message type
            if (data == mip.messages['SETUP_REQUEST']):
                # Wire format (all integers '!I', big-endian u32): ticket,
                # proxy-host length + bytes, proxy port, machine-host
                # length + bytes, machine port.
                (ticket,) = unpack('!I', control_socket.recv(4))
                (proxy_host_length,) = unpack('!I', control_socket.recv(4))
                proxy_host = control_socket.recv(proxy_host_length)
                (proxy_port,) = unpack('!I', control_socket.recv(4))
                (machine_host_length,) = unpack('!I', control_socket.recv(4))
                machine_host = control_socket.recv(machine_host_length)
                (machine_port,) = unpack('!I', control_socket.recv(4))
                handle_setup_request(ticket, proxy_host, proxy_port,
                                     machine_host, machine_port, tls)
            else:
                logging.debug('CLIENT: Broken data! %s' % repr(data))
        except:
            # NOTE(review): bare except also swallows programming errors;
            # kept as-is to preserve the original shutdown behaviour.
            logging.debug('CLIENT: Unexpected error, shutting down control connection.')
            control_socket.close()
            break
"""
handshake,
Identify proxy agent to proxy server
TODO: catch those exceptions and add return error code...
"""
def handshake(host, port, identity, tls=True):
    """Identify the proxy agent to the proxy server.

    Opens the module-global ``control_socket`` (plain TCP, or TLS when *tls*
    is true) to (host, port) and sends a MIP handshake message carrying
    *identity*. Certificate/key files are loaded from a 'certs/' directory
    next to the script.
    TODO: catch the connection/SSL exceptions and return an error code.
    """
    global control_socket
    handshakeMessage = mip.handshake(1, identity)
    # locate the certs/ directory relative to the script location
    dir = os.path.dirname(sys.argv[0])
    if dir == '':
        dir = os.curdir
    if tls:
        # Initialize context
        ctx = SSL.Context(SSL.TLSv1_METHOD)
        ctx.set_verify(SSL.VERIFY_NONE, verify_cb)
        ctx.use_privatekey_file (os.path.join(dir, 'certs/client.pkey'))
        ctx.use_certificate_file(os.path.join(dir, 'certs/client.cert'))
        ctx.load_verify_locations(os.path.join(dir, 'certs/CA.cert'))
        # Set up client
        control_socket = SSL.Connection(ctx, socket.socket(socket.AF_INET, socket.SOCK_STREAM))
    else:
        control_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    control_socket.connect((host, port))
    control_socket.send(handshakeMessage)
"""
handle_setup_request,
Set's up a new tunnel between local endpoint and proxy server
"""
def handle_setup_request(ticket, proxy_host, proxy_port, machine_host, machine_port, tls=True):
    """Set up a new tunnel between the local endpoint and the proxy server.

    Connects to the local machine endpoint first, then to the proxy; reports
    the combined status both on the new proxy connection and on the global
    control socket, and finally starts a PlumberTS pumping data between the
    two sockets.

    :returns: True when both connections succeeded and the tunnel is running.
    """
    global control_socket
    logging.debug('CLIENT: Performing setup %s (phost:%s,pport:%s,mhost:%s,mport:%s)' % (ticket, proxy_host, proxy_port, machine_host, machine_port))
    # locate certs/ relative to the script location (same as handshake)
    dir = os.path.dirname(sys.argv[0])
    if dir == '':
        dir = os.curdir
    proxyConnected = False
    endPointConnected = False
    # Connect to endpoint
    try:
        endpoint = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        endpoint.connect((machine_host, machine_port))
        endPointConnected = True
    except:
        logging.debug('CLIENT: Socket error when contacting endpoint.')
    # Connect to proxy and prepend setup response; only attempted when the
    # endpoint connection succeeded
    if endPointConnected:
        try:
            if tls:
                # Initialize context
                ctx = SSL.Context(SSL.TLSv1_METHOD)
                ctx.set_verify(SSL.VERIFY_NONE, verify_cb) # Demand a certificate
                ctx.use_privatekey_file (os.path.join(dir, 'certs/client.pkey'))
                ctx.use_certificate_file(os.path.join(dir, 'certs/client.cert'))
                ctx.load_verify_locations(os.path.join(dir, 'certs/CA.cert'))
                proxy_socket = SSL.Connection(ctx, socket.socket(socket.AF_INET, socket.SOCK_STREAM))
            else:
                proxy_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            proxy_socket.connect((proxy_host, proxy_port))
            proxyConnected = True
        except:
            logging.debug('CLIENT: Socket error when contacting proxy.')
    # Send status to the connection handler in proxy
    if proxyConnected:
        proxy_socket.sendall(mip.setup_response(ticket, int(endPointConnected and proxyConnected)))
    # Send status back over control line to proxy
    control_socket.sendall(mip.setup_response(ticket,int(endPointConnected and proxyConnected)))
    # Setup tunnel between proxy and endpoint
    if proxyConnected and endPointConnected:
        # Add connections to list so they can be shut down gracefully
        connections.append(endpoint)
        connections.append(proxy_socket)
        mario = PlumberTS(endpoint, proxy_socket, 4096, True)
        #mario = Plumber(endpoint, ss, 1024, True)
        logging.debug('CLIENT: Setup done!')
    else:
        logging.debug('CLIENT: Setup Failure!')
    return proxyConnected and endPointConnected
if __name__ == '__main__':
if len(sys.argv) < 5:
print 'Usage: python[2] mipclient.py HOST PORT IDENTIFIER SSL'
sys.exit(1)
# TODO: - Sanitize commandline arguments
# - Provide cert files as commandline arguments
try:
connect(sys.argv[1], int(sys.argv[2]), sys.argv[3], (sys.argv[4] == 'SSL'))
except KeyboardInterrupt:
logging.debug('CLIENT: User interrupted, shutting down connections.')
for conn in connections:
logging.debug('%s ' % conn)
conn.close()
logging.debug('CLIENT: Shut down control connection.')
control_socket.close()
logging.debug('CLIENT: Control connection is down.')
exit(0)
else:
pass
|
heromod/migrid
|
mig/vm-proxy/deprecated/proxyagentPreDaemon.py
|
Python
|
gpl-2.0
| 6,939
|
[
"Brian"
] |
1d5ca3aba2e4815902416abb2a9f245f80e2058f029bed25dd50cc35d6210bb0
|
#!/usr/bin/python
# Copyright (C) 2016 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Authors:
# Daniel Izquierdo Cortazar <dizquierdo@bitergia.com>
#
from datetime import datetime as dt
import pandas
from grimoirelab_toolkit.datetime import str_to_datetime
from grimoire_elk.enriched.sortinghat_gelk import SortingHat
class Events(object):
    """ Class that 'eventizes' information for a given dataset.
    This class aims at providing with some granularity all of the events
    of a pre-formatted item. This is expected to help in later steps
    during the visualization platform.
    """

    # Metadata column names
    META_TIMESTAMP = "metadata__timestamp"
    META_UPDATED_ON = "metadata__updated_on"
    META_ENRICHED_ON = "metadata__enriched_on"
    # Common event column names
    GRIMOIRE_CREATION_DATE = "grimoire_creation_date"
    PROJECT = "project"
    PROJECT_1 = "project_1"
    PERCEVAL_UUID = "perceval_uuid"
    # SortingHat (identity) column names
    SH_AUTHOR_ID = "author_id"
    SH_AUTHOR_ORG_NAME = "author_org_name"
    SH_AUTHOR_NAME = "author_name"
    SH_AUTHOR_UUID = "author_uuid"
    SH_AUTHOR_DOMAIN = "author_domain"
    SH_AUTHOR_USER_NAME = "author_user_name"
    SH_AUTHOR_BOT = "author_bot"
    # Placeholder used when SortingHat has no data for a field
    UNKNOWN = 'Unknown'

    def __init__(self, items, enrich):
        """ Main constructor of the class
        :param items: original list of JSON that contains all info about a commit
        :type items: list
        :param enrich:
        :type enrich: grimoire_elk.elk.enrich.Enrich
        """
        self.items = items
        self.enrich = enrich

    def _add_metadata(self, df_columns, item):
        """Append metadata, creation-date and Perceval columns for *item*."""
        metadata__timestamp = item["metadata__timestamp"]
        metadata__updated_on = item["metadata__updated_on"]
        metadata__enriched_on = dt.utcnow().isoformat()
        df_columns[Events.META_TIMESTAMP].append(metadata__timestamp)
        df_columns[Events.META_UPDATED_ON].append(metadata__updated_on)
        df_columns[Events.META_ENRICHED_ON].append(metadata__enriched_on)
        # If called after '__add_sh_info', item will already contain
        # 'grimoire_creation_date'
        if Events.GRIMOIRE_CREATION_DATE in item:
            creation_date = item[Events.GRIMOIRE_CREATION_DATE]
        else:
            # assumes a git-style raw item with data['AuthorDate'] -- TODO confirm
            creation_date = str_to_datetime(item['data']['AuthorDate'])
        df_columns[Events.GRIMOIRE_CREATION_DATE].append(creation_date)
        # Perceval fields
        df_columns[Events.PERCEVAL_UUID].append(item['uuid'])
        # TODO add other common fields as 'perceval version', 'tag', 'origin'...

    def _add_general_info(self, df_columns, item):
        """Append the project-mapping columns for *item*."""
        project_item = self.enrich.get_item_project(item)
        df_columns[Events.PROJECT].append(project_item[Events.PROJECT])
        df_columns[Events.PROJECT_1].append(project_item[Events.PROJECT_1])

    def _add_sh_info(self, df_columns, item, update_sh_db=False):
        """Append SortingHat identity columns for *item*.

        When *update_sh_db* is true, the item's identities are first pushed
        into the SortingHat database. Fields with no SortingHat data fall
        back to Events.UNKNOWN (author_bot falls back to False).
        Side effect: mutates *item* by adding the grimoire fields.
        """
        # To ensure we have processed the entity
        if update_sh_db:
            identities = self.enrich.get_identities(item)
            SortingHat.add_identities(self.enrich.sh_db, identities,
                                      self.enrich.get_connector_name())
        # Add the grimoire_creation_date to the raw item
        # It is used for getting the right affiliation
        item.update(self.enrich.get_grimoire_fields(
            item["data"]["AuthorDate"], "commit"))
        sh_identity = self.enrich.get_item_sh(item)
        # Defaults used when SortingHat returns empty/falsy values
        author_id = Events.UNKNOWN
        author_org_name = Events.UNKNOWN
        author_name = Events.UNKNOWN
        author_uuid = Events.UNKNOWN
        author_domain = Events.UNKNOWN
        author_username = Events.UNKNOWN
        author_bot = False
        # Add SH information, if any
        if sh_identity[Events.SH_AUTHOR_ID]:
            author_id = sh_identity[Events.SH_AUTHOR_ID]
        if sh_identity[Events.SH_AUTHOR_ORG_NAME]:
            author_org_name = sh_identity[Events.SH_AUTHOR_ORG_NAME]
        if sh_identity[Events.SH_AUTHOR_NAME]:
            author_name = sh_identity[Events.SH_AUTHOR_NAME]
        if sh_identity[Events.SH_AUTHOR_UUID]:
            author_uuid = sh_identity[Events.SH_AUTHOR_UUID]
        if sh_identity[Events.SH_AUTHOR_DOMAIN]:
            author_domain = sh_identity[Events.SH_AUTHOR_DOMAIN]
        if sh_identity[Events.SH_AUTHOR_USER_NAME]:
            author_username = sh_identity[Events.SH_AUTHOR_USER_NAME]
        if sh_identity[Events.SH_AUTHOR_BOT]:
            author_bot = sh_identity[Events.SH_AUTHOR_BOT]
        df_columns[Events.SH_AUTHOR_ID].append(author_id)
        df_columns[Events.SH_AUTHOR_ORG_NAME].append(author_org_name)
        df_columns[Events.SH_AUTHOR_NAME].append(author_name)
        df_columns[Events.SH_AUTHOR_UUID].append(author_uuid)
        df_columns[Events.SH_AUTHOR_DOMAIN].append(author_domain)
        df_columns[Events.SH_AUTHOR_USER_NAME].append(author_username)
        df_columns[Events.SH_AUTHOR_BOT].append(author_bot)

    def _init_common_fields(self, df_columns):
        """Initialize every shared column of *df_columns* to an empty list."""
        # Metadata fields
        df_columns[Events.META_TIMESTAMP] = []
        df_columns[Events.META_UPDATED_ON] = []
        df_columns[Events.META_ENRICHED_ON] = []
        # Common fields
        df_columns[Events.GRIMOIRE_CREATION_DATE] = []
        df_columns[Events.PROJECT] = []
        df_columns[Events.PROJECT_1] = []
        df_columns[Events.PERCEVAL_UUID] = []
        # SortingHat information
        df_columns[Events.SH_AUTHOR_ID] = []
        df_columns[Events.SH_AUTHOR_ORG_NAME] = []
        df_columns[Events.SH_AUTHOR_NAME] = []
        df_columns[Events.SH_AUTHOR_UUID] = []
        df_columns[Events.SH_AUTHOR_DOMAIN] = []
        df_columns[Events.SH_AUTHOR_USER_NAME] = []
        df_columns[Events.SH_AUTHOR_BOT] = []

    def _add_common_fields(self, df_columns, item):
        """Append all shared columns (metadata, SortingHat, project) for *item*."""
        self._add_metadata(df_columns, item)
        self._add_sh_info(df_columns, item)
        self._add_general_info(df_columns, item)

    @staticmethod
    def _add_common_events(events, df_columns):
        """Copy every shared column from *df_columns* into the *events* frame."""
        events[Events.META_TIMESTAMP] = df_columns[Events.META_TIMESTAMP]
        events[Events.META_UPDATED_ON] = df_columns[Events.META_UPDATED_ON]
        events[Events.META_ENRICHED_ON] = df_columns[Events.META_ENRICHED_ON]
        events[Events.GRIMOIRE_CREATION_DATE] = df_columns[Events.GRIMOIRE_CREATION_DATE]
        events[Events.PROJECT] = df_columns[Events.PROJECT]
        events[Events.PROJECT_1] = df_columns[Events.PROJECT_1]
        events[Events.PERCEVAL_UUID] = df_columns[Events.PERCEVAL_UUID]
        events[Events.SH_AUTHOR_ID] = df_columns[Events.SH_AUTHOR_ID]
        events[Events.SH_AUTHOR_ORG_NAME] = df_columns[Events.SH_AUTHOR_ORG_NAME]
        events[Events.SH_AUTHOR_NAME] = df_columns[Events.SH_AUTHOR_NAME]
        events[Events.SH_AUTHOR_UUID] = df_columns[Events.SH_AUTHOR_UUID]
        events[Events.SH_AUTHOR_DOMAIN] = df_columns[Events.SH_AUTHOR_DOMAIN]
        events[Events.SH_AUTHOR_USER_NAME] = df_columns[Events.SH_AUTHOR_USER_NAME]
        events[Events.SH_AUTHOR_BOT] = df_columns[Events.SH_AUTHOR_BOT]
class Bugzilla(Events):
    """ Class used to 'eventize' Bugzilla items

    This splits information of each item based on a pre-configured mapping.
    There are several levels of events. These levels were created as a way
    to have more or less time consuming generation of events.
    """

    EVENT_OPEN = "ISSUE_OPEN"

    # Fields supported by this module (when a DataFrame is returned)
    ISSUE_ID = "id"
    ISSUE_EVENT = "eventtype"
    ISSUE_DATE = "date"
    ISSUE_OWNER = "owner"

    def __bug_photo(self, item):
        """ Retrieves basic information about the current status of the bug

        That current status contains the photo of the bug at the moment of
        the analysis. These fields will be used later for create extra
        events
        """

    def __init__(self, items):
        """ Main constructor of the class

        :param items: original list of JSON that contains all info about a bug
        :type items: list
        """
        self.items = items

    def eventize(self, granularity):
        """ This splits the JSON information found at self.events into the
        several events. For this there are three different levels of time
        consuming actions: 1-soft, 2-medium and 3-hard.

        Level 1 provides events about open and closed issues.
        Level 2 provides events about the rest of the status updates.
        Level 3 provides events about the rest of the values in any of the
        fields.

        :param granularity: Levels of time consuming actions to calculate events
        :type granularity: integer
        :returns: Pandas dataframe with splitted events.
        :rtype: pandas.DataFrame
        """
        field_names = (Bugzilla.ISSUE_ID, Bugzilla.ISSUE_EVENT,
                       Bugzilla.ISSUE_DATE, Bugzilla.ISSUE_OWNER)
        columns = {name: [] for name in field_names}

        for item in self.items:
            bug = item["data"]

            if granularity == 1:
                # One event for the bug being opened
                columns[Bugzilla.ISSUE_ID].append(bug['bug_id'][0]['__text__'])
                columns[Bugzilla.ISSUE_EVENT].append(Bugzilla.EVENT_OPEN)
                columns[Bugzilla.ISSUE_DATE].append(str_to_datetime(bug['creation_ts'][0]['__text__']))
                columns[Bugzilla.ISSUE_OWNER].append(bug['reporter'][0]["__text__"])
                # One extra event per recorded activity change, if any
                for change in bug.get('activity', []):
                    columns[Bugzilla.ISSUE_ID].append(bug['bug_id'][0]['__text__'])
                    columns[Bugzilla.ISSUE_EVENT].append("ISSUE_" + change["Added"])
                    columns[Bugzilla.ISSUE_DATE].append(str_to_datetime(change["When"]))
                    columns[Bugzilla.ISSUE_OWNER].append(change["Who"])

            if granularity == 2:
                # TBD Let's produce an index with all of the changes.
                # Let's have in mind the point about having the changes of initiating
                # the ticket.
                pass

            if granularity == 3:
                # TDB
                pass

        # Assemble column by column to keep a deterministic column order
        # (instead of a direct cast from the dict)
        events = pandas.DataFrame()
        for name in field_names:
            events[name] = columns[name]
        return events
class BugzillaRest(Events):
    """ Class used to eventize Bugzilla Rest items

    This splits each item information based on a pre-existing mapping.
    """

    EVENT_OPEN = "ISSUE_OPEN"

    # Fields supported by this module (when a DataFrame is returned)
    ISSUE_ID = "id"
    ISSUE_EVENT = "eventtype"
    ISSUE_DATE = "date"
    ISSUE_OWNER = "owner"
    ISSUE_ADDED = "added"
    ISSUE_REMOVED = "removed"

    def __init__(self, items):
        """ Main constructor of the class

        :param items: original list of JSON that contains all info about a bug
        :type items: list
        """
        self.items = items

    def eventize(self, granularity):
        """ This splits the JSON information found at self.events into the
        several events. For this there are three different levels of time
        consuming actions: 1-soft, 2-medium and 3-hard.

        Level 1 provides events about open and closed issues.
        Level 2 provides events about the rest of the status updates.
        Level 3 provides events about the rest of the values in any of the
        fields.

        :param granularity: Levels of time consuming actions to calculate events
        :type granularity: integer
        :returns: Pandas dataframe with splitted events.
        :rtype: pandas.DataFrame
        """
        field_names = (BugzillaRest.ISSUE_ID, BugzillaRest.ISSUE_EVENT,
                       BugzillaRest.ISSUE_DATE, BugzillaRest.ISSUE_OWNER,
                       BugzillaRest.ISSUE_ADDED, BugzillaRest.ISSUE_REMOVED)
        columns = {name: [] for name in field_names}

        def add_row(bug_id, event, date, owner, added, removed):
            # Append one full event row across all column lists
            columns[BugzillaRest.ISSUE_ID].append(bug_id)
            columns[BugzillaRest.ISSUE_EVENT].append(event)
            columns[BugzillaRest.ISSUE_DATE].append(date)
            columns[BugzillaRest.ISSUE_OWNER].append(owner)
            columns[BugzillaRest.ISSUE_ADDED].append(added)
            columns[BugzillaRest.ISSUE_REMOVED].append(removed)

        for item in self.items:
            bug = item["data"]

            if granularity == 1:
                # Open event: "added"/"removed" carry the "-" placeholder
                add_row(bug['id'], BugzillaRest.EVENT_OPEN,
                        str_to_datetime(bug['creation_time']),
                        bug['creator_detail']["real_name"], "-", "-")
                # One event per field change in the bug history, if any
                for step in bug.get('history', []):
                    who = step["who"]
                    when = str_to_datetime(step["when"])
                    for change in step["changes"]:
                        add_row(bug['id'], "ISSUE_" + change["field_name"],
                                when, who, change["added"], change["removed"])

            if granularity == 2:
                # TBD Let's produce an index with all of the changes.
                # Let's have in mind the point about having the changes of initiating
                # the ticket.
                pass

            if granularity == 3:
                # TDB
                pass

        # Assemble column by column to keep a deterministic column order
        events = pandas.DataFrame()
        for name in field_names:
            events[name] = columns[name]
        return events
class Git(Events):
    """ Class used to 'eventize' Git items

    This splits information of each item based on a pre-configured mapping.
    There are several levels of events. These levels were created as a way
    to have more or less time consuming generation of events.
    """

    EVENT_COMMIT = "COMMIT"
    EVENT_FILE = "FILE_"

    # Fields supported by this module (when a DataFrame is returned)
    COMMIT_ID = "id"
    COMMIT_EVENT = "eventtype"
    COMMIT_DATE = "date"
    COMMIT_OWNER = "owner"
    COMMIT_COMMITTER = "committer"
    COMMIT_COMMITTER_DATE = "committer_date"
    COMMIT_REPOSITORY = "repository"
    COMMIT_MESSAGE = "message"
    COMMIT_NUM_FILES = "num_files"
    COMMIT_ADDED_LINES = "num_added_lines"
    COMMIT_REMOVED_LINES = "num_removed_lines"
    COMMIT_HASH = "hash"
    AUTHOR_DOMAIN = "git_author_domain"
    FILE_EVENT = "fileaction"
    FILE_PATH = "filepath"
    FILE_ADDED_LINES = "addedlines"
    FILE_REMOVED_LINES = "removedlines"
    FILE_FILES = "files"

    def __init__(self, items, git_enrich):
        """ Main constructor of the class

        :param items: original list of JSON that contains all info about a commit
        :type items: list
        :param git_enrich: enricher used to resolve author identities and domains
        :type git_enrich: grimoire_elk.elk.git.GitEnrich
        """
        super().__init__(items=items, enrich=git_enrich)

    def __add_commit_info(self, df_columns, item):
        """Append the commit-level columns (hash, id, dates, author, repo,
        message, author domain) for one raw item."""
        commit_data = item["data"]
        repository = item["origin"]

        creation_date = str_to_datetime(commit_data['AuthorDate'])

        df_columns[Git.COMMIT_HASH].append(commit_data['commit'])
        df_columns[Git.COMMIT_ID].append(commit_data['commit'])
        df_columns[Git.COMMIT_EVENT].append(Git.EVENT_COMMIT)
        df_columns[Git.COMMIT_DATE].append(creation_date)
        df_columns[Git.COMMIT_OWNER].append(commit_data['Author'])
        df_columns[Git.COMMIT_COMMITTER].append(commit_data['Commit'])
        df_columns[Git.COMMIT_COMMITTER_DATE].append(str_to_datetime(commit_data['CommitDate']))
        df_columns[Git.COMMIT_REPOSITORY].append(repository)
        # Not every commit carries a message; default to the empty string
        if 'message' in commit_data.keys():
            df_columns[Git.COMMIT_MESSAGE].append(commit_data['message'])
        else:
            df_columns[Git.COMMIT_MESSAGE].append('')

        author_domain = self.enrich.get_identity_domain(self.enrich.get_sh_identity(item, 'Author'))
        df_columns[Git.AUTHOR_DOMAIN].append(author_domain)

    def eventize(self, granularity):
        """ This splits the JSON information found at self.events into the
        several events. For this there are three different levels of time
        consuming actions: 1-soft, 2-medium and 3-hard.

        Level 1 provides events about commits
        Level 2 provides events about files
        Level 3 provides other events (not used so far)

        :param granularity: Levels of time consuming actions to calculate events
        :type granularity: integer
        :returns: Pandas dataframe with splitted events.
        :rtype: pandas.DataFrame
        """
        df_columns = {}
        # Init common columns
        self._init_common_fields(df_columns)

        # First level granularity
        df_columns[Git.COMMIT_ID] = []
        df_columns[Git.COMMIT_EVENT] = []
        df_columns[Git.COMMIT_DATE] = []
        df_columns[Git.COMMIT_OWNER] = []
        df_columns[Git.COMMIT_COMMITTER] = []
        df_columns[Git.COMMIT_COMMITTER_DATE] = []
        df_columns[Git.COMMIT_REPOSITORY] = []
        df_columns[Git.COMMIT_MESSAGE] = []
        df_columns[Git.COMMIT_NUM_FILES] = []
        df_columns[Git.COMMIT_ADDED_LINES] = []
        df_columns[Git.COMMIT_REMOVED_LINES] = []
        df_columns[Git.COMMIT_HASH] = []
        df_columns[Git.AUTHOR_DOMAIN] = []

        # Second level of granularity
        df_columns[Git.FILE_FILES] = []
        df_columns[Git.FILE_EVENT] = []
        df_columns[Git.FILE_PATH] = []
        df_columns[Git.FILE_ADDED_LINES] = []
        df_columns[Git.FILE_REMOVED_LINES] = []

        events = pandas.DataFrame()

        for item in self.items:
            commit_data = item["data"]
            if granularity == 1:
                self._add_common_fields(df_columns, item)
                self.__add_commit_info(df_columns, item)

                # Merge commits may carry no 'files' entry: treat as empty
                # instead of raising KeyError (see previous TODO).
                files = commit_data.get("files", [])

                # BUG FIX: these three columns were plain assignments
                # (df_columns[...] = value) that replaced the per-commit
                # list with a scalar; pandas then broadcast the *last*
                # commit's value to every row. They must be appended once
                # per commit like every other column.
                df_columns[Git.COMMIT_NUM_FILES].append(len(files))

                added_lines = 0
                removed_lines = 0
                for f in files:
                    # Binary files report "-" instead of a line count
                    if "added" in f.keys() and f["added"] != "-":
                        added_lines = added_lines + int(f["added"])
                    if "removed" in f.keys() and f["removed"] != "-":
                        removed_lines = removed_lines + int(f["removed"])
                df_columns[Git.COMMIT_ADDED_LINES].append(added_lines)
                df_columns[Git.COMMIT_REMOVED_LINES].append(removed_lines)

            if granularity == 2:
                # Add extra info about files actions, if there were any
                if "files" in commit_data.keys():
                    files = commit_data["files"]
                    # Count only entries that carry a file action
                    nfiles = 0
                    for f in files:
                        if "action" in f.keys():
                            nfiles += 1
                    # One event row per file entry in the commit
                    for f in files:
                        self._add_common_fields(df_columns, item)
                        self.__add_commit_info(df_columns, item)
                        df_columns[Git.FILE_FILES].append(nfiles)
                        if "action" in f.keys():
                            df_columns[Git.FILE_EVENT].append(Git.EVENT_FILE + f["action"])
                        else:
                            df_columns[Git.FILE_EVENT].append("-")
                        if "file" in f.keys():
                            df_columns[Git.FILE_PATH].append(f["file"])
                        else:
                            df_columns[Git.FILE_PATH].append("-")
                        # "-" (binary file) and missing counts both map to 0
                        if "added" in f.keys() and f["added"] != "-":
                            df_columns[Git.FILE_ADDED_LINES].append(int(f["added"]))
                        else:
                            df_columns[Git.FILE_ADDED_LINES].append(0)
                        if "removed" in f.keys() and f["removed"] != "-":
                            df_columns[Git.FILE_REMOVED_LINES].append(int(f["removed"]))
                        else:
                            df_columns[Git.FILE_REMOVED_LINES].append(0)
                else:
                    print("Merge found, doing nothing...")

            if granularity == 3:
                # TDB
                pass

        # Done in this way to have an order (and not a direct cast)
        self._add_common_events(events, df_columns)
        events[Git.COMMIT_ID] = df_columns[Git.COMMIT_ID]
        events[Git.COMMIT_EVENT] = df_columns[Git.COMMIT_EVENT]
        events[Git.COMMIT_DATE] = df_columns[Git.COMMIT_DATE]
        events[Git.COMMIT_OWNER] = df_columns[Git.COMMIT_OWNER]
        events[Git.COMMIT_COMMITTER] = df_columns[Git.COMMIT_COMMITTER]
        events[Git.COMMIT_COMMITTER_DATE] = df_columns[Git.COMMIT_COMMITTER_DATE]
        events[Git.COMMIT_REPOSITORY] = df_columns[Git.COMMIT_REPOSITORY]
        events[Git.COMMIT_MESSAGE] = df_columns[Git.COMMIT_MESSAGE]
        events[Git.COMMIT_HASH] = df_columns[Git.COMMIT_HASH]
        events[Git.AUTHOR_DOMAIN] = df_columns[Git.AUTHOR_DOMAIN]
        if granularity == 1:
            events[Git.COMMIT_NUM_FILES] = df_columns[Git.COMMIT_NUM_FILES]
            events[Git.COMMIT_ADDED_LINES] = df_columns[Git.COMMIT_ADDED_LINES]
            events[Git.COMMIT_REMOVED_LINES] = df_columns[Git.COMMIT_REMOVED_LINES]
        if granularity == 2:
            events[Git.FILE_FILES] = df_columns[Git.FILE_FILES]
            events[Git.FILE_EVENT] = df_columns[Git.FILE_EVENT]
            events[Git.FILE_PATH] = df_columns[Git.FILE_PATH]
            events[Git.FILE_ADDED_LINES] = df_columns[Git.FILE_ADDED_LINES]
            events[Git.FILE_REMOVED_LINES] = df_columns[Git.FILE_REMOVED_LINES]
        return events
class Gerrit(Events):
    """ Class used to 'eventize' Gerrit items

    This splits information of each item based on a pre-configured mapping.
    There are several levels of events. These levels were created as a way
    to have more or less time consuming generation of events.
    """

    EVENT_OPEN = "CHANGESET_SENT"
    EVENT_ = "CHANGESET_"

    # Fields supported by this module (when a DataFrame is returned)
    CHANGESET_ID = "id"
    CHANGESET_EVENT = "eventtype"
    CHANGESET_DATE = "date"
    CHANGESET_OWNER = "owner"
    CHANGESET_EMAIL = "email"
    CHANGESET_VALUE = "value"
    CHANGESET_REPO = "repository"

    def __init__(self, items):
        """ Main constructor of the class

        :param items: original list of JSON that contains all info about a commit
        :type items: list
        """
        self.items = items

    def eventize(self, granularity):
        """ This splits the JSON information found at self.events into the
        several events. For this there are three different levels of time
        consuming actions: 1-soft, 2-medium and 3-hard.

        Level 1 provides events about changesets being sent and closed.
        Level 2 adds events about patchsets and their Code-Review approvals.
        Level 3 provides other events (not used so far).

        :param granularity: Levels of time consuming actions to calculate events
        :type granularity: integer
        :returns: Pandas dataframe with splitted events.
        :rtype: pandas.DataFrame
        """
        changeset = {}
        # First level granularity: one list per output column
        changeset[Gerrit.CHANGESET_ID] = []
        changeset[Gerrit.CHANGESET_EVENT] = []
        changeset[Gerrit.CHANGESET_DATE] = []
        changeset[Gerrit.CHANGESET_OWNER] = []
        changeset[Gerrit.CHANGESET_EMAIL] = []
        changeset[Gerrit.CHANGESET_VALUE] = []
        changeset[Gerrit.CHANGESET_REPO] = []

        events = pandas.DataFrame()

        for item in self.items:
            changeset_data = item["data"]
            if granularity >= 1:
                # Changeset submission date: filling a new event
                changeset[Gerrit.CHANGESET_ID].append(changeset_data["number"])
                changeset[Gerrit.CHANGESET_EVENT].append(Gerrit.EVENT_OPEN)
                # "createdOn" is a Unix timestamp (seconds)
                changeset[Gerrit.CHANGESET_DATE].append(dt.fromtimestamp(int(changeset_data["createdOn"])))
                changeset[Gerrit.CHANGESET_REPO].append(changeset_data["project"])
                value = email = "notknown"
                # NOTE(review): this lookup uses elif, so the FIRST matching
                # key wins (name > username > email); the lookups further
                # below use independent ifs where the LAST match wins --
                # presumably intentional, but worth confirming.
                if "name" in changeset_data["owner"].keys():
                    value = changeset_data["owner"]["name"]
                elif "username" in changeset_data["owner"].keys():
                    value = changeset_data["owner"]["username"]
                elif "email" in changeset_data["owner"].keys():
                    value = changeset_data["owner"]["email"]
                    email = changeset_data["owner"]["email"]
                changeset[Gerrit.CHANGESET_OWNER].append(value)
                changeset[Gerrit.CHANGESET_EMAIL].append(email)
                # -10 is the sentinel "no review value" marker for non-approval events
                changeset[Gerrit.CHANGESET_VALUE].append(-10)

                # Adding the closing status updates (if there was any)
                if changeset_data["status"] == 'ABANDONED' or \
                   changeset_data["status"] == 'MERGED':
                    closing_date = dt.fromtimestamp(int(changeset_data["lastUpdated"]))
                    changeset[Gerrit.CHANGESET_ID].append(changeset_data["number"])
                    # Event type becomes CHANGESET_ABANDONED / CHANGESET_MERGED
                    changeset[Gerrit.CHANGESET_EVENT].append(Gerrit.EVENT_ +
                                                            changeset_data["status"])
                    changeset[Gerrit.CHANGESET_DATE].append(closing_date)
                    changeset[Gerrit.CHANGESET_REPO].append(changeset_data["project"])
                    value = email = "notknown"
                    # NOTE(review): independent ifs here -- email has the
                    # highest precedence (last assignment wins), unlike the
                    # elif chain above.
                    if "name" in changeset_data["owner"].keys():
                        value = changeset_data["owner"]["name"]
                    if "username" in changeset_data["owner"].keys():
                        value = changeset_data["owner"]["username"]
                    if "email" in changeset_data["owner"].keys():
                        value = changeset_data["owner"]["email"]
                        email = changeset_data["owner"]["email"]
                    changeset[Gerrit.CHANGESET_OWNER].append(value)
                    changeset[Gerrit.CHANGESET_EMAIL].append(email)
                    changeset[Gerrit.CHANGESET_VALUE].append(-10)

            if granularity >= 2:
                # Adding extra info about the patchsets
                for patchset in changeset_data["patchSets"]:
                    changeset[Gerrit.CHANGESET_ID].append(changeset_data["number"])
                    changeset[Gerrit.CHANGESET_EVENT].append(Gerrit.EVENT_ + "PATCHSET_SENT")
                    changeset[Gerrit.CHANGESET_DATE].append(
                        dt.fromtimestamp(int(patchset["createdOn"])))
                    changeset[Gerrit.CHANGESET_REPO].append(changeset_data["project"])
                    try:
                        email = "patchset_noname"
                        # NOTE(review): if "author" exists but none of the
                        # keys match, `value` keeps whatever it held from the
                        # previous iteration (stale carry-over) -- confirm
                        # whether that is intended.
                        if "name" in patchset["author"].keys():
                            value = patchset["author"]["name"]
                        if "username" in patchset["author"].keys():
                            value = patchset["author"]["username"]
                        if "email" in patchset["author"].keys():
                            value = patchset["author"]["email"]
                            email = patchset["author"]["email"]
                    except KeyError:
                        # Patchset has no "author" entry at all
                        value = "patchset_noname"
                    changeset[Gerrit.CHANGESET_OWNER].append(value)
                    changeset[Gerrit.CHANGESET_EMAIL].append(email)
                    changeset[Gerrit.CHANGESET_VALUE].append(-10)

                    # Only Code-Review approvals are turned into events
                    if "approvals" in patchset.keys():
                        for approval in patchset["approvals"]:
                            if approval["type"] != "Code-Review":
                                continue
                            changeset[Gerrit.CHANGESET_ID].append(changeset_data["number"])
                            changeset[Gerrit.CHANGESET_EVENT].append(
                                Gerrit.EVENT_ +
                                "PATCHSET_APPROVAL_" + approval["type"])
                            changeset[Gerrit.CHANGESET_DATE].append(
                                dt.fromtimestamp(int(approval["grantedOn"])))
                            changeset[Gerrit.CHANGESET_REPO].append(changeset_data["project"])
                            email = "approval_noname"
                            if "name" in approval["by"].keys():
                                value = approval["by"]["name"]
                            elif "username" in approval["by"].keys():
                                value = approval["by"]["username"]
                            elif "email" in approval["by"].keys():
                                value = approval["by"]["email"]
                                email = approval["by"]["email"]
                            changeset[Gerrit.CHANGESET_OWNER].append(value)
                            changeset[Gerrit.CHANGESET_EMAIL].append(email)
                            # The actual review score (e.g. -2..+2)
                            changeset[Gerrit.CHANGESET_VALUE].append(int(approval["value"]))

            if granularity >= 3:
                # TDB
                pass

        # Done in this way to have an order (and not a direct cast)
        events[Gerrit.CHANGESET_ID] = changeset[Gerrit.CHANGESET_ID]
        events[Gerrit.CHANGESET_EVENT] = changeset[Gerrit.CHANGESET_EVENT]
        events[Gerrit.CHANGESET_DATE] = changeset[Gerrit.CHANGESET_DATE]
        events[Gerrit.CHANGESET_OWNER] = changeset[Gerrit.CHANGESET_OWNER]
        events[Gerrit.CHANGESET_EMAIL] = changeset[Gerrit.CHANGESET_EMAIL]
        events[Gerrit.CHANGESET_VALUE] = changeset[Gerrit.CHANGESET_VALUE]
        events[Gerrit.CHANGESET_REPO] = changeset[Gerrit.CHANGESET_REPO]

        return events
class Email(Events):
    """ Class used to 'eventize' mailing list items

    This splits information of each item based on a pre-configured mapping.
    There are several levels of events. These levels were created as a way
    to have more or less time consuming generation of events.
    """

    EVENT_OPEN = "EMAIL_SENT"

    # Fields supported by this module (when a DataFrame is returned)
    EMAIL_ID = "id"
    EMAIL_EVENT = "eventtype"
    EMAIL_DATE = "date"
    EMAIL_OWNER = "owner"
    EMAIL_SUBJECT = "subject"
    EMAIL_BODY = "body"
    EMAIL_ORIGIN = "mailinglist"

    def __init__(self, items):
        """ Main constructor of the class

        :param items: original list of JSON that contains all info about a commit
        :type items: list
        """
        self.items = items

    def eventize(self, granularity):
        """ This splits the JSON information found at self.events into the
        several events. For this there are three different levels of time
        consuming actions: 1-soft, 2-medium and 3-hard.

        Level 1 provides one EMAIL_SENT event per message.
        Level 2 not implemented
        Level 3 not implemented

        :param granularity: Levels of time consuming actions to calculate events
        :type granularity: integer
        :returns: Pandas dataframe with splitted events.
        :rtype: pandas.DataFrame
        """
        field_names = (Email.EMAIL_ID, Email.EMAIL_EVENT, Email.EMAIL_DATE,
                       Email.EMAIL_OWNER, Email.EMAIL_SUBJECT,
                       Email.EMAIL_BODY, Email.EMAIL_ORIGIN)
        columns = {name: [] for name in field_names}

        for item in self.items:
            origin = item["origin"]
            mail = item["data"]

            if granularity == 1:
                columns[Email.EMAIL_ID].append(mail["Message-ID"])
                columns[Email.EMAIL_EVENT].append(Email.EVENT_OPEN)
                try:
                    sent_at = str_to_datetime(mail["Date"], ignoretz=True)
                except KeyError:
                    # Messages without a Date header get the epoch as sentinel
                    sent_at = str_to_datetime("1970-01-01")
                columns[Email.EMAIL_DATE].append(sent_at)
                columns[Email.EMAIL_OWNER].append(mail["From"])
                columns[Email.EMAIL_SUBJECT].append(mail["Subject"])
                try:
                    body = mail["body"]["plain"]
                except KeyError:
                    # No plain-text part available
                    body = "None"
                columns[Email.EMAIL_BODY].append(body)
                columns[Email.EMAIL_ORIGIN].append(origin)

            # Granularity levels 2 and 3 are not implemented for mail items

        # Assemble column by column to keep a deterministic column order
        events = pandas.DataFrame()
        for name in field_names:
            events[name] = columns[name]
        return events
|
dicortazar/ceres
|
cereslib/events/events.py
|
Python
|
lgpl-3.0
| 35,417
|
[
"Elk"
] |
fd46d4d55e59b267c4e1d55d70ed6b593bec9629e5ddfc5ba9b35dd980494def
|
#!/usr/bin/env python3
# Copyright (C) 2016-2017(H)
# Max Planck Institute for Polymer Research
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
###########################################################################
# #
# ESPResSo++ Python script for an methanol simulation #
# #
###########################################################################
import mpi4py.MPI as MPI
import espressopp
from espressopp import Real3D
from espressopp.tools import gromacs
import math
import os
import time
import sys
from math import sqrt
import random
import logging
from datetime import datetime
# For simulating the solvation of a small solute in aqueous solution
# with Thermodynamic Integration.
# Reads in atomistic coord file (gro) and topology (topol.top) written in gromacs format.
# Uses the AdResS scheme so that the SETTLE algorithm can be used.
# In this example, the atomistic region is made so large that the entire box is atomistic.
# For the AdResS scheme:
# assumes solute is small enough that it can be mapped to one coarse-grained particle
# assumes solute is listed first in .gro file, and solute will always be atomistic
# assumes solute is small enough that there are no non-bonded interactions within the solute
# solvent (water) molecules each correspond to one coarse-grained particle containing three atomistic particles

########################################################################
# 1. specification of the main system setup and simulation parameters  #
########################################################################

# solute indices
atSoluteIndices = list(range(1, 7))  # 1 to 6 inclusive
nSoluteAtoms = len(atSoluteIndices)
nSoluteCgparticles = 1

# indices of atoms in water molecules with adaptive resolution
atWaterIndices = list(range(7, 2086))  # water atoms, 7 to 2085 inclusive
nWaterAtoms = len(atWaterIndices)
nWaterAtomsPerMol = 3  # number of atoms per cg water bead
# BUG FIX: use integer division -- under Python 3 "/" yields a float, which
# would propagate into particle counts, prints and range() conversions below.
nWaterMols = nWaterAtoms // nWaterAtomsPerMol
adresRegionCentreAtIndex = 1  # index of atom at centre of AdResS region

# input coordinates
inputcrdfile = "conf.gro"
# atomistic forcefield
aatopfile = "topol.top"
# output trajectory file
trjfile = "trj.gro"

# system parameters
# NB cutoff
nbCutoff = 1.2
# VerletList skin size (also used for domain decomposition)
skin = 0.2
# the temperature of the system
temperatureConvFactor = 120.27239  # 1/(kB in kJ K-1 mol-1) (input vel should be in nm/ps), for converting from reduced units to K
#temperature = None
temperature = 298.0  # Kelvin
temperature = float(temperature)/temperatureConvFactor  # in units of kJ mol-1
pressure = None

# time step for the velocity verlet integrator
dt = 0.002  # ps
nSteps = 2000
nStepsPerOutput = 10
nStepsPerTrjoutput = 500
# BUG FIX: integer division (nSteps/nStepsPerOutput is a float in Python 3)
nOutput = nSteps // nStepsPerOutput

# Parameters for size of AdResS dimensions
ex_size = 20.00
hy_size = 1.00

# Parameters for Thermodynamic Integration
stateBIndices = atSoluteIndices  # indices of atoms whose charge and LJ parameters are zero in TI state B
# Decoupling schedule: charges are switched off first (indices 0-20),
# then the vdW interactions (indices 21-60).
lambdaVectorCoul = [0.00, 0.05, 0.10, 0.15, 0.20, 0.25, 0.30, 0.35, 0.40, 0.45, 0.50, 0.55, 0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95, 1.00, 1.000, 1.000, 1.000, 1.000, 1.000, 1.000, 1.000, 1.000, 1.000, 1.000, 1.000, 1.000, 1.000, 1.000, 1.000, 1.000, 1.000, 1.000, 1.000, 1.000, 1.000, 1.000, 1.000, 1.000, 1.000, 1.000, 1.000, 1.000, 1.000, 1.000, 1.000, 1.000, 1.000, 1.000, 1.000, 1.000, 1.000, 1.000, 1.000, 1.000]
lambdaVectorVdwl = [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.025, 0.050, 0.075, 0.100, 0.125, 0.150, 0.175, 0.200, 0.225, 0.250, 0.275, 0.300, 0.325, 0.350, 0.375, 0.400, 0.425, 0.450, 0.475, 0.500, 0.525, 0.550, 0.575, 0.600, 0.625, 0.650, 0.675, 0.700, 0.725, 0.750, 0.775, 0.800, 0.825, 0.850, 0.875, 0.900, 0.925, 0.950, 0.975, 1.000]
lambdaIndex = 0
lambdaTICoul = lambdaVectorCoul[lambdaIndex]
lambdaTIVdwl = lambdaVectorVdwl[lambdaIndex]
# Soft-core parameters for the TI potential
alphaSC = 0.5
powerSC = 1.0
sigmaSC = 0.3

dhdlFile = "dhdl.xvg"
# NOTE(review): file handle is kept open on purpose; dH/dl samples are
# appended during the run (the close happens later in the full script)
dhdlF = open(dhdlFile,'a')
# BUG FIX: the header line previously lacked the closing ")" after the lambda pair
dhdlF.write("#(coul-lambda, vdw-lambda) = ("+str(lambdaTICoul)+", "+str(lambdaTIVdwl)+")\n")

print('# radius of atomistic region = ',ex_size)
print('# thickness of hybrid region = ',hy_size)

# print ESPResSo++ version and compile info
print('# ',espressopp.Version().info())

# print simulation parameters (useful to have them in a log file)
print("# nbCutoff = ", nbCutoff)
print("# skin = ", skin)
print("# dt = ", dt)
print("# nSteps = ", nSteps)
print("# output every ",nStepsPerOutput," steps")
print("# trajectory output every ",nStepsPerTrjoutput," steps")
########################################################################
# 2. read in coordinates and topology
########################################################################

## get info on (complete) atomistic system ##
print('# Reading gromacs top and gro files...')

# call gromacs parser for processing the top file (and included files) and the gro file
# NOTE(review): the unpacking order below must match the return signature of
# espressopp.tools.gromacs.read exactly -- confirm against that module
defaults, atTypes, atomtypesDict, atMasses, atCharges, atomtypeparameters, atBondtypes, bondtypeparams, atAngletypes, angletypeparams, atDihedraltypes, dihedraltypeparams, impropertypeparams, atExclusions, atOnefourpairslist, atX, atY, atZ, atVX, atVY, atVZ, atResnames, atResid, Lx, Ly, Lz = gromacs.read(inputcrdfile,aatopfile)
# invert the name->type-id mapping so type ids can be translated back to names
reverseAtomtypesDict = dict([(v, k) for k, v in atomtypesDict.items()])

# system box size
box = (Lx, Ly, Lz)
print("# Box size = ", box)

nParticlesRead = len(atX)
print("# total number of particles read from atomistic config file = ",nParticlesRead)
print("# number of atomistic particles in solute = ",nSoluteAtoms)
print("# number of coarse-grained particles in solute = ",nSoluteCgparticles)
print("# number of atomistic particles in solvent = ",nWaterAtoms)
print("# number of coarse-grained particles in solvent = ",nWaterMols)
nParticlesTotal = nSoluteAtoms + nSoluteCgparticles + nWaterAtoms + nWaterMols
print("# total number of particles after setup = ",nParticlesTotal)

# sanity check: the .gro file must contain exactly the atoms declared above
if (nParticlesRead != (nSoluteAtoms+nWaterAtoms)):
    print("problem: no. particles in crd file != np. of atomistic particles specified")
    print("values: ",nParticlesRead,nSoluteAtoms+nWaterAtoms)
    quit()

# per-particle property lists, filled below in pid order
particleX = []
particleY = []
particleZ = []
particlePID = []
particleTypes = []
particleMasses = []
particleCharges = []
particleTypestring = []
particleVX = []
particleVY = []
particleVZ = []

#pids will be in order: atomistic solute atoms, atomistic water atoms, cg solute particle, cg water molecules

#atomistic particles (solute and water)
for i in range(nSoluteAtoms+nWaterAtoms):
    particlePID.append(i+1)
    particleMasses.append(atMasses[i])
    particleCharges.append(atCharges[i])
    particleTypes.append(atTypes[i])
    particleTypestring.append('atomistic__')
    particleX.append(atX[i])
    particleY.append(atY[i])
    particleZ.append(atZ[i])
    particleVX.append(atVX[i])
    particleVY.append(atVY[i])
    particleVZ.append(atVZ[i])

#cg solute particle
# new CG type id: +2 leaves a gap above the highest atomistic type id
typeCGSolute = max(reverseAtomtypesDict.keys())+2
reverseAtomtypesDict[typeCGSolute] = 'PCG'
cgPid = nSoluteAtoms + nWaterAtoms + 1
cgSoluteParticlesDict = {} #map particlePID of cg particle to original atomistic indices
cgSoluteParticlesDict[cgPid] = []
charge = 0.0 #not needed on CG particles
# CG solute mass is the sum of its atomistic members' masses
mass = 0.0
for j in range(int(nSoluteAtoms)):
    mass += atMasses[j]
    cgSoluteParticlesDict[cgPid].append(j+1)
particlePID.append(cgPid)
particleMasses.append(mass)
particleCharges.append(charge)
particleTypes.append(typeCGSolute)
particleTypestring.append('cg_solute__')
index = 0
particleX.append(atX[index]) #set to first atom value for the moment, will be reset to COM later
particleY.append(atY[index])
particleZ.append(atZ[index])
particleVX.append(atVX[index])
particleVY.append(atVY[index])
particleVZ.append(atVZ[index])

#cg water particles
typeCG=max(reverseAtomtypesDict.keys())+2
reverseAtomtypesDict[typeCG]='WCG'
for i in range(int(nWaterMols)):
    particlePID.append(i+1+nSoluteAtoms+nSoluteCgparticles+nWaterAtoms)
    # index of the O atom of this water molecule (assumes O,H,H ordering --
    # TODO confirm against the .gro file)
    indexO=atWaterIndices[3*i]-1
    particleMasses.append(atMasses[indexO]+atMasses[indexO+1]+atMasses[indexO+2])
    particleCharges.append(0.0)
    particleTypes.append(typeCG)
    particleTypestring.append('adres_cg___')
    particleX.append(atX[indexO]) # put CG particle on O for the moment, later CG particle will be positioned in centre
    particleY.append(atY[indexO])
    particleZ.append(atZ[indexO])
    particleVX.append(atVX[indexO]) # give CG particle velocity of O for the moment
    particleVY.append(atVY[indexO])
    particleVZ.append(atVZ[indexO])

# should be ~0 for a neutral system
print('# system total charge = ',sum(particleCharges))
########################################################################
# 2. setup of the system, random number geneartor and parallelisation  #
########################################################################

# create the basic system
system = espressopp.System()

# use the random number generator that is included within the ESPResSo++ package
# seed from the fractional part of the current time, scaled to an integer
xs = time.time()
seed = int(xs % int(xs) * 10000000000)
print("RNG Seed:", seed)
rng = espressopp.esutil.RNG()
rng.seed(seed)
system.rng = rng

# use orthorhombic periodic boundary conditions
system.bc = espressopp.bc.OrthorhombicBC(system.rng, box)
# set the skin size used for verlet lists and cell sizes
system.skin = skin
# get the number of CPUs to use
NCPUs = espressopp.MPI.COMM_WORLD.size
# calculate a regular 3D grid according to the number of CPUs available
nodeGrid = espressopp.tools.decomp.nodeGrid(NCPUs,box,nbCutoff,skin)
# calculate a 3D subgrid to speed up verlet list builds and communication
cellGrid = espressopp.tools.decomp.cellGrid(box, nodeGrid, nbCutoff, skin)
# create a domain decomposition particle storage with the calculated nodeGrid and cellGrid
# (the Adress variant keeps each atomistic tuple on the same node as its CG bead)
system.storage = espressopp.storage.DomainDecompositionAdress(system, nodeGrid, cellGrid)

print("# NCPUs = ", NCPUs)
print("# nodeGrid = ", nodeGrid)
print("# cellGrid = ", cellGrid)
########################################################################
# 4. adding the particles and build structure                          #
########################################################################
# property layout per particle row; 'adrat' is the AdResS flag
# (0 = coarse-grained/virtual particle, 1 = atomistic particle)
properties = ['id', 'type', 'pos', 'v', 'mass', 'q', 'adrat']
allParticles = []
tuples = []
#add particles in order CG1,AA11,AA12,AA13...CG2,AA21,AA22,AA23... etc.
mapAtToCgIndex = {}
#first adres particles (water: one CG particle followed by its atoms)
for i in range(int(nWaterMols)):
    # CG index after all solute atoms, solute CG particles and water atoms
    cgindex = i + nSoluteAtoms + nSoluteCgparticles + nWaterAtoms
    # tuple starts with the CG pid, then lists the atomistic pids it contains
    tmptuple = [particlePID[cgindex]]
    # first CG particle
    allParticles.append([particlePID[cgindex],
                         particleTypes[cgindex],
                         Real3D(particleX[cgindex],particleY[cgindex],particleZ[cgindex]),
                         Real3D(particleVX[cgindex],particleVY[cgindex],particleVZ[cgindex]),
                         particleMasses[cgindex],particleCharges[cgindex],0])
    # then AA particles
    for j in range(int(nWaterAtomsPerMol)):
        aaindex = i*nWaterAtomsPerMol + j + nSoluteAtoms
        tmptuple.append(particlePID[aaindex])
        allParticles.append([particlePID[aaindex],
                             particleTypes[aaindex],
                             Real3D(particleX[aaindex],particleY[aaindex],particleZ[aaindex]),
                             Real3D(particleVX[aaindex],particleVY[aaindex],particleVZ[aaindex]),
                             particleMasses[aaindex],particleCharges[aaindex],1])
        # remember which CG particle every atom belongs to
        mapAtToCgIndex[particlePID[aaindex]]=particlePID[cgindex]
    tuples.append(tmptuple)
# then solute
aaindex = 0
for i in range(int(nSoluteCgparticles)):
    cgindex = i + nSoluteAtoms + nWaterAtoms
    tmptuple = [particlePID[cgindex]]
    allParticles.append([particlePID[cgindex],particleTypes[cgindex],
                         Real3D(particleX[cgindex],particleY[cgindex],particleZ[cgindex]),
                         Real3D(particleVX[cgindex],particleVY[cgindex],particleVZ[cgindex]),
                         particleMasses[cgindex],particleCharges[cgindex],0])
    # atom ids (1-based) belonging to this solute CG particle
    soluteAtomsInCgParticle = cgSoluteParticlesDict[particlePID[cgindex]]
    for j in soluteAtomsInCgParticle:
        aaindex = j - 1
        tmptuple.append(particlePID[aaindex])
        allParticles.append([particlePID[aaindex],particleTypes[aaindex],
                             Real3D(particleX[aaindex],particleY[aaindex],particleZ[aaindex]),
                             Real3D(particleVX[aaindex],particleVY[aaindex],particleVZ[aaindex]),
                             particleMasses[aaindex],particleCharges[aaindex],1])
        mapAtToCgIndex[particlePID[aaindex]]=particlePID[cgindex]
    tuples.append(tmptuple)
print('# adding ',len(allParticles),' particles')
system.storage.addParticles(allParticles, *properties)
# create FixedTupleList object linking each CG particle to its atoms
ftpl = espressopp.FixedTupleListAdress(system.storage)
ftpl.addTuples(tuples)
system.storage.setFixedTuplesAdress(ftpl)
system.storage.decompose()
# debug dump, disabled: print file to check if all particles were correctly added
if (0):
    file=open('system.out','w')
    for i in range(1,nParticlesTotal+1):
        if i <= nSoluteAtoms + nWaterAtoms:
            vp = mapAtToCgIndex[i]
        else:
            vp = 0
        part = system.storage.getParticle(i)
        ptype = part.type
        st="%7d %d %7.3f %7.3f %7.3f %3d %5s %7.3f %7.3f %8s\n"%(i,vp,part.pos[0],part.pos[1],part.pos[2],ptype,reverseAtomtypesDict[ptype],part.mass,part.q,particleTypestring[i-1])
        file.write(st)
    file.close()
########################################################################
# 3. setup of the integrator and simulation ensemble                   #
########################################################################
# use a velocity Verlet integration scheme
integrator = espressopp.integrator.VelocityVerlet(system)
# set the integration step
integrator.dt = dt
# use a thermostat if the temperature is set
# NOTE(review): "temperature is not None" would be the idiomatic test
if (temperature != None):
    # create Langevin thermostat
    thermostat = espressopp.integrator.LangevinThermostat(system)
    # set Langevin friction constant
    thermostat.gamma = 10.0 # units ps-1
    print("# gamma for langevin thermostat = ",thermostat.gamma)
    # set temperature
    thermostat.temperature = temperature
    # switch on AdResS-aware thermostatting
    thermostat.adress = True
    print("# thermostat temperature = ", temperature*temperatureConvFactor)
    # tell the integrator to use this thermostat
    integrator.addExtension(thermostat)
else:
    print("#No thermostat")
########################################################################
# 6. define atomistic and adres interactions
########################################################################
## adres interactions ##
cm = adresRegionCentreAtIndex
print('# spherical moving atomistic region for adres centred on atom ',cm,' i.e. cg particle ',mapAtToCgIndex[cm])
# spherical atomistic region (explicit size ex_size, hybrid shell hy_size)
# following the CG particle that contains atom cm
verletlist = espressopp.VerletListAdress(system, cutoff=nbCutoff, adrcut=nbCutoff,
                                dEx=ex_size, dHy=hy_size,
                                pids=[mapAtToCgIndex[cm]], sphereAdr=True)
# set up LJ interaction according to the parameters read from the .top file
# (thermodynamic-integration variant: lambdaTIVdwl scales state-B particles)
lj_adres_interaction = gromacs.setLennardJonesInteractionsTI(system, defaults, atomtypeparameters, verletlist, nbCutoff, epsilonB=0.0, sigmaSC=sigmaSC, alphaSC=alphaSC, powerSC=powerSC, lambdaTI=lambdaTIVdwl, pidlist=stateBIndices, annihilate=False, adress=True, ftpl=ftpl)
# set up coulomb interactions according to the parameters read from the .top file
# !! Warning: this only works for reaction-field now!
qq_adres_interaction = gromacs.setCoulombInteractionsTI(system, verletlist, nbCutoff, atTypes, epsilon1=1, epsilon2=80, kappa=0, lambdaTI=lambdaTICoul, pidlist=stateBIndices, annihilate=False, adress=True, ftpl=ftpl)
# bonded (fixed list) interactions in solute (between atomistic particles, solute is one coarse-grained particle)
# only for solute, no bonded interactions for water
# set up LJ 1-4 interactions
onefourlist = espressopp.FixedPairListAdress(system.storage,ftpl)
onefourlist.addBonds(atOnefourpairslist)
lj14interaction=gromacs.setLennardJones14Interactions(system, defaults, atomtypeparameters, onefourlist, nbCutoff)
# set up coulomb 1-4 interactions
qq14_interactions=gromacs.setCoulomb14Interactions(system, defaults, onefourlist, nbCutoff, atTypes)
## set up bond interactions according to the parameters read from the .top file
bondedinteractions=gromacs.setBondedInteractionsAdress(system, atBondtypes, bondtypeparams,ftpl)
# set up angle interactions according to the parameters read from the .top file
angleinteractions=gromacs.setAngleInteractionsAdress(system, atAngletypes, angletypeparams,ftpl)
# set up dihedral interactions according to the parameters read from the .top file
dihedralinteractions=gromacs.setDihedralInteractionsAdress(system, atDihedraltypes, dihedraltypeparams,ftpl)
# uncomment the next line if necessary (methanol does not contain impropers)
# set up improper interactions according to the parameters read from the .top file
#improperinteractions=gromacs.setImproperInteractionsAdress(system, atImpropertypes, impropertypeparams,ftpl)
# create an exclusions list and uncomment the next line if necessary (methanol does not need this line)
#verletlist.exclude(cgExclusions)
#print '# ',len(cgExclusions),' exclusions'
count = system.getNumberOfInteractions()
print('# ',count,' interactions defined')
# settle water (rigid-water constraint algorithm)
molidlist=[]
for wm in range(int(nWaterMols)):
    molidlist.append(tuples[wm][0]) #assuming water is listed first
print('#Warning: settle set-up assumes water was listed first when tuples were constructed')
settlewaters = espressopp.integrator.Settle(system, ftpl, mO=15.9994, mH=1.008, distHH=0.15136, distOH=0.09572)
settlewaters.addMolecules(molidlist)
integrator.addExtension(settlewaters)
print('# Settling ',len(molidlist), ' waters')
# degrees-of-freedom correction for the SETTLE constraints:
# unconstrained dof = 3N - 3 (COM removed); each water atom contributes
# one constraint here (nconstr = nWaterAtoms)
nconstr = nWaterAtoms
nAtoms = nWaterAtoms + nSoluteAtoms
ndof_unconstr = nAtoms*3-3
ndof_constr = ndof_unconstr-nconstr
temp_correction_factor = float(ndof_unconstr)/float(ndof_constr)
print("# Correcting temperature for constraints, using factor: ",temp_correction_factor)
print("# calculated using nAtoms = ",nAtoms, "nconstraints = ",nconstr," and ndof_constr = ",ndof_constr," of which ",3*nWaterMols," are from SETTLE")
# add AdResS
adress = espressopp.integrator.Adress(system,verletlist,ftpl)
integrator.addExtension(adress)
# distribute atoms and CG molecules according to AdResS domain decomposition, place CG molecules in the center of mass
print('# Decomposing...')
espressopp.tools.AdressDecomp(system, integrator)
########################################################################
# 7. run                                                               #
########################################################################
# NOTE(review): this rebinds `temperature` (previously the target value used
# for the thermostat) to an observable; the thermostat is already configured
temperature = espressopp.analysis.Temperature(system)
print("# starting run...")
dump_conf_gro = espressopp.io.DumpGROAdress(system, ftpl, integrator, filename=trjfile, unfolded=False)
print('Start time: ', str(datetime.now()))
print("i*dt,Eb, EAng, Edih, EImp, ELj, Elj14, EQQ, EQQ14, Epot, T")
fmt='%5.5f %15.8g %15.8g %15.8g %15.8g %15.8g %15.8g %15.8g %15.8g %15.8f %15.8f\n'
integrator.run(0)
for k in range(int(nOutput)):
    i=k*nStepsPerOutput
    # energies are evaluated for the state BEFORE this output interval is run
    EQQ = 0.0
    EQQ14 = 0.0
    ELj = 0.0
    ELj14 = 0.0
    Eb = 0.0
    EAng = 0.0
    EDih = 0.0
    EImp = 0.0
    for bd in list(bondedinteractions.values()): Eb+=bd.computeEnergy()
    for ang in list(angleinteractions.values()): EAng+=ang.computeEnergy()
    for dih in list(dihedralinteractions.values()): EDih+=dih.computeEnergy()
    #for imp in improperinteractions.values(): EImp+=imp.computeEnergy()
    ELj= lj_adres_interaction.computeEnergy()
    ELj14 = lj14interaction.computeEnergy()
    EQQ = qq_adres_interaction.computeEnergy()
    EQQ14 = qq14_interactions.computeEnergy()
    # dH/dlambda terms for thermodynamic integration, streamed to dhdlF
    dhdlCoul = qq_adres_interaction.computeEnergyDeriv()
    dhdlVdwl = lj_adres_interaction.computeEnergyDeriv()
    dhdlF.write(str(i*dt)+" "+str(dhdlCoul)+" "+str(dhdlVdwl)+"\n")
    T = temperature.compute()
    Epot = Eb+EAng+EDih+EImp+EQQ+EQQ14+ELj+ELj14
    # temperature is rescaled by unit conversion and the constraint correction
    print((fmt%(i*dt,Eb, EAng, EDih, EImp, ELj, ELj14, EQQ, EQQ14, Epot, T*temperatureConvFactor*temp_correction_factor)), end='')
    sys.stdout.flush()
    dhdlF.flush()
    integrator.run(nStepsPerOutput)
    # bail out early if the simulation blew up (NaN position on particle 1)
    particle = system.storage.getParticle(1)
    if math.isnan(particle.pos[0]):
        quit()
    if (i > 0) and (i % nStepsPerTrjoutput == 0):
        dump_conf_gro.dump()
print('End time: ', str(datetime.now()))
|
espressopp/espressopp
|
examples/thd_integration_solvation/methanol-TI.py
|
Python
|
gpl-3.0
| 21,683
|
[
"ESPResSo",
"Gromacs"
] |
c4436babc7318132c8767e1b9d4281e81a81b304a72ab7615c1eea329f80b8d6
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# Name: setup.py
# Purpose:
# Author: Fabien Marteau <fabien.marteau@armadeus.com>
# Created: 16/02/2009
#-----------------------------------------------------------------------------
# Copyright (2008) Armadeus Systems
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
#-----------------------------------------------------------------------------
# Revision list :
#
# Date By Changes
#
#-----------------------------------------------------------------------------
from distutils.core import setup
import os,re
import sys
sys.path.append("src/bin/")
from version import *
def visit(libfile, dirname, names):
    """os.path.walk-style callback collecting regular files.

    Appends to *libfile* (mutated in place) the path of every entry of
    *names* inside *dirname* that is neither a directory nor part of an
    ``.svn`` metadata tree.

    :param libfile: list accumulating file paths
    :param dirname: directory currently being visited
    :param names:   entries of that directory
    """
    for name in names:
        filepath = os.path.join(dirname, name)
        if os.path.isdir(filepath):
            continue
        # BUG FIX: the dot must be escaped -- a bare "." matches any
        # character, so paths like "foosvn" were wrongly excluded.
        if not re.search(r"\.svn", filepath):
            # FIXME:
            # I can't find how to split with os.path !
            # will be used when package_data work
            #realpath = "/".join(filepath.split("/")[1:])
            #libfile.append(realpath)
            libfile.append(filepath)
def getTree(directory):
    """Return package-relative paths of all files under ``src/<directory>``.

    Recursively walks ``src/<directory>``, skipping anything inside an
    ``.svn`` metadata tree, and strips the leading ``src/`` component so
    the result is usable as ``package_data`` entries.

    Uses ``os.walk`` because ``os.path.walk`` was removed in Python 3
    (``os.walk`` exists on Python 2 as well, so this stays compatible).
    """
    libfile = []
    for dirpath, dirnames, filenames in os.walk(os.path.join("src", directory)):
        for fname in filenames:
            filepath = os.path.join(dirpath, fname)
            # dot escaped: a bare "." would match any character
            if not re.search(r"\.svn", filepath):
                libfile.append(filepath)
    # drop the leading "src/" path component
    new_libfile = []
    for path_file in libfile:
        new_libfile.append('/'.join(path_file.split('/')[1:]))
    if (directory == "platforms"):
        # debug output kept from the original implementation;
        # print() as a function works on both Python 2 and 3
        print(str(new_libfile))
    return new_libfile
# Package files: collect all data files shipped inside the package
package_files_list = []
package_files_list.extend(getTree("library"))
package_files_list.extend(getTree("platforms"))
package_files_list.extend(getTree("templates"))
package_files_list.extend(getTree("busses"))
package_files_list.extend(getTree("toolchains"))
package_files_list.extend(getTree("tests"))
# NOTE(review): absolute destination '/usr/bin' needs root at install time
# and ignores --prefix; confirm this is intended
datafiles=[ ('/usr/bin',['src/bin/pod']) ]
# getVersion() comes from the star-import of the project's version module
setup( name='PeriphOnDemand',
       version=getVersion(),
       url='https://sourceforge.net/projects/periphondemand',
       author='Fabien Marteau and Nicolas Colombain',
       author_email='<fabien.marteau@armadeus.com>,<nicolas.colombain@armadeus.com>,',
       maintainer='Fabien Marteau',
       maintainer_email='fabien.marteau@armadeus.com',
       package_dir = {"periphondemand":"src"},
       packages=['periphondemand',
                 'periphondemand.bin',
                 'periphondemand.bin.code',
                 'periphondemand.bin.code.vhdl',
                 'periphondemand.bin.commandline',
                 'periphondemand.bin.core',
                 'periphondemand.bin.toolchain',
                 'periphondemand.bin.utils',
                 ],
       package_data = {'periphondemand':package_files_list},
       data_files=datafiles,
       license='GPL',
       )
|
xcthulhu/periphondemand
|
setup.py
|
Python
|
lgpl-2.1
| 3,550
|
[
"VisIt"
] |
2dbd561d7bcbe08e762b78fd9ed447fe9010d97944dee8760038642229bbca69
|
"""
Functions for visualizing results of quantum dynamics simulations,
visualizations of quantum states and processes.
"""
# Public API of this module.
# FIX: 'sphereplot' was listed twice (duplicate removed).
__all__ = ['hinton', 'sphereplot', 'energy_level_diagram',
           'plot_energy_levels', 'fock_distribution',
           'plot_fock_distribution', 'wigner_fock_distribution',
           'plot_wigner_fock_distribution', 'plot_wigner',
           'plot_expectation_values', 'plot_spin_distribution_2d',
           'plot_spin_distribution_3d', 'plot_qubism', 'plot_schmidt',
           'complex_array_to_rgb', 'matrix_histogram',
           'matrix_histogram_complex', 'plot_wigner_sphere']
import warnings
import itertools as it
import numpy as np
from numpy import pi, array, sin, cos, angle, log2
from packaging.version import parse as parse_version
from qutip.qobj import Qobj, isket
from qutip.states import ket2dm
from qutip.wigner import wigner
from qutip.tensor import tensor
from qutip.matplotlib_utilities import complex_phase_cmap
from qutip.superoperator import vector_to_operator
from qutip.superop_reps import _super_to_superpauli, _isqubitdims
from qutip import settings
try:
    import matplotlib.pyplot as plt
    import matplotlib as mpl
    from matplotlib import cm
    from mpl_toolkits.mplot3d import Axes3D
    # Define a custom _axes3D function based on the matplotlib version.
    # The auto_add_to_figure keyword is new for matplotlib>=3.4.
    if parse_version(mpl.__version__) >= parse_version('3.4'):
        def _axes3D(fig, *args, **kwargs):
            ax = Axes3D(fig, *args, auto_add_to_figure=False, **kwargs)
            return fig.add_axes(ax)
    else:
        def _axes3D(*args, **kwargs):
            return Axes3D(*args, **kwargs)
except Exception:
    # Matplotlib is optional: the module must stay importable without it.
    # FIX: was a bare ``except:`` -- narrowed to ``Exception`` so
    # KeyboardInterrupt/SystemExit still propagate.
    pass
def plot_wigner_sphere(fig, ax, wigner, reflections):
    """Plots a coloured Bloch sphere.

    Parameters
    ----------
    fig : :obj:`matplotlib.figure.Figure`
        An instance of :obj:`~matplotlib.figure.Figure`.
    ax : :obj:`matplotlib.axes.Axes`
        An axes instance in the given figure.
    wigner : list of float
        The wigner transformation at `steps` different theta and phi.
    reflections : bool
        If the reflections of the sphere should be plotted as well.

    Notes
    ------
    Special thanks to Russell P Rundle for writing this function.
    """
    ax.set_xlabel("x")
    ax.set_ylabel("y")
    ax.set_zlabel("z")
    steps = len(wigner)
    # spherical grid: theta in [0, pi], phi in [0, 2*pi]
    theta = np.linspace(0, np.pi, steps)
    phi = np.linspace(0, 2 * np.pi, steps)
    x = np.outer(np.sin(theta), np.cos(phi))
    y = np.outer(np.sin(theta), np.sin(phi))
    z = np.outer(np.cos(theta), np.ones(steps))
    wigner = np.real(wigner)
    # colour scale made symmetric about zero
    wigner_max = np.real(np.amax(np.abs(wigner)))
    wigner_c1 = cm.seismic_r((wigner + wigner_max) / (2 * wigner_max))
    # Plot coloured Bloch sphere:
    ax.plot_surface(x, y, z, facecolors=wigner_c1, vmin=-wigner_max,
                    vmax=wigner_max, rcount=steps, ccount=steps, linewidth=0,
                    zorder=0.5, antialiased=None)
    if reflections:
        # NOTE(review): the three reflection colour arrays are computed from
        # the same full slice of `wigner` -- confirm whether per-plane
        # projections were intended
        wigner_c2 = cm.seismic_r((wigner[0:steps, 0:steps]+wigner_max) /
                                 (2*wigner_max))  # bottom
        wigner_c3 = cm.seismic_r((wigner[0:steps, 0:steps]+wigner_max) /
                                 (2*wigner_max))  # side
        wigner_c4 = cm.seismic_r((wigner[0:steps, 0:steps]+wigner_max) /
                                 (2*wigner_max))  # back
        # Plot bottom reflection:
        ax.plot_surface(x[0:steps, 0:steps], y[0:steps, 0:steps],
                        -1.5*np.ones((steps, steps)), facecolors=wigner_c2,
                        vmin=-wigner_max, vmax=wigner_max, rcount=steps/2,
                        ccount=steps/2, linewidth=0, zorder=0.5,
                        antialiased=False)
        # Plot side reflection:
        ax.plot_surface(-1.5*np.ones((steps, steps)), y[0:steps, 0:steps],
                        z[0:steps, 0:steps], facecolors=wigner_c3,
                        vmin=-wigner_max, vmax=wigner_max, rcount=steps/2,
                        ccount=steps/2, linewidth=0, zorder=0.5,
                        antialiased=False)
        # Plot back reflection:
        ax.plot_surface(x[0:steps, 0:steps], 1.5*np.ones((steps, steps)),
                        z[0:steps, 0:steps], facecolors=wigner_c4,
                        vmin=-wigner_max, vmax=wigner_max, rcount=steps/2,
                        ccount=steps/2, linewidth=0, zorder=0.5,
                        antialiased=False)
    # Create colourbar:
    m = cm.ScalarMappable(cmap=cm.seismic_r)
    m.set_array([-wigner_max, wigner_max])
    plt.colorbar(m, shrink=0.5, aspect=10)
    plt.show()
# Adopted from the SciPy Cookbook.
def _blob(x, y, w, w_max, area, color_fn, ax=None):
"""
Draws a square-shaped blob with the given area (< 1) at
the given coordinates.
"""
hs = np.sqrt(area) / 2
xcorners = array([x - hs, x + hs, x + hs, x - hs])
ycorners = array([y - hs, y - hs, y + hs, y + hs])
if ax is not None:
handle = ax
else:
handle = plt
handle.fill(xcorners, ycorners,
color=color_fn(w))
def _cb_labels(left_dims):
"""Creates plot labels for matrix elements in the computational basis.
Parameters
----------
left_dims : flat list of ints
Dimensions of the left index of a density operator. E. g.
[2, 3] for a qubit tensored with a qutrit.
Returns
-------
left_labels, right_labels : lists of strings
Labels for the left and right indices of a density operator
(kets and bras, respectively).
"""
# FIXME: assumes dims, such that we only need left_dims == dims[0].
basis_labels = list(map(",".join, it.product(*[
map(str, range(dim))
for dim in left_dims
])))
return [
map(fmt.format, basis_labels) for fmt in
(
r"$|{}\rangle$",
r"$\langle{}|$"
)
]
# Adopted from the SciPy Cookbook.
def hinton(rho, xlabels=None, ylabels=None, title=None, ax=None, cmap=None,
           label_top=True, color_style="scaled"):
    """Draws a Hinton diagram for visualizing a density matrix or superoperator.

    Parameters
    ----------
    rho : qobj
        Input density matrix or superoperator.
    xlabels : list of strings or False
        list of x labels
    ylabels : list of strings or False
        list of y labels
    title : string
        title of the plot (optional)
    ax : a matplotlib axes instance
        The axes context in which the plot will be drawn.
    cmap : a matplotlib colormap instance
        Color map to use when plotting.
    label_top : bool
        If True, x-axis labels will be placed on top, otherwise
        they will appear below the plot.
    color_style : string
        Determines how colors are assigned to each square:

        -  If set to ``"scaled"`` (default), each color is chosen by
           passing the absolute value of the corresponding matrix
           element into `cmap` with the sign of the real part.
        -  If set to ``"threshold"``, each square is plotted as
           the maximum of `cmap` for the positive real part and as
           the minimum for the negative part of the matrix element;
           note that this generalizes `"threshold"` to complex numbers.
        -  If set to ``"phase"``, each color is chosen according to
           the angle of the corresponding matrix element.

    Returns
    -------
    fig, ax : tuple
        A tuple of the matplotlib figure and axes instances used to produce
        the figure.

    Raises
    ------
    ValueError
        Input argument is not a quantum object.

    Examples
    --------
    >>> import qutip
    >>>
    >>> dm = qutip.rand_dm(4)
    >>> fig, ax = qutip.hinton(dm)
    >>> fig.show()
    >>>
    >>> qutip.settings.colorblind_safe = True
    >>> fig, ax = qutip.hinton(dm, color_style="threshold")
    >>> fig.show()
    >>> qutip.settings.colorblind_safe = False
    >>>
    >>> fig, ax = qutip.hinton(dm, color_style="phase")
    >>> fig.show()
    """
    # Apply default colormaps.
    # TODO: abstract this away into something that makes default
    # colormaps.
    cmap = (
        (cm.Greys_r if settings.colorblind_safe else cm.RdBu)
        if cmap is None else cmap
    )
    # Extract plotting data W from the input; non-Qobj input is used as a
    # raw matrix.
    if isinstance(rho, Qobj):
        if rho.isoper:
            W = rho.full()
            # Create default labels if none are given.
            if xlabels is None or ylabels is None:
                labels = _cb_labels(rho.dims[0])
                xlabels = xlabels if xlabels is not None else list(labels[0])
                ylabels = ylabels if ylabels is not None else list(labels[1])
        elif rho.isoperket:
            W = vector_to_operator(rho).full()
        elif rho.isoperbra:
            W = vector_to_operator(rho.dag()).full()
        elif rho.issuper:
            if not _isqubitdims(rho.dims):
                raise ValueError("Hinton plots of superoperators are "
                                 "currently only supported for qubits.")
            # Convert to a superoperator in the Pauli basis,
            # so that all the elements are real.
            sqobj = _super_to_superpauli(rho)
            nq = int(log2(sqobj.shape[0]) / 2)
            W = sqobj.full().T
            # Create default labels, too (tensor products of I, X, Y, Z).
            if (xlabels is None) or (ylabels is None):
                labels = list(map("".join, it.product("IXYZ", repeat=nq)))
                xlabels = xlabels if xlabels is not None else labels
                ylabels = ylabels if ylabels is not None else labels
        else:
            raise ValueError(
                "Input quantum object must be an operator or superoperator."
            )
    else:
        W = rho
    if ax is None:
        fig, ax = plt.subplots(1, 1, figsize=(8, 6))
    else:
        # caller supplied the axes; no new figure is created
        fig = None
    if not (xlabels or ylabels):
        ax.axis('off')
    if title:
        ax.set_title(title)
    ax.axis('equal')
    ax.set_frame_on(False)
    height, width = W.shape
    # NOTE(review): the normalisation uses only the DIAGONAL of W;
    # off-diagonal elements larger than w_max saturate the blob area
    # through the min(1, ...) below.
    w_max = 1.25 * max(abs(np.diag(np.array(W))))
    if w_max <= 0.0:
        w_max = 1.0
    # Set color_fn here.
    if color_style == "scaled":
        def color_fn(w):
            w = np.abs(w) * np.sign(np.real(w))
            return cmap(int((w + w_max) * 256 / (2 * w_max)))
    elif color_style == "threshold":
        def color_fn(w):
            w = np.real(w)
            return cmap(255 if w > 0 else 0)
    elif color_style == "phase":
        def color_fn(w):
            return cmap(int(255 * (np.angle(w) / 2 / np.pi + 0.5)))
    else:
        raise ValueError(
            "Unknown color style {} for Hinton diagrams.".format(color_style)
        )
    # background square covering the whole matrix area
    ax.fill(array([0, width, width, 0]), array([0, 0, height, height]),
            color=cmap(128))
    # one blob per matrix element; rows are drawn top-down
    for x in range(width):
        for y in range(height):
            _x = x + 1
            _y = y + 1
            _blob(
                _x - 0.5, height - _y + 0.5, W[x, y], w_max,
                min(1, abs(W[x, y]) / w_max), color_fn=color_fn, ax=ax)
    # color axis
    vmax = np.pi if color_style == "phase" else abs(W).max()
    norm = mpl.colors.Normalize(-vmax, vmax)
    cax, kw = mpl.colorbar.make_axes(ax, shrink=0.75, pad=.1)
    mpl.colorbar.ColorbarBase(cax, norm=norm, cmap=cmap)
    xtics = 0.5 + np.arange(width)
    # x axis
    ax.xaxis.set_major_locator(plt.FixedLocator(xtics))
    if xlabels:
        nxlabels = len(xlabels)
        if nxlabels != len(xtics):
            raise ValueError(f"got {nxlabels} xlabels but needed {len(xtics)}")
        ax.set_xticklabels(xlabels)
        if label_top:
            ax.xaxis.tick_top()
    ax.tick_params(axis='x', labelsize=14)
    # y axis
    ytics = 0.5 + np.arange(height)
    ax.yaxis.set_major_locator(plt.FixedLocator(ytics))
    if ylabels:
        nylabels = len(ylabels)
        if nylabels != len(ytics):
            raise ValueError(f"got {nylabels} ylabels but needed {len(ytics)}")
        # reversed because matrix rows are drawn top-down above
        ax.set_yticklabels(list(reversed(ylabels)))
    ax.tick_params(axis='y', labelsize=14)
    return fig, ax
def sphereplot(theta, phi, values, fig=None, ax=None, save=False):
    """Plots a matrix of values on a sphere

    Parameters
    ----------
    theta : float
        Angle with respect to z-axis.
        NOTE(review): passed to np.meshgrid, so a 1-D array of angles is
        expected despite the "float" above -- confirm and fix the docs.
    phi : float
        Angle in x-y plane.  Same remark as for *theta*.
    values : array
        Data set to be plotted
    fig : a matplotlib Figure instance
        The Figure canvas in which the plot will be drawn.
    ax : a matplotlib axes instance
        The axes context in which the plot will be drawn.
    save : bool {False , True}
        Whether to save the figure or not

    Returns
    -------
    fig, ax : tuple
        A tuple of the matplotlib figure and axes instances used to produce
        the figure.
    """
    if fig is None or ax is None:
        fig = plt.figure()
        ax = _axes3D(fig)
    thetam, phim = np.meshgrid(theta, phi)
    # spherical -> Cartesian unit sphere coordinates
    xx = sin(thetam) * cos(phim)
    yy = sin(thetam) * sin(phim)
    zz = cos(thetam)
    # radius from |values|, colour from the phase of values
    r = array(abs(values))
    ph = angle(values)
    # normalize color range based on phase angles in list ph
    nrm = mpl.colors.Normalize(ph.min(), ph.max())
    # plot with facecolors set to cm.jet colormap normalized to nrm
    ax.plot_surface(r * xx, r * yy, r * zz, rstride=1, cstride=1,
                    facecolors=cm.jet(nrm(ph)), linewidth=0)
    # create new axes on plot for colorbar and shrink it a bit.
    # pad shifts location of bar with respect to the main plot
    cax, kw = mpl.colorbar.make_axes(ax, shrink=.66, pad=.02)
    # create new colorbar in axes cax with cm jet and normalized to nrm like
    # our facecolors
    cb1 = mpl.colorbar.ColorbarBase(cax, cmap=cm.jet, norm=nrm)
    # add our colorbar label
    cb1.set_label('Angle')
    if save:
        plt.savefig("sphereplot.png")
    return fig, ax
def _remove_margins(axis):
"""
removes margins about z = 0 and improves the style
by monkey patching
"""
def _get_coord_info_new(renderer):
mins, maxs, centers, deltas, tc, highs = \
_get_coord_info_old(renderer)
mins += deltas / 4
maxs -= deltas / 4
return mins, maxs, centers, deltas, tc, highs
_get_coord_info_old = axis._get_coord_info
axis._get_coord_info = _get_coord_info_new
def _truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100):
    """
    Return a new colormap made from the [minval, maxval] portion of
    *cmap* (given as a name or an instance), sampled at *n* points.
    """
    if isinstance(cmap, str):
        cmap = plt.get_cmap(cmap)
    samples = cmap(np.linspace(minval, maxval, n))
    truncated_name = 'trunc({n},{a:.2f},{b:.2f})'.format(
        n=cmap.name, a=minval, b=maxval)
    return mpl.colors.LinearSegmentedColormap.from_list(truncated_name, samples)
def _stick_to_planes(stick, azim, ax, M, spacing):
"""adjusts xlim and ylim in way that bars will
Stick to xz and yz planes
"""
if stick is True:
azim = azim % 360
if 0 <= azim <= 90:
ax.set_ylim(1 - .5,)
ax.set_xlim(1 - .5,)
elif 90 < azim <= 180:
ax.set_ylim(1 - .5,)
ax.set_xlim(0, M.shape[0] + (.5 - spacing))
elif 180 < azim <= 270:
ax.set_ylim(0, M.shape[1] + (.5 - spacing))
ax.set_xlim(0, M.shape[0] + (.5 - spacing))
elif 270 < azim < 360:
ax.set_ylim(0, M.shape[1] + (.5 - spacing))
ax.set_xlim(1 - .5,)
def _update_yaxis(spacing, M, ax, ylabels):
    """
    Update the y-axis ticks and labels of a 3D bar plot.

    Places one tick per column of *M*, offset to sit at the centre of
    each bar, and applies *ylabels* (validated against the tick count)
    or zero-based default labels.
    """
    ytics = [y + (1 - (spacing / 2)) for y in range(M.shape[1])]
    ax.axes.w_yaxis.set_major_locator(plt.FixedLocator(ytics))
    if ylabels:
        nylabels = len(ylabels)
        if nylabels != len(ytics):
            raise ValueError(f"got {nylabels} ylabels but needed {len(ytics)}")
        ax.set_yticklabels(ylabels)
    else:
        # FIX: the original issued two consecutive set_yticklabels calls
        # here; only the second (zero-based labels) ever took effect, so
        # the dead first call (one-based labels) has been removed.
        ax.set_yticklabels([str(i) for i in range(M.shape[1])])
    ax.tick_params(axis='y', labelsize=14)
    ax.set_yticks(ytics)
def _update_xaxis(spacing, M, ax, xlabels):
    """
    Update the x-axis ticks and labels of a 3D bar plot.

    Places one tick per row of *M*, offset to sit at the centre of each
    bar, and applies *xlabels* (validated against the tick count) or
    zero-based default labels.
    """
    # BUG FIX: tick positions were generated from M.shape[1] (columns)
    # although the x-axis spans M.shape[0] rows -- inconsistent with the
    # set_xticks call below for non-square matrices.
    xtics = [x + (1 - (spacing / 2)) for x in range(M.shape[0])]
    ax.axes.w_xaxis.set_major_locator(plt.FixedLocator(xtics))
    if xlabels:
        nxlabels = len(xlabels)
        if nxlabels != len(xtics):
            raise ValueError(f"got {nxlabels} xlabels but needed {len(xtics)}")
        ax.set_xticklabels(xlabels)
    else:
        # FIX: of the two consecutive set_xticklabels calls in the
        # original, only the second (zero-based labels) took effect; the
        # dead first call has been removed.
        ax.set_xticklabels([str(i) for i in range(M.shape[0])])
    ax.tick_params(axis='x', labelsize=14)
    ax.set_xticks(xtics)
def _update_zaxis(ax, z_min, z_max, zticks):
    """
    Update the z-axis locator, ticks and limits of a 3D bar plot.
    """
    # one tick per unit, offset by 0.5
    ax.axes.w_zaxis.set_major_locator(plt.IndexLocator(1, 0.5))
    # an explicit list of tick positions overrides the locator
    if isinstance(zticks, list):
        ax.set_zticks(zticks)
    # always include z = 0 in the displayed range
    ax.set_zlim3d([min(z_min, 0), z_max])
def matrix_histogram(M, xlabels=None, ylabels=None, title=None, limits=None,
                     colorbar=True, fig=None, ax=None, options=None):
    """
    Draw a histogram for the matrix M, with the given x and y labels and title.

    Parameters
    ----------
    M : Matrix of Qobj
        The matrix to visualize

    xlabels : list of strings
        list of x labels

    ylabels : list of strings
        list of y labels

    title : string
        title of the plot (optional)

    limits : list/array with two float numbers
        The z-axis limits [min, max] (optional)

    ax : a matplotlib axes instance
        The axes context in which the plot will be drawn.

    colorbar : bool (default: True)
        show colorbar

    options : dict
        A dictionary containing extra options for the plot.
        The names (keys) and values of the options are
        described below:

        'zticks' : list of numbers
            A list of z-axis tick locations.

        'cmap' : string (default: 'jet')
            The name of the color map to use.

        'cmap_min' : float (default: 0.0)
            The lower bound to truncate the color map at.
            A value in range 0 - 1. The default, 0, leaves the lower
            bound of the map unchanged.

        'cmap_max' : float (default: 1.0)
            The upper bound to truncate the color map at.
            A value in range 0 - 1. The default, 1, leaves the upper
            bound of the map unchanged.

        'bars_spacing' : float (default: 0.1)
            spacing between bars.

        'bars_alpha' : float (default: 1.)
            transparency of bars, should be in range 0 - 1

        'bars_lw' : float (default: 0.5)
            linewidth of bars' edges.

        'bars_edgecolor' : color (default: 'k')
            The colors of the bars' edges.
            Examples: 'k', (0.1, 0.2, 0.5) or '#0f0f0f80'.

        'shade' : bool (default: True)
            Whether to shade the dark sides of the bars (True) or not (False).
            The shading is relative to plot's source of light.

        'azim' : float
            The azimuthal viewing angle.

        'elev' : float
            The elevation viewing angle.

        'proj_type' : string (default: 'ortho' if ax is not passed)
            The type of projection ('ortho' or 'persp')

        'stick' : bool (default: False)
            Changes xlim and ylim in such a way that bars next to
            XZ and YZ planes will stick to those planes.
            This option has no effect if ``ax`` is passed as a parameter.

        'cbar_pad' : float (default: 0.04)
            The fraction of the original axes between the colorbar
            and the new image axes.
            (i.e. the padding between the 3D figure and the colorbar).

        'cbar_to_z' : bool (default: False)
            Whether to set the color of maximum and minimum z-values to the
            maximum and minimum colors in the colorbar (True) or not (False).

        'figsize' : tuple of two numbers
            The size of the figure.

    Returns
    -------
    fig, ax : tuple
        A tuple of the matplotlib figure and axes instances used to produce
        the figure.

    Raises
    ------
    ValueError
        Input argument is not valid.
    """
    # default options
    # NOTE(review): several defaults here ('cmap' bars_spacing 0.2,
    # 'shade' False) differ from the values stated in the docstring above
    # -- the code values are authoritative.
    default_opts = {'figsize': None, 'cmap': 'jet', 'cmap_min': 0.,
                    'cmap_max': 1., 'zticks': None, 'bars_spacing': 0.2,
                    'bars_alpha': 1., 'bars_lw': 0.5, 'bars_edgecolor': 'k',
                    'shade': False, 'azim': -35, 'elev': 35,
                    'proj_type': 'ortho', 'stick': False,
                    'cbar_pad': 0.04, 'cbar_to_z': False}
    # update default_opts from input options
    if options is None:
        pass
    elif isinstance(options, dict):
        # check if keys in options dict are valid
        if set(options) - set(default_opts):
            raise ValueError("invalid key(s) found in options: "
                             f"{', '.join(set(options) - set(default_opts))}")
        else:
            # updating default options
            default_opts.update(options)
    else:
        raise ValueError("options must be a dictionary")
    if isinstance(M, Qobj):
        # extract matrix data from Qobj
        M = M.full()
    n = np.size(M)
    # one bar per matrix element, anchored at the element's (row, col)
    xpos, ypos = np.meshgrid(range(M.shape[0]), range(M.shape[1]))
    xpos = xpos.T.flatten() + 0.5
    ypos = ypos.T.flatten() + 0.5
    zpos = np.zeros(n)
    dx = dy = (1 - default_opts['bars_spacing']) * np.ones(n)
    # bar heights come from the real part only
    dz = np.real(M.flatten())
    if isinstance(limits, list) and len(limits) == 2:
        z_min = limits[0]
        z_max = limits[1]
    else:
        z_min = min(dz)
        z_max = max(dz)
        # avoid a degenerate (zero-height) z range
        if z_min == z_max:
            z_min -= 0.1
            z_max += 0.1
    if default_opts['cbar_to_z']:
        norm = mpl.colors.Normalize(min(dz), max(dz))
    else:
        norm = mpl.colors.Normalize(z_min, z_max)
    cmap = _truncate_colormap(default_opts['cmap'],
                              default_opts['cmap_min'],
                              default_opts['cmap_max'])
    colors = cmap(norm(dz))
    if ax is None:
        # create a fresh 3D axes with the requested view and projection
        fig = plt.figure(figsize=default_opts['figsize'])
        ax = _axes3D(fig,
                     azim=default_opts['azim'] % 360,
                     elev=default_opts['elev'] % 360)
        ax.set_proj_type(default_opts['proj_type'])
    ax.bar3d(xpos, ypos, zpos, dx, dy, dz, color=colors,
             edgecolors=default_opts['bars_edgecolor'],
             linewidths=default_opts['bars_lw'],
             alpha=default_opts['bars_alpha'],
             shade=default_opts['shade'])
    # remove vertical lines on xz and yz plane
    ax.yaxis._axinfo["grid"]['linewidth'] = 0
    ax.xaxis._axinfo["grid"]['linewidth'] = 0
    if title:
        ax.set_title(title)
    # x axis
    _update_xaxis(default_opts['bars_spacing'], M, ax, xlabels)
    # y axis
    _update_yaxis(default_opts['bars_spacing'], M, ax, ylabels)
    # z axis
    _update_zaxis(ax, z_min, z_max, default_opts['zticks'])
    # stick to xz and yz plane
    _stick_to_planes(default_opts['stick'],
                     default_opts['azim'], ax, M,
                     default_opts['bars_spacing'])
    # color axis
    if colorbar:
        cax, kw = mpl.colorbar.make_axes(ax, shrink=.75,
                                         pad=default_opts['cbar_pad'])
        mpl.colorbar.ColorbarBase(cax, cmap=cmap, norm=norm)
    # removing margins
    _remove_margins(ax.xaxis)
    _remove_margins(ax.yaxis)
    _remove_margins(ax.zaxis)
    return fig, ax
def matrix_histogram_complex(M, xlabels=None, ylabels=None,
                             title=None, limits=None, phase_limits=None,
                             colorbar=True, fig=None, ax=None,
                             threshold=None):
    """
    Draw a histogram for the amplitudes of matrix M, using the argument
    of each element for coloring the bars, with the given x and y labels
    and title.

    Bar height encodes ``abs(M[i, j])`` and bar color encodes
    ``arg(M[i, j])`` via a cyclic phase colormap.

    Parameters
    ----------
    M : Matrix of Qobj
        The matrix to visualize
    xlabels : list of strings
        list of x labels
    ylabels : list of strings
        list of y labels
    title : string
        title of the plot (optional)
    limits : list/array with two float numbers
        The z-axis limits [min, max] (optional)
    phase_limits : list/array with two float numbers
        The phase-axis (colorbar) limits [min, max] (optional)
    colorbar : bool
        Whether (True) or not (False) a colorbar should be attached.
    fig : a matplotlib Figure instance
        The Figure canvas in which the plot will be drawn.
    ax : a matplotlib axes instance
        The axes context in which the plot will be drawn.
    threshold: float (None)
        Threshold for when bars of smaller height should be transparent. If
        not set, all bars are colored according to the color map.

    Returns
    -------
    fig, ax : tuple
        A tuple of the matplotlib figure and axes instances used to produce
        the figure.

    Raises
    ------
    ValueError
        Input argument is not valid.
    """
    if isinstance(M, Qobj):
        # extract matrix data from Qobj
        M = M.full()

    n = np.size(M)
    # One bar per matrix element; shift by -0.5 so bars are centered on the
    # integer grid points.
    xpos, ypos = np.meshgrid(range(M.shape[0]), range(M.shape[1]))
    xpos = xpos.T.flatten() - 0.5
    ypos = ypos.T.flatten() - 0.5
    zpos = np.zeros(n)
    dx = dy = 0.8 * np.ones(n)
    Mvec = M.flatten()
    dz = abs(Mvec)

    # make small numbers real, to avoid random colors
    idx, = np.where(abs(Mvec) < 0.001)
    Mvec[idx] = abs(Mvec[idx])

    if phase_limits:  # check that limits is a list type
        phase_min = phase_limits[0]
        phase_max = phase_limits[1]
    else:
        phase_min = -pi
        phase_max = pi

    norm = mpl.colors.Normalize(phase_min, phase_max)
    cmap = complex_phase_cmap()

    # RGBA color per bar, derived from the phase of the element.
    colors = cmap(norm(angle(Mvec)))
    if threshold is not None:
        # Zero out the alpha channel of bars at or below the threshold so
        # they become fully transparent.
        colors[:, 3] = 1 * (dz > threshold)

    if ax is None:
        fig = plt.figure()
        ax = _axes3D(fig, azim=-35, elev=35)

    ax.bar3d(xpos, ypos, zpos, dx, dy, dz, color=colors)

    if title:
        ax.set_title(title)

    # x axis
    xtics = -0.5 + np.arange(M.shape[0])
    ax.axes.w_xaxis.set_major_locator(plt.FixedLocator(xtics))
    if xlabels:
        nxlabels = len(xlabels)
        if nxlabels != len(xtics):
            raise ValueError(f"got {nxlabels} xlabels but needed {len(xtics)}")
        ax.set_xticklabels(xlabels)
    ax.tick_params(axis='x', labelsize=12)

    # y axis
    ytics = -0.5 + np.arange(M.shape[1])
    ax.axes.w_yaxis.set_major_locator(plt.FixedLocator(ytics))
    if ylabels:
        nylabels = len(ylabels)
        if nylabels != len(ytics):
            raise ValueError(f"got {nylabels} ylabels but needed {len(ytics)}")
        ax.set_yticklabels(ylabels)
    ax.tick_params(axis='y', labelsize=12)

    # z axis
    if limits and isinstance(limits, list):
        ax.set_zlim3d(limits)
    else:
        ax.set_zlim3d([0, 1])  # use min/max
    # ax.set_zlabel('abs')

    # color axis
    if colorbar:
        cax, kw = mpl.colorbar.make_axes(ax, shrink=.75, pad=.0)
        cb = mpl.colorbar.ColorbarBase(cax, cmap=cmap, norm=norm)
        cb.set_ticks([-pi, -pi / 2, 0, pi / 2, pi])
        cb.set_ticklabels(
            (r'$-\pi$', r'$-\pi/2$', r'$0$', r'$\pi/2$', r'$\pi$'))
        cb.set_label('arg')

    return fig, ax
def plot_energy_levels(H_list, N=0, labels=None, show_ylabels=False,
                       figsize=(8, 12), fig=None, ax=None):
    """
    Plot the energy level diagrams for a list of Hamiltonians. Include
    up to N energy levels. For each element in H_list, the energy
    levels diagram for the cumulative Hamiltonian sum(H_list[0:n]) is plotted,
    where n is the index of an element in H_list.

    Parameters
    ----------
    H_list : List of Qobj
        A list of Hamiltonians.
    labels : List of string
        A list of labels for each Hamiltonian
    show_ylabels : Bool (default False)
        Show y labels to the left of energy levels of the initial
        Hamiltonian.
    N : int
        The number of energy levels to plot
    figsize : tuple (int,int)
        The size of the figure (width, height).
    fig : a matplotlib Figure instance
        The Figure canvas in which the plot will be drawn.
    ax : a matplotlib axes instance
        The axes context in which the plot will be drawn.

    Returns
    -------
    fig, ax : tuple
        A tuple of the matplotlib figure and axes instances used to produce
        the figure.

    Raises
    ------
    ValueError
        Input argument is not valid.
    """
    if not isinstance(H_list, list):
        raise ValueError("H_list must be a list of Qobj instances")

    if not fig and not ax:
        fig, ax = plt.subplots(1, 1, figsize=figsize)

    H = H_list[0]
    # N == 0 means "plot all levels"; otherwise clip to Hilbert-space size.
    N = H.shape[0] if N == 0 else min(H.shape[0], N)

    xticks = []
    yticks = []

    # Each Hamiltonian occupies a horizontal span of width 2 (solid level
    # bars), separated by a span of width 1 (dotted connector lines).
    x = 0
    evals0 = H.eigenenergies(eigvals=N)
    for e_idx, e in enumerate(evals0[:N]):
        ax.plot([x, x + 2], np.array([1, 1]) * e, 'b', linewidth=2)
        yticks.append(e)
    xticks.append(x + 1)
    x += 2

    for H1 in H_list[1:]:

        H = H + H1
        evals1 = H.eigenenergies()

        # Dotted lines connect the previous levels to the new ones.
        for e_idx, e in enumerate(evals1[:N]):
            ax.plot([x, x + 1], np.array([evals0[e_idx], e]), 'k:')
        x += 1

        # Solid bars for the levels of the cumulative Hamiltonian.
        for e_idx, e in enumerate(evals1[:N]):
            ax.plot([x, x + 2], np.array([1, 1]) * e, 'b', linewidth=2)
        xticks.append(x + 1)
        x += 2

        evals0 = evals1

    ax.set_frame_on(False)

    if show_ylabels:
        # Deduplicate near-degenerate levels (rounded to 1 decimal).
        yticks = np.unique(np.around(yticks, 1))
        ax.set_yticks(yticks)
    else:
        ax.axes.get_yaxis().set_visible(False)

    if labels:
        ax.get_xaxis().tick_bottom()
        ax.set_xticks(xticks)
        ax.set_xticklabels(labels, fontsize=16)
    else:
        ax.axes.get_xaxis().set_visible(False)

    return fig, ax
def energy_level_diagram(H_list, N=0, labels=None, show_ylabels=False,
                         figsize=(8, 12), fig=None, ax=None):
    """Deprecated alias for :func:`plot_energy_levels`.

    Kept for backward compatibility; emits a deprecation warning and
    forwards all arguments unchanged.
    """
    warnings.warn("Deprecated: Use plot_energy_levels")
    return plot_energy_levels(H_list, N=N, labels=labels, fig=fig, ax=ax,
                              show_ylabels=show_ylabels, figsize=figsize)
def plot_fock_distribution(rho, offset=0, fig=None, ax=None,
                           figsize=(8, 6), title=None, unit_y_range=True):
    """
    Plot the Fock distribution for a density matrix (or ket) that describes
    an oscillator mode.

    Parameters
    ----------
    rho : :class:`qutip.qobj.Qobj`
        The density matrix (or ket) of the state to visualize.
    offset : int
        Fock number of the first basis state (for offset Fock spaces).
    fig : a matplotlib Figure instance
        The Figure canvas in which the plot will be drawn.
    ax : a matplotlib axes instance
        The axes context in which the plot will be drawn.
    figsize : (width, height)
        Size of the figure created when neither 'fig' nor 'ax' is passed.
    title : string
        An optional title for the figure.
    unit_y_range : bool
        Fix the y-axis to the probability range [0, 1].

    Returns
    -------
    fig, ax : tuple
        A tuple of the matplotlib figure and axes instances used to produce
        the figure.
    """
    if not fig and not ax:
        fig, ax = plt.subplots(1, 1, figsize=figsize)

    # Kets are converted to density matrices so the diagonal gives the
    # occupation probabilities.
    if isket(rho):
        rho = ket2dm(rho)

    num_levels = rho.shape[0]
    occupations = np.real(rho.diag())
    ax.bar(np.arange(offset, offset + num_levels), occupations,
           color="green", alpha=0.6, width=0.8)

    if unit_y_range:
        ax.set_ylim(0, 1)

    ax.set_xlim(-.5 + offset, num_levels + offset)
    ax.set_xlabel('Fock number', fontsize=12)
    ax.set_ylabel('Occupation probability', fontsize=12)

    if title:
        ax.set_title(title)

    return fig, ax
def fock_distribution(rho, offset=0, fig=None, ax=None,
                      figsize=(8, 6), title=None, unit_y_range=True):
    """Deprecated alias for :func:`plot_fock_distribution`.

    Kept for backward compatibility; emits a deprecation warning and
    forwards all arguments unchanged.
    """
    warnings.warn("Deprecated: Use plot_fock_distribution")
    return plot_fock_distribution(
        rho, offset=offset, fig=fig, ax=ax, figsize=figsize,
        title=title, unit_y_range=unit_y_range)
def plot_wigner(rho, fig=None, ax=None, figsize=(6, 6),
                cmap=None, alpha_max=7.5, colorbar=False,
                method='clenshaw', projection='2d'):
    """
    Plot the the Wigner function for a density matrix (or ket) that describes
    an oscillator mode.

    Parameters
    ----------
    rho : :class:`qutip.qobj.Qobj`
        The density matrix (or ket) of the state to visualize.
    fig : a matplotlib Figure instance
        The Figure canvas in which the plot will be drawn.
    ax : a matplotlib axes instance
        The axes context in which the plot will be drawn.
    figsize : (width, height)
        The size of the matplotlib figure (in inches) if it is to be created
        (that is, if no 'fig' and 'ax' arguments are passed).
    cmap : a matplotlib cmap instance
        The colormap.
    alpha_max : float
        The span of the x and y coordinates (both [-alpha_max, alpha_max]).
    colorbar : bool
        Whether (True) or not (False) a colorbar should be attached to the
        Wigner function graph.
    method : string {'clenshaw', 'iterative', 'laguerre', 'fft'}
        The method used for calculating the wigner function. See the
        documentation for qutip.wigner for details.
    projection: string {'2d', '3d'}
        Specify whether the Wigner function is to be plotted as a
        contour graph ('2d') or surface plot ('3d').

    Returns
    -------
    fig, ax : tuple
        A tuple of the matplotlib figure and axes instances used to produce
        the figure.

    Raises
    ------
    ValueError
        If ``projection`` is neither '2d' nor '3d'.
    """
    if not fig and not ax:
        if projection == '2d':
            fig, ax = plt.subplots(1, 1, figsize=figsize)
        elif projection == '3d':
            fig = plt.figure(figsize=figsize)
            ax = fig.add_subplot(1, 1, 1, projection='3d')
        else:
            raise ValueError('Unexpected value of projection keyword argument')

    if isket(rho):
        rho = ket2dm(rho)

    xvec = np.linspace(-alpha_max, alpha_max, 200)
    W0 = wigner(rho, xvec, xvec, method=method)

    # Some methods return (W, yvec) as a tuple; the others return W with
    # yvec implicitly equal to xvec.
    W, yvec = W0 if isinstance(W0, tuple) else (W0, xvec)

    wlim = abs(W).max()

    if cmap is None:
        cmap = cm.get_cmap('RdBu')

    if projection == '2d':
        # Symmetric normalization around 0 so negative quasi-probability
        # regions are visually distinct.
        cf = ax.contourf(xvec, yvec, W, 100,
                         norm=mpl.colors.Normalize(-wlim, wlim), cmap=cmap)
    elif projection == '3d':
        X, Y = np.meshgrid(xvec, xvec)
        # NOTE(review): this passes W0 (not W) to plot_surface; if the method
        # returned a (W, yvec) tuple this would fail — presumably the tuple
        # case never occurs with a 3d projection. TODO confirm.
        cf = ax.plot_surface(X, Y, W0, rstride=5, cstride=5, linewidth=0.5,
                             norm=mpl.colors.Normalize(-wlim, wlim), cmap=cmap)
    else:
        raise ValueError('Unexpected value of projection keyword argument.')

    # Identity check: yvec is the same object as xvec unless the method
    # supplied its own y grid.
    if xvec is not yvec:
        ax.set_ylim(xvec.min(), xvec.max())

    ax.set_xlabel(r'$\rm{Re}(\alpha)$', fontsize=12)
    ax.set_ylabel(r'$\rm{Im}(\alpha)$', fontsize=12)

    if colorbar:
        fig.colorbar(cf, ax=ax)

    ax.set_title("Wigner function", fontsize=12)

    return fig, ax
def plot_wigner_fock_distribution(rho, fig=None, axes=None, figsize=(8, 4),
                                  cmap=None, alpha_max=7.5, colorbar=False,
                                  method='iterative', projection='2d'):
    """
    Plot the Fock distribution and the Wigner function for a density matrix
    (or ket) that describes an oscillator mode, side by side.

    Parameters
    ----------
    rho : :class:`qutip.qobj.Qobj`
        The density matrix (or ket) of the state to visualize.
    fig : a matplotlib Figure instance
        The Figure canvas in which the plot will be drawn.
    axes : a list of two matplotlib axes instances
        The axes context in which the plot will be drawn.
    figsize : (width, height)
        Size of the figure created when neither 'fig' nor 'axes' is passed.
    cmap : a matplotlib cmap instance
        The colormap.
    alpha_max : float
        The span of the x and y coordinates (both [-alpha_max, alpha_max]).
    colorbar : bool
        Whether a colorbar should be attached to the Wigner function graph.
    method : string {'iterative', 'laguerre', 'fft'}
        The method used for calculating the wigner function. See the
        documentation for qutip.wigner for details.
    projection: string {'2d', '3d'}
        Whether the Wigner function is drawn as a contour graph ('2d') or
        a surface plot ('3d').

    Returns
    -------
    fig, ax : tuple
        A tuple of the matplotlib figure and axes instances used to produce
        the figure.

    Raises
    ------
    ValueError
        If ``projection`` is neither '2d' nor '3d'.
    """
    if not fig and not axes:
        if projection == '2d':
            fig, axes = plt.subplots(1, 2, figsize=figsize)
        elif projection == '3d':
            fig = plt.figure(figsize=figsize)
            axes = [fig.add_subplot(1, 2, 1),
                    fig.add_subplot(1, 2, 2, projection='3d')]
        else:
            raise ValueError('Unexpected value of projection keyword argument')

    if isket(rho):
        rho = ket2dm(rho)

    # Left panel: Fock-number occupations; right panel: Wigner function.
    plot_fock_distribution(rho, fig=fig, ax=axes[0])
    plot_wigner(rho, fig=fig, ax=axes[1], figsize=figsize, cmap=cmap,
                colorbar=colorbar, alpha_max=alpha_max,
                method=method, projection=projection)

    return fig, axes
def wigner_fock_distribution(rho, fig=None, axes=None, figsize=(8, 4),
                             cmap=None, alpha_max=7.5, colorbar=False,
                             method='iterative'):
    """Deprecated alias for :func:`plot_wigner_fock_distribution`.

    Kept for backward compatibility; emits a deprecation warning and
    forwards all arguments unchanged.
    """
    warnings.warn("Deprecated: Use plot_wigner_fock_distribution")
    return plot_wigner_fock_distribution(
        rho, fig=fig, axes=axes, figsize=figsize, cmap=cmap,
        alpha_max=alpha_max, colorbar=colorbar, method=method)
def plot_expectation_values(results, ylabels=None, title=None,
                            show_legend=False, fig=None, axes=None,
                            figsize=(8, 4)):
    """
    Visualize the results (expectation values) for an evolution solver.
    `results` is assumed to be an instance of Result, or a list of Result
    instances.

    Parameters
    ----------
    results : (list of) :class:`qutip.solver.Result`
        List of results objects returned by any of the QuTiP evolution solvers.
    ylabels : list of strings
        The y-axis labels. List should be of the same length as `results`.
        Defaults to no labels.
    title : string
        The title of the figure.
    show_legend : bool
        Whether or not to show the legend.
    fig : a matplotlib Figure instance
        The Figure canvas in which the plot will be drawn.
    axes : a matplotlib axes instance
        The axes context in which the plot will be drawn.
    figsize : (width, height)
        The size of the matplotlib figure (in inches) if it is to be created
        (that is, if no 'fig' and 'ax' arguments are passed).

    Returns
    -------
    fig, ax : tuple
        A tuple of the matplotlib figure and axes instances used to produce
        the figure.
    """
    # Fixed: the default was a mutable list literal (``ylabels=[]``), which is
    # shared across calls; ``None`` with a fallback is the safe equivalent.
    if ylabels is None:
        ylabels = []

    if not isinstance(results, list):
        results = [results]

    # One subplot row per expectation operator (use the largest count across
    # all supplied results).
    n_e_ops = max(len(result.expect) for result in results)

    if not fig or not axes:
        if not figsize:
            figsize = (12, 3 * n_e_ops)
        fig, axes = plt.subplots(n_e_ops, 1, sharex=True,
                                 figsize=figsize, squeeze=False)

    for result in results:
        for e_idx, e in enumerate(result.expect):
            axes[e_idx, 0].plot(result.times, e,
                                label="%s [%d]" % (result.solver, e_idx))

    if title:
        fig.suptitle(title)

    axes[n_e_ops - 1, 0].set_xlabel("time", fontsize=12)
    for n in range(n_e_ops):
        if show_legend:
            axes[n, 0].legend()
        if ylabels:
            axes[n, 0].set_ylabel(ylabels[n], fontsize=12)

    return fig, axes
def plot_spin_distribution_2d(P, THETA, PHI,
                              fig=None, ax=None, figsize=(8, 8)):
    """
    Plot a spin distribution function (given as meshgrid data) with a 2D
    projection where the surface of the unit sphere is mapped on the unit disk.

    Parameters
    ----------
    P : matrix
        Distribution values as a meshgrid matrix.
    THETA : matrix
        Meshgrid matrix for the theta coordinate.
    PHI : matrix
        Meshgrid matrix for the phi coordinate.
    fig : a matplotlib figure instance
        The figure canvas on which the plot will be drawn.
    ax : a matplotlib axis instance
        The axis context in which the plot will be drawn.
    figsize : (width, height)
        The size of the matplotlib figure (in inches) if it is to be created
        (that is, if no 'fig' and 'ax' arguments are passed).

    Returns
    -------
    fig, ax : tuple
        A tuple of the matplotlib figure and axes instances used to produce
        the figure.
    """
    if not fig or not ax:
        if not figsize:
            figsize = (8, 8)
        fig, ax = plt.subplots(1, 1, figsize=figsize)

    # Project the sphere onto a disk: theta maps to Y, phi maps to X with a
    # sqrt(cos) factor so area near the poles is compressed.
    Y = (THETA - pi / 2) / (pi / 2)
    X = (pi - PHI) / pi * np.sqrt(cos(THETA - pi / 2))

    # Use a diverging colormap when the distribution takes (numerically
    # significant) negative values. Fixed: the threshold was -1e12, which is
    # effectively never reached; the intended tolerance is -1e-12.
    if P.min() < -1e-12:
        cmap = cm.RdBu
    else:
        cmap = cm.RdYlBu

    ax.pcolor(X, Y, P.real, cmap=cmap)
    ax.set_xlabel(r'$\varphi$', fontsize=18)
    ax.set_ylabel(r'$\theta$', fontsize=18)

    ax.set_xticks([-1, 0, 1])
    ax.set_xticklabels([r'$0$', r'$\pi$', r'$2\pi$'], fontsize=18)
    ax.set_yticks([-1, 0, 1])
    ax.set_yticklabels([r'$\pi$', r'$\pi/2$', r'$0$'], fontsize=18)

    return fig, ax
def plot_spin_distribution_3d(P, THETA, PHI,
                              fig=None, ax=None, figsize=(8, 6)):
    """Plots a matrix of values on a sphere

    Parameters
    ----------
    P : matrix
        Distribution values as a meshgrid matrix.
    THETA : matrix
        Meshgrid matrix for the theta coordinate.
    PHI : matrix
        Meshgrid matrix for the phi coordinate.
    fig : a matplotlib figure instance
        The figure canvas on which the plot will be drawn.
    ax : a matplotlib axis instance
        The axis context in which the plot will be drawn.
    figsize : (width, height)
        The size of the matplotlib figure (in inches) if it is to be created
        (that is, if no 'fig' and 'ax' arguments are passed).

    Returns
    -------
    fig, ax : tuple
        A tuple of the matplotlib figure and axes instances used to produce
        the figure.
    """
    if fig is None or ax is None:
        fig = plt.figure(figsize=figsize)
        ax = _axes3D(fig, azim=-35, elev=35)

    # Cartesian coordinates of the unit sphere for the given meshgrid.
    xx = sin(THETA) * cos(PHI)
    yy = sin(THETA) * sin(PHI)
    zz = cos(THETA)

    # Use a diverging colormap with a symmetric norm when the distribution
    # takes (numerically significant) negative values. Fixed: the threshold
    # was -1e12, which is effectively never reached; the intended tolerance
    # is -1e-12.
    if P.min() < -1e-12:
        cmap = cm.RdBu
        norm = mpl.colors.Normalize(-P.max(), P.max())
    else:
        cmap = cm.RdYlBu
        norm = mpl.colors.Normalize(P.min(), P.max())

    ax.plot_surface(xx, yy, zz, rstride=1, cstride=1,
                    facecolors=cmap(norm(P)), linewidth=0)
    cax, kw = mpl.colorbar.make_axes(ax, shrink=.66, pad=.02)
    cb1 = mpl.colorbar.ColorbarBase(cax, cmap=cmap, norm=norm)
    cb1.set_label('magnitude')

    return fig, ax
#
# Qubism and other qubistic visualizations
#
def complex_array_to_rgb(X, theme='light', rmax=None):
    """
    Convert an array of complex numbers to an array of [r, g, b] colors.

    The phase of each entry sets the hue; the magnitude (relative to
    ``rmax``) sets the saturation ('light' theme) or the value
    ('dark' theme). Especially for use with imshow for complex plots.

    For more info on coloring, see:
        Emilia Petrisor,
        Visualizing complex-valued functions with Matplotlib and Mayavi
        https://nbviewer.ipython.org/github/empet/Math/blob/master/DomainColoring.ipynb

    Parameters
    ----------
    X : array
        Array (of any dimension) of complex numbers.
    theme : 'light' (default) or 'dark'
        Set coloring theme for mapping complex values into colors.
    rmax : float
        Maximal abs value for color normalization.
        If None (default), uses np.abs(X).max().

    Returns
    -------
    Y : array
        Array of colors (of shape X.shape + (3,)).
    """
    absmax = rmax or np.abs(X).max()
    if absmax == 0.:
        # Guard against division by zero for an all-zero input.
        absmax = 1.

    hsv = np.zeros(X.shape + (3,), dtype='float')
    # Hue: phase mapped onto [0, 1).
    hsv[..., 0] = np.angle(X) / (2 * pi) % 1
    magnitude = np.clip(np.abs(X) / absmax, 0, 1)
    if theme == 'light':
        hsv[..., 1] = magnitude
        hsv[..., 2] = 1
    elif theme == 'dark':
        hsv[..., 1] = 1
        hsv[..., 2] = magnitude
    return mpl.colors.hsv_to_rgb(hsv)
def _index_to_sequence(i, dim_list):
"""
For a matrix entry with index i it returns state it corresponds to.
In particular, for dim_list=[2]*n it returns i written as a binary number.
Parameters
----------
i : int
Index in a matrix.
dim_list : list of int
List of dimensions of consecutive particles.
Returns
-------
seq : list
List of coordinates for each particle.
"""
res = []
j = i
for d in reversed(dim_list):
j, s = divmod(j, d)
res.append(s)
return list(reversed(res))
def _sequence_to_index(seq, dim_list):
"""
Inverse of _index_to_sequence.
Parameters
----------
seq : list of ints
List of coordinates for each particle.
dim_list : list of int
List of dimensions of consecutive particles.
Returns
-------
i : list
Index in a matrix.
"""
i = 0
for s, d in zip(seq, dim_list):
i *= d
i += s
return i
def _to_qubism_index_pair(i, dim_list, how='pairs'):
    """
    For a matrix entry with index i
    it returns x, y coordinates in qubism mapping.

    Parameters
    ----------
    i : int
        Index in a matrix.
    dim_list : list of int
        List of dimensions of consecutive particles.
    how : 'pairs' ('default'), 'pairs_skewed' or 'before_after'
        Type of qubistic plot.

    Returns
    -------
    x, y : tuple of ints
        List of coordinates for each particle.
    """
    seq = _index_to_sequence(i, dim_list)
    if how == 'pairs':
        # Even-position particles give the row, odd-position the column.
        return (_sequence_to_index(seq[1::2], dim_list[1::2]),
                _sequence_to_index(seq[::2], dim_list[::2]))
    if how == 'pairs_skewed':
        dims_even = dim_list[::2]
        y = _sequence_to_index(seq[::2], dims_even)
        # Column is built from the per-pair differences (mod dimension).
        diffs = [(later - earlier) % d
                 for earlier, later, d in zip(seq[::2], seq[1::2], dims_even)]
        return _sequence_to_index(diffs, dims_even), y
    if how == 'before_after':
        # https://en.wikipedia.org/wiki/File:Ising-tartan.png
        half = len(dim_list) // 2
        y = _sequence_to_index(reversed(seq[:half]),
                               reversed(dim_list[:half]))
        x = _sequence_to_index(seq[half:], dim_list[half:])
        return x, y
    raise Exception("No such 'how'.")
def _sequence_to_latex(seq, style='ket'):
"""
For a sequence of particle states generate LaTeX code.
Parameters
----------
seq : list of ints
List of coordinates for each particle.
style : 'ket' (default), 'bra' or 'bare'
Style of LaTeX (i.e. |01> or <01| or 01, respectively).
Returns
-------
latex : str
LaTeX output.
"""
if style == 'ket':
latex = "$\\left|{0}\\right\\rangle$"
elif style == 'bra':
latex = "$\\left\\langle{0}\\right|$"
elif style == 'bare':
latex = "${0}$"
else:
raise Exception("No such style.")
return latex.format("".join(map(str, seq)))
def plot_qubism(ket, theme='light', how='pairs',
                grid_iteration=1, legend_iteration=0,
                fig=None, ax=None, figsize=(6, 6)):
    """
    Qubism plot for pure states of many qudits. Works best for spin chains,
    especially with even number of particles of the same dimension. Allows to
    see entanglement between first 2k particles and the rest.

    Parameters
    ----------
    ket : Qobj
        Pure state for plotting.
    theme : 'light' (default) or 'dark'
        Set coloring theme for mapping complex values into colors.
        See: complex_array_to_rgb.
    how : 'pairs' (default), 'pairs_skewed' or 'before_after'
        Type of Qubism plotting. Options:

        - 'pairs' - typical coordinates,
        - 'pairs_skewed' - for ferromagnetic/antiferromagnetic plots,
        - 'before_after' - related to Schmidt plot (see also: plot_schmidt).

    grid_iteration : int (default 1)
        Helper lines to be drawn on plot.
        Show tiles for 2*grid_iteration particles vs all others.
    legend_iteration : int (default 0) or 'grid_iteration' or 'all'
        Show labels for first ``2*legend_iteration`` particles. Option
        'grid_iteration' sets the same number of particles as for
        grid_iteration. Option 'all' makes label for all particles. Typically
        it should be 0, 1, 2 or perhaps 3.
    fig : a matplotlib figure instance
        The figure canvas on which the plot will be drawn.
    ax : a matplotlib axis instance
        The axis context in which the plot will be drawn.
    figsize : (width, height)
        The size of the matplotlib figure (in inches) if it is to be created
        (that is, if no 'fig' and 'ax' arguments are passed).

    Returns
    -------
    fig, ax : tuple
        A tuple of the matplotlib figure and axes instances used to produce
        the figure.

    Notes
    -----
    See also [1]_.

    References
    ----------
    .. [1] J. Rodriguez-Laguna, P. Migdal, M. Ibanez Berganza, M. Lewenstein
       and G. Sierra, *Qubism: self-similar visualization of many-body
       wavefunctions*, `New J. Phys. 14 053028
       <https://dx.doi.org/10.1088/1367-2630/14/5/053028>`_, arXiv:1112.3560
       (2012), open access.
    """
    if not isket(ket):
        raise Exception("Qubism works only for pure states, i.e. kets.")
        # add for dm? (perhaps a separate function, plot_qubism_dm)

    if not fig and not ax:
        fig, ax = plt.subplots(1, 1, figsize=figsize)

    dim_list = ket.dims[0]
    n = len(dim_list)

    # for odd number of particles - pixels are rectangular
    if n % 2 == 1:
        ket = tensor(ket, Qobj([1] * dim_list[-1]))
        dim_list = ket.dims[0]
        n += 1

    ketdata = ket.full()

    if how == 'pairs':
        dim_list_y = dim_list[::2]
        dim_list_x = dim_list[1::2]
    elif how == 'pairs_skewed':
        dim_list_y = dim_list[::2]
        dim_list_x = dim_list[1::2]
        if dim_list_x != dim_list_y:
            raise Exception("For 'pairs_skewed' pairs " +
                            "of dimensions need to be the same.")
    elif how == 'before_after':
        dim_list_y = list(reversed(dim_list[:(n // 2)]))
        dim_list_x = dim_list[(n // 2):]
    else:
        raise Exception("No such 'how'.")

    size_x = np.prod(dim_list_x)
    size_y = np.prod(dim_list_y)

    # Place each amplitude at its qubism (x, y) pixel.
    qub = np.zeros([size_x, size_y], dtype=complex)
    for i in range(ketdata.size):
        qub[_to_qubism_index_pair(i, dim_list, how=how)] = ketdata[i, 0]
    qub = qub.transpose()

    # Grid lines separating the top-level tiles.
    quadrants_x = np.prod(dim_list_x[:grid_iteration])
    quadrants_y = np.prod(dim_list_y[:grid_iteration])

    ticks_x = [size_x // quadrants_x * i for i in range(1, quadrants_x)]
    ticks_y = [size_y // quadrants_y * i for i in range(1, quadrants_y)]

    ax.set_xticks(ticks_x)
    ax.set_xticklabels([""] * (quadrants_x - 1))
    ax.set_yticks(ticks_y)
    ax.set_yticklabels([""] * (quadrants_y - 1))
    theme2color_of_lines = {'light': '#000000',
                            'dark': '#FFFFFF'}
    ax.grid(True, color=theme2color_of_lines[theme])
    ax.imshow(complex_array_to_rgb(qub, theme=theme),
              interpolation="none",
              extent=(0, size_x, 0, size_y))

    if legend_iteration == 'all':
        label_n = n // 2
    elif legend_iteration == 'grid_iteration':
        label_n = grid_iteration
    else:
        try:
            label_n = int(legend_iteration)
        except (TypeError, ValueError):
            # Fixed: was a bare ``except:``, which also swallowed
            # KeyboardInterrupt/SystemExit; only int() conversion failures
            # should be translated into this error.
            raise Exception("No such option for legend_iteration keyword " +
                            "argument. Use 'all', 'grid_iteration' or an " +
                            "integer.")

    if label_n:
        if how == 'before_after':
            dim_list_small = list(reversed(dim_list_y[-label_n:])) \
                + dim_list_x[:label_n]
        else:
            dim_list_small = []
            for j in range(label_n):
                dim_list_small.append(dim_list_y[j])
                dim_list_small.append(dim_list_x[j])

        scale_x = float(size_x) / np.prod(dim_list_x[:label_n])
        shift_x = 0.5 * scale_x
        scale_y = float(size_y) / np.prod(dim_list_y[:label_n])
        shift_y = 0.5 * scale_y

        # Scale the font so labels fit inside their tile.
        bbox = ax.get_window_extent().transformed(
            fig.dpi_scale_trans.inverted())
        fontsize = 35 * bbox.width / np.prod(dim_list_x[:label_n]) / label_n
        opts = {'fontsize': fontsize,
                'color': theme2color_of_lines[theme],
                'horizontalalignment': 'center',
                'verticalalignment': 'center'}
        for i in range(np.prod(dim_list_small)):
            x, y = _to_qubism_index_pair(i, dim_list_small, how=how)
            seq = _index_to_sequence(i, dim_list=dim_list_small)
            ax.text(scale_x * x + shift_x,
                    size_y - (scale_y * y + shift_y),
                    _sequence_to_latex(seq),
                    **opts)

    return fig, ax
def plot_schmidt(ket, splitting=None,
                 labels_iteration=(3, 2),
                 theme='light',
                 fig=None, ax=None, figsize=(6, 6)):
    """
    Plotting scheme related to Schmidt decomposition.
    Converts a state into a matrix (A_ij -> A_i^j),
    where rows are first particles and columns - last.

    See also: plot_qubism with how='before_after' for a similar plot.

    Parameters
    ----------
    ket : Qobj
        Pure state for plotting.
    splitting : int
        Plot for a number of first particles versus the rest.
        If not given, it is (number of particles + 1) // 2.
    theme : 'light' (default) or 'dark'
        Set coloring theme for mapping complex values into colors.
        See: complex_array_to_rgb.
    labels_iteration : int or pair of ints (default (3,2))
        Number of particles to be shown as tick labels,
        for first (vertical) and last (horizontal) particles, respectively.
    fig : a matplotlib figure instance
        The figure canvas on which the plot will be drawn.
    ax : a matplotlib axis instance
        The axis context in which the plot will be drawn.
    figsize : (width, height)
        The size of the matplotlib figure (in inches) if it is to be created
        (that is, if no 'fig' and 'ax' arguments are passed).

    Returns
    -------
    fig, ax : tuple
        A tuple of the matplotlib figure and axes instances used to produce
        the figure.
    """
    if not isket(ket):
        raise Exception("Schmidt plot works only for pure states, i.e. kets.")

    if not fig and not ax:
        fig, ax = plt.subplots(1, 1, figsize=figsize)

    dim_list = ket.dims[0]

    if splitting is None:
        splitting = (len(dim_list) + 1) // 2

    # A single int means "use the same count for rows and columns".
    if isinstance(labels_iteration, int):
        labels_iteration = labels_iteration, labels_iteration

    ketdata = ket.full()

    # Particles before the split index the rows, the rest index the columns.
    dim_list_y = dim_list[:splitting]
    dim_list_x = dim_list[splitting:]

    size_x = np.prod(dim_list_x)
    size_y = np.prod(dim_list_y)

    ketdata = ketdata.reshape((size_y, size_x))

    # Only the leading particles get labels; one tick per labeled subspace.
    dim_list_small_x = dim_list_x[:labels_iteration[1]]
    dim_list_small_y = dim_list_y[:labels_iteration[0]]

    quadrants_x = np.prod(dim_list_small_x)
    quadrants_y = np.prod(dim_list_small_y)

    # Ticks are centered in each quadrant; y runs top-to-bottom.
    ticks_x = [size_x / quadrants_x * (i + 0.5)
               for i in range(quadrants_x)]
    ticks_y = [size_y / quadrants_y * (quadrants_y - i - 0.5)
               for i in range(quadrants_y)]

    labels_x = [_sequence_to_latex(_index_to_sequence(i*size_x // quadrants_x,
                                                      dim_list=dim_list_x))
                for i in range(quadrants_x)]
    labels_y = [_sequence_to_latex(_index_to_sequence(i*size_y // quadrants_y,
                                                      dim_list=dim_list_y))
                for i in range(quadrants_y)]

    ax.set_xticks(ticks_x)
    ax.set_xticklabels(labels_x)
    ax.set_yticks(ticks_y)
    ax.set_yticklabels(labels_y)
    ax.set_xlabel("last particles")
    ax.set_ylabel("first particles")

    ax.imshow(complex_array_to_rgb(ketdata, theme=theme),
              interpolation="none",
              extent=(0, size_x, 0, size_y))

    return fig, ax
|
qutip/qutip
|
qutip/visualization.py
|
Python
|
bsd-3-clause
| 57,042
|
[
"Mayavi"
] |
3d467647404334702806693d1d7c53b3a55de071c268004f1b58b81206ef6c1d
|
"""Definitions for the review request detail view."""
from __future__ import unicode_literals
import hashlib
import logging
from collections import Counter, defaultdict
from datetime import datetime
from itertools import chain
from django.db.models import Q
from django.utils import six
from django.utils.timezone import utc
from django.utils.translation import ugettext as _
from djblets.registries.registry import (ALREADY_REGISTERED,
ATTRIBUTE_REGISTERED,
NOT_REGISTERED)
from djblets.util.compat.django.template.context import flatten_context
from djblets.util.compat.django.template.loader import render_to_string
from djblets.util.dates import get_latest_timestamp
from djblets.util.decorators import cached_property
from reviewboard.diffviewer.models import DiffCommit
from reviewboard.registries.registry import OrderedRegistry
from reviewboard.reviews.builtin_fields import (CommitListField,
ReviewRequestPageDataMixin)
from reviewboard.reviews.features import status_updates_feature
from reviewboard.reviews.fields import get_review_request_fieldsets
from reviewboard.reviews.models import (BaseComment,
Comment,
FileAttachmentComment,
GeneralComment,
Review,
ReviewRequest,
ScreenshotComment,
StatusUpdate)
logger = logging.getLogger(__name__)
class ReviewRequestPageData(object):
"""Data for the review request page.
The review request detail page needs a lot of data from the database, and
going through the standard model relations will result in a lot more
queries than necessary. This class bundles all that data together and
handles pre-fetching and re-associating as necessary to limit the required
number of queries.
All of the attributes within the class may not be available until both
:py:meth:`query_data_pre_etag` and :py:meth:`query_data_post_etag` are
called.
This object is not meant to be public API, and may change at any time. You
should not use it in extension code.
Attributes:
body_bottom_replies (dict):
A mapping from a top-level review ID to a list of the
:py:class:`~reviewboard.reviews.models.Review` objects which reply
to it.
body_top_replies (dict):
A mapping from a top-level review ID to a list of the
:py:class:`~reviewboard.reviews.models.Review` objects which reply
to it.
review_comments (dict):
A dictionary of comments across all reviews. The keys are
:py:class:`~reviewboard.reviews.models.review.Review` IDs and the
values are lists of comments.
draft_body_top_replies (dict):
A dictionary of draft replies to ``body_top`` fields across all
reviews. The keys are are
:py:class:`~reviewboard.reviews.models.review.Review` IDs that are
being replied to and the values are lists of replies.
draft_body_bottom_replies (dict):
A dictionary of draft replies to ``body_bottom`` fields across all
reviews. The keys are are
:py:class:`~reviewboard.reviews.models.review.Review` IDs that are
being replied to and the values are lists of replies.
draft_reply_comments (dict):
A dictionary of draft reply comments across all reviews. The keys
are :py:class:`~reviewboard.reviews.models.review.Review` IDs that
are being replied to and the values are lists of reply comments.
changedescs (list of reviewboard.changedescs.models.ChangeDescription):
All the change descriptions to be shown on the page.
diffsets (list of reviewboard.diffviewer.models.diffset.DiffSet):
All of the diffsets associated with the review request.
diffsets_by_id (dict):
A mapping from ID to
:py:class:`~reviewboard.diffviewer.models.diffset.DiffSet`.
draft (reviewboard.reviews.models.ReviewRequestDraft):
The active draft of the review request, if any. May be ``None``.
active_file_attachments (list of reviewboard.attachments.models.
FileAttachment):
All the active file attachments associated with the review request.
all_file_attachments (list of reviewboard.attachments.models.
FileAttachment):
All the file attachments associated with the review request.
file_attachments_by_id (dict):
A mapping from ID to
:py:class:`~reviewboard.attachments.models.FileAttachment`
issues (list of reviewboard.reviews.models.BaseComment):
A list of all the comments (of all types) which are marked as
issues.
issue_counts (dict):
A dictionary storing counts of the various issue states throughout
the page.
latest_changedesc_timestamp (datetime.datetime):
The timestamp of the most recent change description on the page.
latest_review_timestamp (datetime.datetime):
The timestamp of the most recent review on the page.
latest_timestamps_by_review_id (dict):
A mapping from top-level review ID to the latest timestamp of the
thread.
review_request (reviewboard.reviews.models.ReviewRequest):
The review request.
review_request_details (reviewboard.reviews.models.
base_review_request_details.
BaseReviewRequestDetails):
The review request (or the active draft thereof). In practice this
will either be a
:py:class:`~reviewboard.reviews.models.ReviewRequest` or a
:py:class:`~reviewboard.reviews.models.ReviewRequestDraft`.
reviews (list of reviewboard.reviews.models.reviews.Review):
All the reviews to be shown on the page. This includes any draft
reviews owned by the requesting user but not drafts owned by
others.
reviews_by_id (dict):
A mapping from ID to
:py:class:`~reviewboard.reviews.models.Review`.
active_screenshots (list of reviewboard.reviews.models.screenshots.
Screenshot):
All the active screenshots associated with the review request.
all_screenshots (list of reviewboard.reviews.models.Screenshot):
All the screenshots associated with the review request.
screenshots_by_id (dict):
A mapping from ID to
:py:class:`~reviewboard.reviews.models.Screenshot`.
all_status_updates (list of reviewboard.reviews.models.
status_updates.StatusUpdate):
All status updates recorded for the review request.
initial_status_updates (list of reviewboard.reviews.models.
status_updates.StatusUpdate):
The status updates recorded on the initial publish of the
review request.
change_status_updates (dict):
The status updates associated with change descriptions. Each key
in the dictionary is a
:py:class:`~reviewboard.changedescs.models.ChangeDescription` ID,
and each value is a list of
:py:class:`reviewboard.reviews.models.status_updates.StatusUpdate`
instances.
status_updates_enabled (bool):
Whether the status updates feature is enabled for this
review request. This does not necessarily mean that there are
status updates on the review request.
"""
def __init__(self, review_request, request, last_visited=None,
             entry_classes=None):
    """Initialize the data object.

    Args:
        review_request (reviewboard.reviews.models.ReviewRequest):
            The review request being viewed.

        request (django.http.HttpRequest):
            The HTTP request object.

        last_visited (datetime.datetime, optional):
            The date/time when the user last visited the review request.

        entry_classes (list of BaseReviewRequestPageEntry, optional):
            The entry classes to generate data for. If not provided, all
            registered entry classes will be used.
    """
    self.review_request = review_request
    self.request = request
    self.last_visited = last_visited
    self.entry_classes = entry_classes or list(entry_registry)

    # State filled in by query_data_pre_etag().
    self.reviews = []
    self.changedescs = []
    self.diffsets = []
    self.commits_by_diffset_id = {}
    self.diffsets_by_id = {}
    self.all_status_updates = []
    self.latest_review_timestamp = None
    self.latest_changedesc_timestamp = None
    self.draft = None

    # State filled in by query_data_post_etag().
    self.initial_status_updates = []
    self.change_status_updates = {}
    self.reviews_by_id = {}
    self.latest_timestamps_by_review_id = {}
    self.body_top_replies = defaultdict(list)
    self.body_bottom_replies = defaultdict(list)
    self.review_request_details = None
    self.active_file_attachments = []
    self.all_file_attachments = []
    self.file_attachments_by_id = {}
    self.active_screenshots = []
    self.all_comments = []
    self.all_screenshots = []
    self.screenshots_by_id = {}
    self.review_comments = {}
    self.draft_reply_comments = {}
    self.draft_body_top_replies = defaultdict(list)
    self.draft_body_bottom_replies = defaultdict(list)
    self.issues = []
    self.issue_counts = {
        'total': 0,
        'open': 0,
        'resolved': 0,
        'dropped': 0,
        'verifying': 0,
    }

    self.status_updates_enabled = status_updates_feature.is_enabled(
        local_site=review_request.local_site)

    # Reduce the entry classes' declared requirements down to a single
    # set of flags describing what this page will have to query.
    entry_classes = self.entry_classes

    self._needs_draft = any(
        cls.needs_draft
        for cls in entry_classes
    )
    self._needs_reviews = any(
        cls.needs_reviews
        for cls in entry_classes
    )
    self._needs_changedescs = any(
        cls.needs_changedescs
        for cls in entry_classes
    )
    self._needs_status_updates = any(
        cls.needs_status_updates
        for cls in entry_classes
    )
    self._needs_file_attachments = any(
        cls.needs_file_attachments
        for cls in entry_classes
    )
    self._needs_screenshots = any(
        cls.needs_screenshots
        for cls in entry_classes
    )
def query_data_pre_etag(self):
    """Perform initial queries for the page.

    This method will populate only the data needed to compute the ETag. We
    avoid everything else until later so as to do the minimum amount
    possible before reporting to the client that they can just use their
    cached copy.
    """
    # Query for all the reviews that should be shown on the page (either
    # ones which are public or draft reviews owned by the current user).
    reviews_query = Q(public=True)

    if self.request.user.is_authenticated():
        reviews_query |= Q(user_id=self.request.user.pk)

    if self._needs_reviews or self._needs_status_updates:
        self.reviews = list(
            self.review_request.reviews
            .filter(reviews_query)
            .order_by('-timestamp')
            .select_related('user', 'user__profile')
        )

    # self.reviews is ordered newest-first, so index 0 is the latest
    # review. With no reviews at all, fall back to the UNIX epoch so the
    # ETag still has a well-defined timestamp.
    if len(self.reviews) == 0:
        self.latest_review_timestamp = datetime.fromtimestamp(0, utc)
    else:
        self.latest_review_timestamp = self.reviews[0].timestamp

    # Get all the public ChangeDescriptions.
    if self._needs_changedescs:
        self.changedescs = list(
            self.review_request.changedescs.filter(public=True))

        if self.changedescs:
            # NOTE(review): assumes the changedescs relation's default
            # ordering puts the newest change description first — confirm
            # against the model's Meta.ordering.
            self.latest_changedesc_timestamp = self.changedescs[0].timestamp

    # Get the active draft (if any).
    if self._needs_draft:
        self.draft = self.review_request.get_draft(self.request.user)

    # Get diffsets.
    if self._needs_reviews:
        self.diffsets = self.review_request.get_diffsets()
        self.diffsets_by_id = self._build_id_map(self.diffsets)

    # Get all status updates.
    if self.status_updates_enabled and self._needs_status_updates:
        self.all_status_updates = list(
            self.review_request.status_updates.order_by('summary'))
def query_data_post_etag(self):
    """Perform remaining queries for the page.

    This method will populate everything else needed for the display of the
    review request page other than that which was required to compute the
    ETag.
    """
    self.reviews_by_id = self._build_id_map(self.reviews)

    # Link every status update to its review (when it has one), and bucket
    # it by whether it belongs to the initial publish or to a change
    # description.
    for status_update in self.all_status_updates:
        if status_update.review_id is not None:
            review = self.reviews_by_id[status_update.review_id]
            review.status_update = status_update
            status_update.review = review

        if status_update.change_description_id:
            self.change_status_updates.setdefault(
                status_update.change_description_id,
                []).append(status_update)
        else:
            self.initial_status_updates.append(status_update)

    for review in self.reviews:
        review._body_top_replies = []
        review._body_bottom_replies = []

        # Group replies to the body fields (and, separately, unpublished
        # draft replies) by the review they reply to.
        body_reply_info = (
            (review.body_top_reply_to_id,
             self.body_top_replies,
             self.draft_body_top_replies),
            (review.body_bottom_reply_to_id,
             self.body_bottom_replies,
             self.draft_body_bottom_replies),
        )

        for reply_to_id, replies, draft_replies in body_reply_info:
            if reply_to_id is not None:
                replies[reply_to_id].append(review)

                if not review.public:
                    draft_replies[reply_to_id].append(review)

        # Find the latest reply timestamp for each top-level review.
        parent_id = review.base_reply_to_id

        if parent_id is not None:
            # NOTE(review): the stored timestamp is tagged as UTC here —
            # confirm the database stores naive UTC datetimes.
            new_timestamp = review.timestamp.replace(tzinfo=utc)

            if parent_id in self.latest_timestamps_by_review_id:
                old_timestamp = \
                    self.latest_timestamps_by_review_id[parent_id]

                if old_timestamp < new_timestamp:
                    self.latest_timestamps_by_review_id[parent_id] = \
                        new_timestamp
            else:
                self.latest_timestamps_by_review_id[parent_id] = \
                    new_timestamp

        # We've already attached all the status updates above, but
        # any reviews that don't have status updates can still result
        # in a query. We want to null those out.
        if not hasattr(review, '_status_update_cache'):
            review._status_update_cache = None

    # Link up all the review body replies.
    for reply_id, replies in six.iteritems(self.body_top_replies):
        self.reviews_by_id[reply_id]._body_top_replies = reversed(replies)

    for reply_id, replies in six.iteritems(self.body_bottom_replies):
        self.reviews_by_id[reply_id]._body_bottom_replies = \
            reversed(replies)

    self.review_request_details = self.draft or self.review_request

    # Get all the file attachments and screenshots.
    #
    # Note that we fetch both active and inactive file attachments and
    # screenshots. We do this because even though they've been removed,
    # they still will be rendered in change descriptions.
    if self._needs_file_attachments or self._needs_reviews:
        self.active_file_attachments = \
            list(self.review_request_details.get_file_attachments())
        self.all_file_attachments = (
            self.active_file_attachments + list(
                self.review_request_details
                .get_inactive_file_attachments()))
        self.file_attachments_by_id = \
            self._build_id_map(self.all_file_attachments)

        for attachment in self.all_file_attachments:
            attachment._comments = []

    if self._needs_screenshots or self._needs_reviews:
        self.active_screenshots = \
            list(self.review_request_details.get_screenshots())
        self.all_screenshots = (
            self.active_screenshots +
            list(self.review_request_details.get_inactive_screenshots()))
        self.screenshots_by_id = self._build_id_map(self.all_screenshots)

        for screenshot in self.all_screenshots:
            screenshot._comments = []

    if self.reviews:
        review_ids = list(six.iterkeys(self.reviews_by_id))

        for model, review_field_name, key, ordering in (
            (GeneralComment,
             'general_comments',
             'general_comments',
             ('generalcomment__timestamp',)),
            (ScreenshotComment,
             'screenshot_comments',
             'screenshot_comments',
             ('screenshotcomment__timestamp',)),
            (FileAttachmentComment,
             'file_attachment_comments',
             'file_attachment_comments',
             ('fileattachmentcomment__timestamp',)),
            (Comment,
             'comments',
             'diff_comments',
             ('comment__filediff',
              'comment__first_line',
              'comment__timestamp'))):
            # Due to mistakes in how we initially made the schema, we have
            # a ManyToManyField in between comments and reviews, instead of
            # comments having a ForeignKey to the review. This makes it
            # difficult to easily go from a comment to a review ID.
            #
            # The solution to this is to not query the comment objects, but
            # rather the through table. This will let us grab the review
            # and comment in one go, using select_related.
            #
            # Note that we must always order it by something or we'll get
            # the indexed order of the through table's entry, which may
            # not align with the correct order of comments.
            related_field = Review._meta.get_field(review_field_name)
            comment_field_name = related_field.m2m_reverse_field_name()
            through = related_field.rel.through

            objs = list(
                through.objects.filter(review__in=review_ids)
                .select_related()
                .order_by(*ordering)
            )

            # We do two passes. One to build a mapping, and one to actually
            # process comments.
            comment_map = {}

            for obj in objs:
                comment = getattr(obj, comment_field_name)
                comment._type = key
                comment._replies = []
                comment_map[comment.pk] = comment

            for obj in objs:
                comment = getattr(obj, comment_field_name)

                self.all_comments.append(comment)

                # Short-circuit some object fetches for the comment by
                # setting some internal state on them.
                assert obj.review_id in self.reviews_by_id
                review = self.reviews_by_id[obj.review_id]
                comment.review_obj = review
                comment._review = review
                comment._review_request = self.review_request

                # If the comment has an associated object (such as a file
                # attachment) that we've already fetched, attach it to
                # prevent future queries.
                if isinstance(comment, FileAttachmentComment):
                    attachment_id = comment.file_attachment_id
                    f = self.file_attachments_by_id[attachment_id]
                    comment.file_attachment = f
                    f._comments.append(comment)

                    diff_against_id = \
                        comment.diff_against_file_attachment_id

                    if diff_against_id is not None:
                        f = self.file_attachments_by_id[diff_against_id]
                        comment.diff_against_file_attachment = f
                elif isinstance(comment, ScreenshotComment):
                    screenshot = \
                        self.screenshots_by_id[comment.screenshot_id]
                    comment.screenshot = screenshot
                    screenshot._comments.append(comment)

                # We've hit legacy database cases where there were entries
                # that weren't a reply, and were just orphaned. Check and
                # ignore anything we don't expect.
                is_reply = review.is_reply()

                if is_reply == comment.is_reply():
                    if is_reply:
                        replied_comment = comment_map[comment.reply_to_id]
                        replied_comment._replies.append(comment)

                        if not review.public:
                            self.draft_reply_comments.setdefault(
                                review.base_reply_to_id, []).append(
                                    comment)
                    else:
                        self.review_comments.setdefault(
                            review.pk, []).append(comment)

                if review.public and comment.issue_opened:
                    status_key = comment.issue_status_to_string(
                        comment.issue_status)

                    # Both "verifying" states get lumped together in the
                    # same section in the issue summary table.
                    if status_key in ('verifying-resolved',
                                      'verifying-dropped'):
                        status_key = 'verifying'

                    self.issue_counts[status_key] += 1
                    self.issue_counts['total'] += 1
                    self.issues.append(comment)

    if self.review_request.created_with_history:
        pks = [diffset.pk for diffset in self.diffsets]

        if self.draft and self.draft.diffset_id is not None:
            pks.append(self.draft.diffset_id)

        self.commits_by_diffset_id = DiffCommit.objects.by_diffset_ids(pks)
def get_entries(self):
    """Return all entries for the review request page.

    This builds and populates entries for the page, using each class in
    :py:attr:`entry_classes`, and groups them by their position on the
    page.

    Returns:
        dict:
        A dictionary of entries. This has ``initial`` and ``main`` keys,
        corresponding to
        :py:attr:`BaseReviewRequestPageEntry.ENTRY_POS_INITIAL` and
        :py:attr:`BaseReviewRequestPageEntry.ENTRY_POS_MAIN` entries,
        respectively.

        The ``initial`` entries are sorted in registered entry order,
        while the ``main`` entries are sorted in timestamp order.
    """
    initial_entries = []
    main_entries = []

    for entry_cls in self.entry_classes:
        built = entry_cls.build_entries(self)

        if built is None:
            continue

        if entry_cls.entry_pos == entry_cls.ENTRY_POS_INITIAL:
            initial_entries.extend(built)
        elif entry_cls.entry_pos == entry_cls.ENTRY_POS_MAIN:
            main_entries.extend(built)

    # Give every entry a chance to perform post-population computations.
    for entry in initial_entries + main_entries:
        entry.finalize()

    # Sort all the main entries (such as reviews and change descriptions)
    # by their timestamp. The initial entries keep registration order.
    main_entries.sort(key=lambda entry: entry.added_timestamp)

    return {
        'initial': initial_entries,
        'main': main_entries,
    }
def _build_id_map(self, objects):
"""Return an ID map from a list of objects.
Args:
objects (list):
A list of objects queried via django.
Returns:
dict:
A dictionary mapping each ID to the resulting object.
"""
return {
obj.pk: obj
for obj in objects
}
class BaseReviewRequestPageEntry(object):
    """An entry on the review detail page.

    This contains backend logic and frontend templates for one of the boxes
    that appears below the main review request box on the review request
    detail page.

    Attributes:
        added_timestamp (datetime.datetime):
            The timestamp of the entry. This represents the added time for the
            entry, and is used for sorting the entry in the page. This
            timestamp should never change.

        avatar_user (django.contrib.auth.models.User):
            The user to display an avatar for. This can be ``None``, in which
            case no avatar will be displayed. Templates can also override the
            avatar HTML instead of using this.

        collapsed (bool):
            Whether the entry should be initially collapsed.

        entry_id (unicode):
            The ID of the entry. This will be unique across this type of
            entry, and may refer to a database object ID.

        updated_timestamp (datetime.datetime):
            The timestamp when the entry was last updated. This reflects new
            updates or activity on the entry.
    """

    #: An initial entry appearing above the review-like boxes.
    ENTRY_POS_INITIAL = 1

    #: An entry appearing in the main area along with review-like boxes.
    ENTRY_POS_MAIN = 2

    #: The ID used for entries of this type.
    entry_type_id = None

    #: The type of entry on the page.
    #:
    #: By default, this is a box type, which will appear along with other
    #: reviews and change descriptions.
    entry_pos = ENTRY_POS_MAIN

    #: Whether the entry needs a review request draft to be queried.
    #:
    #: If set, :py:attr:`ReviewRequestPageData.draft` will be set (if a draft
    #: exists).
    needs_draft = False

    #: Whether the entry needs reviews, replies, and comments to be queried.
    #:
    #: If set, :py:attr:`ReviewRequestPageData.reviews`,
    #: :py:attr:`ReviewRequestPageData.diffsets`,
    #: :py:attr:`ReviewRequestPageData.diffsets_by_id`,
    #: :py:attr:`ReviewRequestPageData.active_file_attachments`,
    #: :py:attr:`ReviewRequestPageData.all_file_attachments`,
    #: :py:attr:`ReviewRequestPageData.file_attachments_by_id`,
    #: :py:attr:`ReviewRequestPageData.active_file_screenshots`,
    #: :py:attr:`ReviewRequestPageData.all_file_screenshots`, and
    #: :py:attr:`ReviewRequestPageData.file_screenshots_by_id` will be set.
    needs_reviews = False

    #: Whether the entry needs change descriptions to be queried.
    #:
    #: If set, :py:attr:`ReviewRequestPageData.changedescs` will be queried.
    needs_changedescs = False

    #: Whether the entry needs status updates-related data to be queried.
    #:
    #: This will also fetch the reviews, but will not automatically fetch any
    #: comments or other related data. For that, set :py:attr:`needs_reviews`.
    #:
    #: If set, :py:attr:`ReviewRequestPageData.reviews`,
    #: If set, :py:attr:`ReviewRequestPageData.all_status_updates`,
    #: If set, :py:attr:`ReviewRequestPageData.initial_status_updates`, and
    #: If set, :py:attr:`ReviewRequestPageData.change_status_updates` will be
    #: set.
    needs_status_updates = False

    #: Whether the entry needs file attachment data to be queried.
    #:
    #: If set, :py:attr:`ReviewRequestPageData.active_file_attachments`,
    #: :py:attr:`ReviewRequestPageData.all_file_attachments`, and
    #: :py:attr:`ReviewRequestPageData.file_attachments_by_id` will be set.
    needs_file_attachments = False

    #: Whether the entry needs screenshot data to be queried.
    #:
    #: Most entries should never need this, as screenshots are deprecated.
    #:
    #: If set, :py:attr:`ReviewRequestPageData.active_screenshots`,
    #: :py:attr:`ReviewRequestPageData.all_screenshots`, and
    #: :py:attr:`ReviewRequestPageData.screenshots_by_id` will be set.
    needs_screenshots = False

    #: The template to render for the HTML.
    template_name = None

    #: The template to render for any JavaScript.
    js_template_name = 'reviews/entries/entry.js'

    #: The name of the JavaScript Backbone.Model class for this entry.
    js_model_class = 'RB.ReviewRequestPage.Entry'

    #: The name of the JavaScript Backbone.View class for this entry.
    js_view_class = 'RB.ReviewRequestPage.EntryView'

    #: Whether this entry has displayable content.
    #:
    #: This can be overridden as a property to calculate whether to render
    #: the entry, or disabled altogether.
    has_content = True

    @classmethod
    def build_entries(cls, data):
        """Generate entry instances from review request page data.

        Subclasses should override this to yield any entries needed, based on
        the page data.

        Args:
            data (ReviewRequestPageData):
                The data used for the entries on the page.

        Yields:
            BaseReviewRequestPageEntry:
            An entry to include on the page.
        """
        # Base implementation yields nothing; subclasses provide entries.
        pass

    @classmethod
    def build_etag_data(cls, data, entry=None, **kwargs):
        """Build ETag data for the entry.

        This will be incorporated into the ETag for the page.

        Version Changed:
            4.0.4:
            Added ``entry`` and ``**kwargs`` arguments.

        Args:
            data (ReviewRequestPageData):
                The computed data (pre-ETag) for the page.

            entry (BaseReviewRequestPageEntry):
                A specific entry to build ETags for.

            **kwargs (dict, unused):
                Additional keyword arguments for future expansion.

        Returns:
            unicode:
            The ETag data for the entry.
        """
        # Base implementation contributes nothing to the page ETag.
        return ''

    def __init__(self, data, entry_id, added_timestamp,
                 updated_timestamp=None, avatar_user=None):
        """Initialize the entry.

        Args:
            data (ReviewRequestPageData):
                The computed data for the page.

            entry_id (unicode):
                The ID of the entry. This must be unique across this type
                of entry, and may refer to a database object ID.

            added_timestamp (datetime.datetime):
                The timestamp of the entry. This represents the added time
                for the entry, and is used for sorting the entry in the page.
                This timestamp should never change.

            updated_timestamp (datetime.datetime, optional):
                The timestamp when the entry was last updated. This should
                reflect new updates or activity on the entry.

            avatar_user (django.contrib.auth.models.User, optional):
                The user to display an avatar for. This can be ``None``, in
                which case no avatar will be displayed. Templates can also
                override the avatar HTML instead of using this.
        """
        self.data = data
        self.entry_id = entry_id
        self.added_timestamp = added_timestamp

        # An entry that has never been updated is considered updated at the
        # time it was added.
        self.updated_timestamp = updated_timestamp or added_timestamp
        self.avatar_user = avatar_user

    def __repr__(self):
        """Return a string representation for this entry.

        Returns:
            unicode:
            A string representation for the entry.
        """
        return (
            '%s(entry_type_id=%s, entry_id=%s, added_timestamp=%s, '
            'updated_timestamp=%s, collapsed=%s)'
            % (self.__class__.__name__, self.entry_type_id, self.entry_id,
               self.added_timestamp, self.updated_timestamp, self.collapsed)
        )

    @cached_property
    def collapsed(self):
        """Whether the entry is collapsed.

        This will consist of a cached value computed from
        :py:meth:`calculate_collapsed`. Subclasses should override that
        method.
        """
        return self.calculate_collapsed()

    def is_entry_new(self, last_visited, user, **kwargs):
        """Return whether the entry is new, from the user's perspective.

        By default, this compares the last visited time to the timestamp
        on the object. Subclasses can override this to provide additional
        logic.

        Args:
            last_visited (datetime.datetime):
                The last visited timestamp.

            user (django.contrib.auth.models.User):
                The user viewing the page.

            **kwargs (dict):
                Additional keyword arguments.

        Returns:
            bool:
            ``True`` if the entry will be shown as new. ``False`` if it
            will be shown as an existing entry.
        """
        # NOTE(review): this comparison assumes last_visited and
        # added_timestamp agree on timezone awareness — confirm callers
        # always pass matching datetimes.
        return (self.added_timestamp is not None and
                last_visited < self.added_timestamp)

    def calculate_collapsed(self):
        """Calculate whether the entry should currently be collapsed.

        By default, this will collapse the entry if the last update is older
        than the last time the user visited the entry and older than the last
        Change Description (or there isn't one on the page yet).

        Subclasses can augment or replace this logic as needed.

        Returns:
            bool:
            ``True`` if the entry should be collapsed. ``False`` if it should
            be expanded.
        """
        data = self.data

        return bool(
            # Collapse if older than the most recent review request
            # change and there's no recent activity.
            data.latest_changedesc_timestamp and
            self.updated_timestamp < data.latest_changedesc_timestamp and

            # Collapse if the page was previously visited and this entry is
            # older than the last visited time.
            data.last_visited and self.updated_timestamp < data.last_visited
        )

    def get_dom_element_id(self):
        """Return the ID used for the DOM element for this entry.

        By default, this returns :py:attr:`entry_type_id` and
        :py:attr:`entry_id` concatenated. Subclasses should override this if
        they need something custom.

        Returns:
            unicode:
            The ID used for the element.
        """
        return '%s%s' % (self.entry_type_id, self.entry_id)

    def get_js_model_data(self):
        """Return data to pass to the JavaScript Model during instantiation.

        The data returned from this function will be provided to the model
        when constructed.

        Returns:
            dict:
            A dictionary of attributes to pass to the Model instance. By
            default, it will be empty.
        """
        return {}

    def get_js_view_data(self):
        """Return data to pass to the JavaScript View during instantiation.

        The data returned from this function will be provided to the view when
        constructed.

        Returns:
            dict:
            A dictionary of options to pass to the View instance. By
            default, it will be empty.
        """
        return {}

    def get_extra_context(self, request, context):
        """Return extra template context for the entry.

        Subclasses can override this to provide additional context needed by
        the template for the page. By default, this returns an empty
        dictionary.

        Args:
            request (django.http.HttpRequest):
                The HTTP request from the client.

            context (django.template.RequestContext):
                The existing template context on the page.

        Returns:
            dict:
            Extra context to use for the entry's template.
        """
        return {}

    def render_to_string(self, request, context):
        """Render the entry to a string.

        If the entry doesn't have a template associated, or doesn't have
        any content (as determined by :py:attr:`has_content`), then this
        will return an empty string.

        Args:
            request (django.http.HttpRequest):
                The HTTP request from the client.

            context (django.template.RequestContext):
                The existing template context on the page.

        Returns:
            unicode:
            The resulting HTML for the entry.
        """
        if not self.template_name or not self.has_content:
            return ''

        user = request.user
        last_visited = context.get('last_visited')

        new_context = flatten_context(context)

        # A broken entry should degrade to an empty box rather than take
        # down the whole page, so context generation errors are logged and
        # swallowed here.
        try:
            new_context.update({
                'entry': self,
                'entry_is_new': (
                    user.is_authenticated() and
                    last_visited is not None and
                    self.is_entry_new(last_visited=last_visited,
                                      user=user)),
                'show_entry_statuses_area': (
                    self.entry_pos !=
                    BaseReviewRequestPageEntry.ENTRY_POS_INITIAL),
            })
            new_context.update(self.get_extra_context(request, context))
        except Exception as e:
            logger.exception('Error generating template context for %s '
                             '(ID=%s): %s',
                             self.__class__.__name__, self.entry_id, e)
            return ''

        # Rendering failures are likewise logged and rendered as an empty
        # string instead of propagating.
        try:
            return render_to_string(template_name=self.template_name,
                                    context=new_context,
                                    request=request)
        except Exception as e:
            logger.exception('Error rendering template for %s (ID=%s): %s',
                             self.__class__.__name__, self.entry_id, e)
            return ''

    def finalize(self):
        """Perform final computations after all comments have been added."""
        pass
class ReviewEntryMixin(object):
    """Mixin to provide functionality for entries containing reviews."""

    def is_review_collapsed(self, review):
        """Return whether a review should be collapsed.

        A review is collapsed only when all of the following hold:

        * No comment on the review has an issue waiting to be resolved.
        * The user has no draft replies to the review's comments or body
          fields.
        * The user has visited the page before, and neither the review nor
          its latest reply is newer than that visit.

        Args:
            review (reviewboard.reviews.models.review.Review):
                The review to compute the collapsed state for.

        Returns:
            bool:
            ``True`` if the review should be collapsed. ``False`` if not.
        """
        data = self.data

        # Reviews containing comments with open (or verifying) issues
        # must stay expanded.
        for comment in data.review_comments.get(review.pk, []):
            if (comment.issue_opened and
                comment.issue_status in (comment.OPEN,
                                         comment.VERIFYING_RESOLVED,
                                         comment.VERIFYING_DROPPED)):
                return False

        # Draft replies (to comments or to the body fields) also keep the
        # review expanded.
        if (data.draft_body_top_replies.get(review.pk) or
            data.draft_body_bottom_replies.get(review.pk) or
            data.draft_reply_comments.get(review.pk)):
            return False

        # Don't collapse unless the user has visited the page before and
        # the review is older than their last visit.
        if not data.last_visited:
            return False

        if review.timestamp >= data.last_visited:
            return False

        latest_reply_timestamp = \
            data.latest_timestamps_by_review_id.get(review.pk)

        return bool(not latest_reply_timestamp or
                    latest_reply_timestamp < data.last_visited)

    def serialize_review_js_model_data(self, review):
        """Serialize information on a review for JavaScript models.

        Args:
            review (reviewboard.reviews.models.review.Review):
                The review to serialize.

        Returns:
            dict:
            The serialized data for the JavaScript model.
        """
        return {
            'bodyBottom': review.body_bottom,
            'bodyTop': review.body_top,
            'id': review.pk,
            'public': True,
            'shipIt': review.ship_it,
        }
class DiffCommentsSerializerMixin(object):
    """Mixin to provide diff comment data serialization."""

    def serialize_diff_comments_js_model_data(self, diff_comments):
        """Serialize information on diff comments for JavaScript models.

        Args:
            diff_comments (list of reviewboard.reviews.models.diff_comment.
                           Comment):
                The list of comments to serialize.

        Returns:
            list of tuple:
            A list of ``(comment ID, filediff key)`` pairs, in input order.
            The filediff key is the FileDiff ID, and for comments made on an
            interdiff it takes the form ``"<filediff_id>-<interfilediff_id>"``.
        """
        diff_comments_data = []

        for comment in diff_comments:
            key = '%s' % comment.filediff_id

            # Comments spanning a diff revision range also encode the
            # interdiff's FileDiff ID in the key.
            if comment.interfilediff_id:
                key = '%s-%s' % (key, comment.interfilediff_id)

            diff_comments_data.append((six.text_type(comment.pk), key))

        return diff_comments_data
class StatusUpdatesEntryMixin(DiffCommentsSerializerMixin, ReviewEntryMixin):
"""A mixin for any entries which can include status updates.
This provides common functionality for the two entries that include status
updates (the initial status updates entry and change description entries).
Attributes:
status_updates (list of reviewboard.reviews.models.StatusUpdate):
The status updates in this entry.
status_updates_by_review (dict):
A mapping from review ID to the matching status update.
"""
needs_reviews = True
needs_status_updates = True
@classmethod
def build_etag_data(cls, data, entry=None, **kwargs):
    """Build ETag data for the entry.

    This will be incorporated into the ETag for the page and for
    page updates.

    ETags are influenced by a status update's service ID, state,
    timestamp, and description.

    The result will be encoded as a SHA1 hash.

    Args:
        data (ReviewRequestPageData):
            The computed data (pre-ETag) for the page.

        entry (StatusUpdatesEntryMixin):
            A specific entry to build ETags for.

        **kwargs (dict, unused):
            Additional keyword arguments for future expansion.

    Returns:
        unicode:
        The ETag data for the entry.
    """
    # Use the given entry's status updates if provided; otherwise fall
    # back to all of the page's status updates (when the feature is on).
    if entry is not None:
        status_updates = entry.status_updates
    elif data.status_updates_enabled:
        status_updates = data.all_status_updates
    else:
        status_updates = []

    if status_updates:
        etag = ':'.join(
            '%s:%s:%s:%s' % (
                status_update.service_id,
                status_update.state,
                status_update.timestamp,
                status_update.description,
            )
            for status_update in status_updates
        )
    else:
        etag = ''

    # Prepend whatever the parent classes contribute so the full MRO's
    # state is reflected in the hash.
    etag = '%s:%s' % (
        super(StatusUpdatesEntryMixin, cls).build_etag_data(data),
        etag,
    )

    return hashlib.sha1(etag.encode('utf-8')).hexdigest()
def __init__(self):
    """Initialize the entry."""
    # Tally of status updates per effective state, computed in finalize().
    self.state_counts = Counter()

    # Mapping from review ID to the matching status update.
    self.status_updates_by_review = {}

    # All status updates shown in this entry, in the order added.
    self.status_updates = []
def are_status_updates_collapsed(self, status_updates):
    """Return whether all status updates should be collapsed.

    This considers all provided status updates when computing the
    collapsed state. It's meant to be used along with other logic to
    compute an entry's collapsed state.

    A status update keeps the entry expanded if it's newer than the user's
    last visit, if it's still pending (or not yet run), or if its review
    is itself not collapsed (per
    :py:meth:`ReviewEntryMixin.is_review_collapsed`). Updates without a
    review are considered collapsable.

    Args:
        status_updates (list of reviewboard.reviews.models.status_update.
                        StatusUpdate):
            The list of status updates to compute the collapsed state for.

    Returns:
        bool:
        ``True`` if all status updates are marked as collapsed. ``False``
        if any are not marked as collapsed.
    """
    data = self.data
    last_visited = data.last_visited

    for update in status_updates:
        # Anything newer than the user's last visit stays expanded.
        if last_visited and update.timestamp > last_visited:
            return False

        # Pending and not-yet-run checks stay expanded.
        if update.effective_state in (update.PENDING,
                                      update.NOT_YET_RUN):
            return False

        # Defer to the associated review's own collapsed state.
        if update.review_id is not None:
            review = data.reviews_by_id[update.review_id]

            if not self.is_review_collapsed(review):
                return False

    return True
def add_update(self, update):
    """Add a status update to the entry.

    This registers the update with the entry, prepares the per-update
    comment buckets (filled in later by :py:meth:`add_comment`), and
    precomputes the header CSS class and rendered summary HTML used by
    the templates.

    Args:
        update (reviewboard.reviews.models.StatusUpdate):
            The status update to add.
    """
    self.status_updates.append(update)
    self.status_updates_by_review[update.review_id] = update

    # Buckets for the comments filed on this update's review.
    update.comments = {
        'diff_comments': [],
        'screenshot_comments': [],
        'file_attachment_comments': [],
        'general_comments': [],
    }

    state = update.effective_state

    # Map the effective state to the CSS class used for the update's
    # header. An unknown state is a programming error.
    if state in (StatusUpdate.DONE_FAILURE,
                 StatusUpdate.ERROR,
                 StatusUpdate.TIMEOUT):
        update.header_class = 'status-update-state-failure'
    elif state == StatusUpdate.PENDING:
        update.header_class = 'status-update-state-pending'
    elif state == StatusUpdate.NOT_YET_RUN:
        update.header_class = 'status-update-state-not-yet-run'
    elif state == StatusUpdate.DONE_SUCCESS:
        update.header_class = 'status-update-state-success'
    else:
        raise ValueError('Unexpected state "%s"' % state)

    # Timed-out and not-yet-run updates get a canned description in place
    # of the service-provided one.
    if state == StatusUpdate.TIMEOUT:
        description = _('timed out.')
    elif state == StatusUpdate.NOT_YET_RUN:
        description = _('not yet run.')
    else:
        description = update.description

    update.summary_html = render_to_string(
        template_name='reviews/status_update_summary.html',
        context={
            'action_name': update.action_name,
            'can_run': update.can_run,
            'description': description,
            'header_class': update.header_class,
            'status_update_id': update.pk,
            'summary': update.summary,
            'url': update.url,
            'url_text': update.url_text,
        })
def populate_status_updates(self, status_updates):
    """Populate the list of status updates for the entry.

    Each provided status update is registered via :py:meth:`add_update`,
    and any comments filed on its review are attached to it via
    :py:meth:`add_comment`.

    Args:
        status_updates (list of reviewboard.reviews.models.status_update.
                        StatusUpdate):
            The list of status updates to add.
    """
    review_comments = self.data.review_comments

    for status_update in status_updates:
        self.add_update(status_update)

        # Attach every comment made on this status update's review.
        for comment in review_comments.get(status_update.review_id, []):
            self.add_comment(comment._type, comment)
def add_comment(self, comment_type, comment):
    """Add a comment to the entry.

    The comment is appended to the matching bucket on the status update
    associated with the comment's review.

    Args:
        comment_type (unicode):
            The type of comment (an index into the :py:attr:`comments`
            dictionary).

        comment (reviewboard.reviews.models.BaseComment):
            The comment to add.
    """
    review_pk = comment.review_obj.pk
    status_update = self.status_updates_by_review[review_pk]
    status_update.comments[comment_type].append(comment)
def finalize(self):
"""Perform final computations after all comments have been added."""
for update in self.status_updates:
self.state_counts[update.effective_state] += 1
summary_parts = []
if self.state_counts[StatusUpdate.DONE_FAILURE] > 0:
summary_parts.append(
_('%s failed') % self.state_counts[StatusUpdate.DONE_FAILURE])
if self.state_counts[StatusUpdate.DONE_SUCCESS] > 0:
summary_parts.append(
_('%s succeeded')
% self.state_counts[StatusUpdate.DONE_SUCCESS])
if self.state_counts[StatusUpdate.PENDING] > 0:
summary_parts.append(
_('%s pending') % self.state_counts[StatusUpdate.PENDING])
if self.state_counts[StatusUpdate.NOT_YET_RUN] > 0:
summary_parts.append(
_('%s not yet run')
% self.state_counts[StatusUpdate.NOT_YET_RUN])
if self.state_counts[StatusUpdate.ERROR] > 0:
summary_parts.append(
_('%s failed with error')
% self.state_counts[StatusUpdate.ERROR])
if self.state_counts[StatusUpdate.TIMEOUT] > 0:
summary_parts.append(
_('%s timed out')
% self.state_counts[StatusUpdate.TIMEOUT])
if (self.state_counts[StatusUpdate.DONE_FAILURE] > 0 or
self.state_counts[StatusUpdate.ERROR] > 0 or
self.state_counts[StatusUpdate.TIMEOUT] > 0):
self.state_summary_class = 'status-update-state-failure'
elif (self.state_counts[StatusUpdate.PENDING] > 0 or
self.state_counts[StatusUpdate.NOT_YET_RUN] > 0):
self.state_summary_class = 'status-update-state-pending'
elif self.state_counts[StatusUpdate.DONE_SUCCESS]:
self.state_summary_class = 'status-update-state-success'
self.state_summary = ', '.join(summary_parts)
def get_js_model_data(self):
"""Return data to pass to the JavaScript Model during instantiation.
The data returned from this function will be provided to the model
when constructed. This consists of information on the reviews for
status updates and the comments made on diffs.
Returns:
dict:
A dictionary of attributes to pass to the Model instance.
"""
diff_comments_data = list(chain.from_iterable(
self.serialize_diff_comments_js_model_data(
update.comments['diff_comments'])
for update in self.status_updates
if update.comments['diff_comments']
))
reviews_data = [
self.serialize_review_js_model_data(update.review)
for update in self.status_updates
if update.review_id is not None
]
model_data = {
'pendingStatusUpdates': (
self.state_counts[StatusUpdate.PENDING] > 0),
}
model_data.update({
key: value
for key, value in (('diffCommentsData', diff_comments_data),
('reviewsData', reviews_data))
if value
})
return model_data
class ReviewRequestEntry(BaseReviewRequestPageEntry):
    """An entry for the main review request box.
    This is used to control the data queried by
    :py:class:`ReviewRequestPageData` for display in the main review request
    box. It does not render onto the page.
    """
    entry_type_id = 'review-request'
    entry_pos = BaseReviewRequestPageEntry.ENTRY_POS_INITIAL
    # This entry has no client-side template/model/view of its own; the main
    # review request box is rendered outside the entries framework.
    js_template_name = None
    js_model_class = None
    js_view_class = None
    needs_draft = True
    # These are needed for the file attachments/screenshots area.
    needs_file_attachments = True
    needs_screenshots = True
    # Reviews, comments, etc. are needed for the issue summary table.
    needs_reviews = True
    # Data-only entry: nothing is emitted in the page's entries list.
    has_content = False
class InitialStatusUpdatesEntry(StatusUpdatesEntryMixin,
                                BaseReviewRequestPageEntry):
    """An entry for any status updates posted against the initial state.
    :py:class:`~reviewboard.reviews.models.StatusUpdate` reviews (those created
    by automated tools like static analysis checkers or CI systems) are shown
    separately from ordinary reviews. When status updates are related to a
    :py:class:`~reviewboard.changedescs.models.ChangeDescription`, they're
    displayed within the change description box. Otherwise, they're shown in
    their own box (immediately under the review request box), which is handled
    by this class.
    """
    entry_type_id = 'initial_status_updates'
    entry_pos = BaseReviewRequestPageEntry.ENTRY_POS_INITIAL
    template_name = 'reviews/entries/initial_status_updates.html'
    js_model_class = 'RB.ReviewRequestPage.StatusUpdatesEntry'
    js_view_class = 'RB.ReviewRequestPage.InitialStatusUpdatesEntryView'
    @classmethod
    def build_entries(cls, data):
        """Generate the entry instance from review request page data.
        This will only generate a single instance.
        Args:
            data (ReviewRequestPageData):
                The data used for the initial status update entry.
        Yields:
            InitialStatusUpdatesEntry:
            The entry to include on the page.
        """
        entry = cls(data=data)
        entry.populate_status_updates(data.initial_status_updates)
        yield entry
    def __init__(self, data):
        """Initialize the entry.
        Args:
            data (ReviewRequestPageData):
                Pre-queried data for the review request page.
        """
        # The entry's "updated" time is the newest of the review request's
        # creation time and its initial status updates' timestamps.
        timestamps = [data.review_request.time_added] + [
            status_update.timestamp
            for status_update in data.initial_status_updates
        ]
        StatusUpdatesEntryMixin.__init__(self)
        BaseReviewRequestPageEntry.__init__(
            self,
            data=data,
            entry_id='0',
            added_timestamp=data.review_request.time_added,
            updated_timestamp=get_latest_timestamp(timestamps))
    @property
    def has_content(self):
        """Whether there are any items to display in the entry.
        Returns:
            bool:
            True if there are any initial status updates to display.
        """
        return len(self.status_updates) > 0
    def get_dom_element_id(self):
        """Return the ID used for the DOM element for this entry.
        Returns:
            unicode:
            The ID used for the element.
        """
        # Only one such entry exists per page, so the type ID is unique.
        return self.entry_type_id
    def is_entry_new(self, last_visited, user, **kwargs):
        """Return whether the entry is new, from the user's perspective.
        The initial status updates entry is basically part of the review
        request, and is never shown as new.
        Args:
            last_visited (datetime.datetime, unused):
                The last visited timestamp.
            user (django.contrib.auth.models.User, unused):
                The user viewing the page.
            **kwargs (dict, unused):
                Additional keyword arguments.
        Returns:
            bool:
            ``False``, always.
        """
        return False
    def calculate_collapsed(self):
        """Calculate whether the entry should currently be collapsed.
        The entry will be collapsed if there aren't yet any Change Descriptions
        on the page and if there aren't any status updates with reviews that
        should be expanded. See :py:meth:`ReviewEntryMixin.is_review_collapsed`
        for the collapsing rules for reviews.
        Returns:
            bool:
            ``True`` if the entry should be collapsed. ``False`` if it should
            be expanded.
        """
        data = self.data
        return (
            # Don't collapse if the user has not seen this page before (or
            # are anonymous) and there aren't any change descriptions yet.
            (data.last_visited or len(data.changedescs) > 0) and
            # Don't collapse if there are status updates containing reviews
            # that should not be collapsed.
            self.are_status_updates_collapsed(data.initial_status_updates)
        )
class ReviewEntry(ReviewEntryMixin, DiffCommentsSerializerMixin,
                  BaseReviewRequestPageEntry):
    """A review box.
    Attributes:
        review (reviewboard.reviews.models.Review):
            The review for this entry.
        issue_open_count (int):
            The count of open issues within this review.
        has_issues (bool):
            Whether there are any issues (open or not).
        comments (dict):
            A dictionary of comments. Each key in this represents a comment
            type, and the values are lists of comment objects.
    """
    entry_type_id = 'review'
    needs_reviews = True
    template_name = 'reviews/entries/review.html'
    js_model_class = 'RB.ReviewRequestPage.ReviewEntry'
    js_view_class = 'RB.ReviewRequestPage.ReviewEntryView'
    @classmethod
    def build_entries(cls, data):
        """Generate review entry instances from review request page data.
        Args:
            data (ReviewRequestPageData):
                The data used for the entries on the page.
        Yields:
            ReviewEntry:
            A review entry to include on the page.
        """
        for review in data.reviews:
            # Skip unpublished reviews, replies (shown with their parent
            # review), and status update reviews (shown by the status
            # update entries instead).
            if (not review.public or
                review.is_reply() or
                (data.status_updates_enabled and
                 hasattr(review, 'status_update'))):
                continue
            entry = cls(data=data,
                        review=review)
            for comment in data.review_comments.get(review.pk, []):
                entry.add_comment(comment._type, comment)
            yield entry
    def __init__(self, data, review):
        """Initialize the entry.
        Args:
            data (ReviewRequestPageData):
                Pre-queried data for the review request page.
            review (reviewboard.reviews.models.Review):
                The review.
        """
        self.review = review
        self.issue_open_count = 0
        self.has_issues = False
        self.comments = {
            'diff_comments': [],
            'screenshot_comments': [],
            'file_attachment_comments': [],
            'general_comments': [],
        }
        # Replies may be newer than the review itself; prefer the
        # pre-computed latest timestamp when one is available.
        updated_timestamp = \
            data.latest_timestamps_by_review_id.get(review.pk,
                                                    review.timestamp)
        super(ReviewEntry, self).__init__(data=data,
                                          entry_id=six.text_type(review.pk),
                                          added_timestamp=review.timestamp,
                                          updated_timestamp=updated_timestamp,
                                          avatar_user=review.user)
    @property
    def can_revoke_ship_it(self):
        """Whether the Ship It can be revoked by the current user."""
        return self.review.can_user_revoke_ship_it(self.data.request.user)
    def get_dom_element_id(self):
        """Return the ID used for the DOM element for this entry.
        Returns:
            unicode:
            The ID used for the element.
        """
        return '%s%s' % (self.entry_type_id, self.review.pk)
    def is_entry_new(self, last_visited, user, **kwargs):
        """Return whether the entry is new, from the user's perspective.
        Args:
            last_visited (datetime.datetime):
                The last visited timestamp.
            user (django.contrib.auth.models.User):
                The user viewing the page.
            **kwargs (dict, unused):
                Additional keyword arguments.
        Returns:
            bool:
            ``True`` if the entry will be shown as new. ``False`` if it
            will be shown as an existing entry.
        """
        return self.review.is_new_for_user(user=user,
                                           last_visited=last_visited)
    def add_comment(self, comment_type, comment):
        """Add a comment to this entry.
        Args:
            comment_type (unicode):
                The type of comment (an index into the :py:attr:`comments`
                dictionary).
            comment (reviewboard.reviews.models.BaseComment):
                The comment to add.
        """
        self.comments[comment_type].append(comment)
        if comment.issue_opened:
            self.has_issues = True
            # Issues pending verification still count toward the open
            # issue total shown on the entry.
            if comment.issue_status in (BaseComment.OPEN,
                                        BaseComment.VERIFYING_RESOLVED,
                                        BaseComment.VERIFYING_DROPPED):
                self.issue_open_count += 1
    def get_js_model_data(self):
        """Return data to pass to the JavaScript Model during instantiation.
        The data returned from this function will be provided to the model
        when constructed. This consists of information on the review and the
        comments made on diffs.
        Returns:
            dict:
            A dictionary of attributes to pass to the Model instance.
        """
        model_data = {
            'reviewData': self.serialize_review_js_model_data(self.review),
        }
        diff_comments_data = self.serialize_diff_comments_js_model_data(
            self.comments['diff_comments'])
        if diff_comments_data:
            model_data['diffCommentsData'] = diff_comments_data
        return model_data
    def calculate_collapsed(self):
        """Calculate whether the entry should currently be collapsed.
        The entry will be collapsed if the review is marked as collapsed. See
        :py:meth:`ReviewEntryMixin.is_review_collapsed` for the collapsing
        rules for reviews.
        Returns:
            bool:
            ``True`` if the entry should be collapsed. ``False`` if it should
            be expanded.
        """
        return self.is_review_collapsed(self.review)
class ChangeEntry(StatusUpdatesEntryMixin, BaseReviewRequestPageEntry):
    """A change description box.
    Attributes:
        changedesc (reviewboard.changedescs.models.ChangeDescription):
            The change description for this entry.
    """
    entry_type_id = 'changedesc'
    needs_changedescs = True
    needs_file_attachments = True
    needs_screenshots = True
    template_name = 'reviews/entries/change.html'
    js_model_class = 'RB.ReviewRequestPage.ChangeEntry'
    js_view_class = 'RB.ReviewRequestPage.ChangeEntryView'
    @classmethod
    def build_entries(cls, data):
        """Generate change entry instances from review request page data.
        Args:
            data (ReviewRequestPageData):
                The data used for the entries on the page.
        Yields:
            ChangeEntry:
            A change entry to include on the page.
        """
        for changedesc in data.changedescs:
            entry = cls(data=data,
                        changedesc=changedesc)
            entry.populate_status_updates(
                data.change_status_updates.get(changedesc.pk, []))
            yield entry
    def __init__(self, data, changedesc):
        """Initialize the entry.
        Args:
            data (ReviewRequestPageData):
                Pre-queried data for the review request page.
            changedesc (reviewboard.changedescs.models.ChangeDescription):
                The change description for this entry.
        """
        self.changedesc = changedesc
        self.fields_changed_groups = []
        status_updates = data.change_status_updates.get(changedesc.pk, [])
        review_request = data.review_request
        request = data.request
        # The entry's "updated" time is the newest of the change
        # description's timestamp and its status updates' timestamps.
        timestamps = [changedesc.timestamp] + [
            status_update.timestamp
            for status_update in status_updates
        ]
        BaseReviewRequestPageEntry.__init__(
            self,
            data=data,
            entry_id=six.text_type(changedesc.pk),
            added_timestamp=changedesc.timestamp,
            updated_timestamp=get_latest_timestamp(timestamps),
            avatar_user=changedesc.get_user(review_request))
        # Status update state is only tracked when the feature is enabled
        # for this review request.
        if data.status_updates_enabled:
            StatusUpdatesEntryMixin.__init__(self)
        cur_field_changed_group = None
        # See if there was a review request status change.
        status_change = changedesc.fields_changed.get('status')
        if status_change:
            assert 'new' in status_change
            self.new_status = ReviewRequest.status_to_string(
                status_change['new'][0])
        else:
            self.new_status = None
        # Process the list of fields, in order by fieldset. These will be
        # put into groups composed of inline vs. full-width field values,
        # for render into the box.
        fieldsets = get_review_request_fieldsets(
            include_change_entries_only=True)
        for fieldset in fieldsets:
            for field_cls in fieldset.field_classes:
                field_id = field_cls.field_id
                if field_id not in changedesc.fields_changed:
                    continue
                inline = field_cls.change_entry_renders_inline
                if (not cur_field_changed_group or
                    cur_field_changed_group['inline'] != inline):
                    # Begin a new group of fields.
                    cur_field_changed_group = {
                        'inline': inline,
                        'fields': [],
                    }
                    self.fields_changed_groups.append(cur_field_changed_group)
                # Fields that need the pre-queried page data receive it;
                # others are constructed with just the request.
                if issubclass(field_cls, ReviewRequestPageDataMixin):
                    field = field_cls(review_request, request=request,
                                      data=data)
                else:
                    field = field_cls(review_request, request=request)
                cur_field_changed_group['fields'] += \
                    field.get_change_entry_sections_html(
                        changedesc.fields_changed[field_id])
    def get_dom_element_id(self):
        """Return the ID used for the DOM element for this entry.
        Returns:
            unicode:
            The ID used for the element.
        """
        return '%s%s' % (self.entry_type_id, self.changedesc.pk)
    def is_entry_new(self, last_visited, user, **kwargs):
        """Return whether the entry is new, from the user's perspective.
        Args:
            last_visited (datetime.datetime):
                The last visited timestamp.
            user (django.contrib.auth.models.User):
                The user viewing the page.
            **kwargs (dict, unused):
                Additional keyword arguments.
        Returns:
            bool:
            ``True`` if the entry will be shown as new. ``False`` if it
            will be shown as an existing entry.
        """
        return self.changedesc.is_new_for_user(user=user,
                                               last_visited=last_visited,
                                               model=self.data.review_request)
    def calculate_collapsed(self):
        """Calculate whether the entry should currently be collapsed.
        The entry will be collapsed if the timestamp of the Change Description
        is older than that of the most recent Change Description and there
        aren't any status updates with reviews that should be expanded. see
        :py:meth:`ReviewEntryMixin.is_review_collapsed` for the collapsing
        rules for reviews.
        Returns:
            bool:
            ``True`` if the entry should be collapsed. ``False`` if it should
            be expanded.
        """
        data = self.data
        changedesc = self.changedesc
        status_updates = data.change_status_updates.get(changedesc.pk, [])
        return (
            # If the change is older than the newest change, consider it
            # for collapsing.
            changedesc.timestamp < data.latest_changedesc_timestamp and
            # Don't collapse if there are status updates containing reviews
            # that should not be collapsed.
            (not status_updates or
             self.are_status_updates_collapsed(status_updates))
        )
    def get_js_model_data(self):
        """Return data to pass to the JavaScript Model during instantiation.
        This will serialize commit information if present for the
        :js:class:`RB.DiffCommitListView`.
        Returns:
            dict:
            A dictionary of model data.
        """
        model_data = super(ChangeEntry, self).get_js_model_data()
        commit_info = self.changedesc.fields_changed.get(
            CommitListField.field_id)
        if commit_info:
            # Serialize both the old and new commit lists so the client
            # can render the before/after comparison.
            commits = self.data.commits_by_diffset_id
            old_commits = commits[commit_info['old']]
            new_commits = commits[commit_info['new']]
            model_data['commits'] = [
                commit.serialize()
                for commit in chain(old_commits, new_commits)
            ]
        return model_data
class ReviewRequestPageEntryRegistry(OrderedRegistry):
    """A registry for types of entries on the review request page."""
    # Entries are looked up by their unique entry_type_id attribute.
    lookup_attrs = ['entry_type_id']
    errors = {
        ALREADY_REGISTERED: _(
            'This review request page entry is already registered.'
        ),
        ATTRIBUTE_REGISTERED: _(
            'A review request page entry with the entry_type_id '
            '"%(attr_value)s" is already registered by another entry '
            '(%(duplicate)s).'
        ),
        NOT_REGISTERED: _(
            '"%(attr_value)s" is not a registered review request page entry '
            'ID.'
        ),
    }
    def get_entry(self, entry_type_id):
        """Return an entry with the given type ID.
        Args:
            entry_type_id (unicode):
                The ID of the entry type to return.
        Returns:
            type:
            The registered page entry type matching the ID, or ``None`` if
            it could not be found.
        """
        return self.get('entry_type_id', entry_type_id)
    def get_defaults(self):
        """Return the default review request page entry types for the registry.
        This is used internally by the registry to populate the list of
        built-in types of entries that should be used on the review request
        page.
        Returns:
            list of BaseReviewRequestPageEntry:
            The list of default entry types.
        """
        return [
            ReviewRequestEntry,
            InitialStatusUpdatesEntry,
            ChangeEntry,
            ReviewEntry,
        ]
# Module-level singleton registry used to look up page entry types.
entry_registry = ReviewRequestPageEntryRegistry()
|
chipx86/reviewboard
|
reviewboard/reviews/detail.py
|
Python
|
mit
| 73,785
|
[
"VisIt"
] |
33ddde9311250a7facc98ce85a376fe573df6f857bbe8fc855bff349fc837f16
|
import os
import shutil
import subprocess
# Inclusive SVN revision range of exciting-plus to build and test.
first_revision = 1625
last_revision = 1630
# Contents of make.inc written into each checkout before building
# (MPI compilers, static LAPACK/BLAS, and the libapw addon).
make_inc="""
MAKE = make
F90 = mpif90
CXX = mpicxx
CC = mpicc
CPP_OPTS = -D_MPI_ -D_LIBAPW_
F90_OPTS = -O3 -Wall -cpp $(CPP_OPTS) -fopenmp -I/Users/anton/local/include
F90_LINK_OPTS=-fopenmp -lstdc++
#LAPACK_LIB = -llapack -lblas
LAPACK_LIB = $(HOME)/local/lib/liblapack.a $(HOME)/local/lib/libblas.a
LIBAPW = ./addons/cpp/libapw.a
# === collect all libraries under one name ===
LIBS = $(LAPACK_LIB) $(HDF5_LIB) $(XC_LIB) $(NFFT_LIB) $(MADNESS_LIB) $(LIBAPW)
"""
# Base elk.in input: a bcc Fe ground-state run (task 0); spinpol/exactrho
# flags are appended per test configuration by prepare_input().
elk_in="""
tasks
0
nempty
20
maxscl
100
avec
1.0 1.0 -1.0
1.0 -1.0 1.0
-1.0 1.0 1.0
scale
2.708
atoms
1 : nspecies
'Fe.in' : spfname
1 : natoms
0.0 0.0 0.0 0.0 0.0 0.1 : atposl, bfcmt
ngridk
4 4 4
"""
# Species definition file for Fe, written verbatim as run-tmp/Fe.in.
Fe_in="""'Fe' : spsymb
'iron' : spname
-26.0000 : spzn
101799.2074 : spmass
0.392232E-06 2.0000 32.8043 750 : sprmin, rmt, sprmax, nrmt
10 : spnst
1 0 1 2.00000 T : spn, spl, spk, spocc, spcore
2 0 1 2.00000 T
2 1 1 2.00000 T
2 1 2 4.00000 T
3 0 1 2.00000 F
3 1 1 2.00000 F
3 1 2 4.00000 F
3 2 2 4.00000 F
3 2 3 2.00000 F
4 0 1 2.00000 F
1 : apword
0.1500 0 F : apwe0, apwdm, apwve
4 : nlx
0 1 : l, apword
0.1500 0 T : apwe0, apwdm, apwve
1 1 : l, apword
0.1500 0 T : apwe0, apwdm, apwve
2 1 : l, apword
0.1500 0 T : apwe0, apwdm, apwve
3 1 : l, apword
0.1500 0 T : apwe0, apwdm, apwve
6 : nlorb
0 2 : lorbl, lorbord
0.1500 0 T : lorbe0, lorbdm, lorbve
0.1500 1 T
1 2 : lorbl, lorbord
0.1500 0 T : lorbe0, lorbdm, lorbve
0.1500 1 T
2 2 : lorbl, lorbord
0.1500 0 T : lorbe0, lorbdm, lorbve
0.1500 1 T
3 2 : lorbl, lorbord
0.1500 0 T : lorbe0, lorbdm, lorbve
0.1500 1 T
0 3 : lorbl, lorbord
0.1500 0 F : lorbe0, lorbdm, lorbve
0.1500 1 F
-3.4344 0 T
1 3 : lorbl, lorbord
0.1500 0 F : lorbe0, lorbdm, lorbve
0.1500 1 F
-2.1817 0 T"""
#def initial_checkout():
# shutil.rmtree("trunk-tmp", 1)
# os.system("svn checkout --revision " + str(first_revision) + " http://exciting-plus.googlecode.com/svn/trunk/ trunk-tmp ")
def checkout(rev):
    """Check out the given SVN revision of exciting-plus into ./trunk-tmp."""
    cmd = [
        "svn", "checkout",
        "--revision", str(rev),
        "http://exciting-plus.googlecode.com/svn/trunk/",
        "trunk-tmp",
    ]
    subprocess.call(cmd)
def add_make_inc():
    """Write the hard-coded make.inc build configuration into trunk-tmp.

    Overwrites any make.inc left over from a previous build.
    """
    # Use a context manager so the file is closed even if the write fails.
    with open("trunk-tmp/make.inc", "w") as fout:
        fout.write(make_inc)
def make():
    """Build elk inside ./trunk-tmp; return True when the binary exists."""
    add_make_inc()
    base_cmd = ["make", "-C", "./trunk-tmp"]
    subprocess.call(base_cmd + ["clean"])
    subprocess.call(base_cmd)
    # Success is judged by whether the elk binary was actually produced.
    return os.path.isfile("./trunk-tmp/src/elk")
def prepare_input(spinpol, exactrho):
    """Create a fresh run-tmp directory with elk.in and Fe.in input files.

    Args:
        spinpol: when truthy, append the 'spinpol .true.' flag to elk.in
            (spin-polarised calculation).
        exactrho: when truthy, append the 'exactrho .true.' flag to elk.in.
    """
    # Start from a clean run directory (ignore_errors=1 tolerates absence).
    shutil.rmtree("run-tmp", 1)
    os.mkdir("run-tmp")

    # Context managers guarantee the files are flushed and closed before
    # the elk binary is launched against them.
    with open("run-tmp/elk.in", "w") as fout:
        fout.write(elk_in)
        if spinpol:
            fout.write("spinpol\n.true.\n")
        if exactrho:
            fout.write("exactrho\n.true.\n")

    with open("run-tmp/Fe.in", "w") as fout:
        fout.write(Fe_in)
def execute():
    """Run the compiled elk binary with run-tmp as its working directory."""
    run_dir = os.getcwd() + "/run-tmp"
    binary = os.getcwd() + "/trunk-tmp/src/elk"
    # Block until the calculation finishes.
    subprocess.Popen(binary, cwd=run_dir).wait()
def get_results(results, testid):
    """Record the final total energy for *testid* in the *results* dict.

    Reads ./run-tmp/TOTENERGY.OUT (one total energy per SCF iteration, as
    written by elk) and stores its last line, newline included, under
    ``results[testid]``.
    """
    # Context manager ensures the file handle is closed after reading.
    with open("./run-tmp/TOTENERGY.OUT", "r") as fin:
        lines = fin.readlines()

    results[testid] = lines[-1]
# Each run_elk_* helper prepares the inputs for one configuration
# (non-magnetic vs. magnetic, default vs. exact density), runs elk, and
# stores the final total energy in *results* under the test id.
def run_elk_default_nm(results):
    prepare_input(False, False)
    execute()
    get_results(results, "default_nm")
def run_elk_exactrho_nm(results):
    prepare_input(False, True)
    execute()
    get_results(results, "exactrho_nm")
def run_elk_default_mag(results):
    prepare_input(True, False)
    execute()
    get_results(results, "default_mag")
def run_elk_exactrho_mag(results):
    prepare_input(True, True)
    execute()
    get_results(results, "exactrho_mag")
def run_tests(rout):
    """Run the enabled test configurations and append the energies to rout.
    The exactrho variants are currently commented out (disabled).
    """
    results = {}
    run_elk_default_nm(results)
    #run_elk_exactrho_nm(results)
    run_elk_default_mag(results)
    #run_elk_exactrho_mag(results)
    rout.write(" default_nm : " + results["default_nm"] + "\n")
    #rout.write(" exactrho_nm : " + results["exactrho_nm"] + "\n")
    rout.write(" default_mag : " + results["default_mag"] + "\n")
    #rout.write(" exactrho_mag : " + results["exactrho_mag"] + "\n")
def all_clean():
    """Remove result files left over from previous runs.

    A missing results file is not an error. The checkout and run
    directories are deliberately left alone (see commented lines) so
    incremental reruns stay fast.
    """
    #shutil.rmtree("trunk-tmp", 1)
    #shutil.rmtree("run-tmp", 1)
    try:
        os.remove("elk_results.txt")
    except OSError:
        # File does not exist on a fresh run; nothing to clean. A bare
        # `except:` previously swallowed every exception here.
        pass
# Driver: for every revision in the range, check out, build, and (when the
# build succeeds) run the test configurations, appending to elk_results.txt.
all_clean()
for r in range(first_revision, last_revision + 1):
    rout = open("elk_results.txt","a+")
    rout.write("revision : " + str(r) + "\n")
    checkout(r)
    if (make()):
        run_tests(rout)
    else:
        rout.write(" compilation error\n")
    rout.write("\n")
    rout.close()
|
shedsaw/exciting-plus-rgvw-mod
|
utilities/elk-svn-test.py
|
Python
|
gpl-3.0
| 5,922
|
[
"Elk",
"exciting"
] |
ad4974103c79e93e12d01d4c6194f2ae355a49e5417249d683b7f01e877d4053
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides classes for predicting new structures from existing ones.
"""
import functools
import itertools
import logging
from operator import mul
from monty.json import MSONable
from pymatgen.alchemy.filters import RemoveDuplicatesFilter, RemoveExistingFilter
from pymatgen.alchemy.materials import TransformedStructure
from pymatgen.alchemy.transmuters import StandardTransmuter
from pymatgen.analysis.structure_prediction.substitution_probability import (
SubstitutionProbability,
)
from pymatgen.core.periodic_table import get_el_sp
from pymatgen.transformations.standard_transformations import SubstitutionTransformation
__author__ = "Will Richards, Geoffroy Hautier"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "1.2"
__maintainer__ = "Will Richards"
__email__ = "wrichard@mit.edu"
__date__ = "Aug 31, 2012"
class Substitutor(MSONable):
    """
    This object uses a data mined ionic substitution approach to propose
    compounds likely to be stable. It relies on an algorithm presented in
    Hautier, G., Fischer, C., Ehrlacher, V., Jain, A., and Ceder, G. (2011).
    Data Mined Ionic Substitutions for the Discovery of New Compounds.
    Inorganic Chemistry, 50(2), 656-663. doi:10.1021/ic102031h
    """

    def __init__(self, threshold=1e-3, symprec=0.1, **kwargs):
        """
        This substitutor uses the substitution probability class to
        find good substitutions for a given chemistry or structure.

        Args:
            threshold:
                probability threshold for predictions
            symprec:
                symmetry precision to determine if two structures
                are duplicates
            kwargs:
                kwargs for the SubstitutionProbability object
                lambda_table, alpha
        """
        self._kwargs = kwargs
        self._sp = SubstitutionProbability(**kwargs)
        self._threshold = threshold
        self._symprec = symprec

    def get_allowed_species(self):
        """
        returns the species in the domain of the probability function
        any other specie will not work
        """
        return self._sp.species

    def pred_from_structures(
        self,
        target_species,
        structures_list,
        remove_duplicates=True,
        remove_existing=False,
    ):
        """
        performs a structure prediction targeting compounds containing all of
        the target_species, based on a list of structure (those structures
        can for instance come from a database like the ICSD). It will return
        all the structures formed by ionic substitutions with a probability
        higher than the threshold

        Notes:
            If the default probability model is used, input structures must
            be oxidation state decorated. See AutoOxiStateDecorationTransformation

            This method does not change the number of species in a structure. i.e
            if the number of target species is 3, only input structures containing
            3 species will be considered.

        Args:
            target_species:
                a list of species with oxidation states
                e.g., [Species('Li',1),Species('Ni',2), Species('O',-2)]
            structures_list:
                a list of dictionary of the form {'structure':Structure object
                ,'id':some id where it comes from}
                the id can for instance refer to an ICSD id.
            remove_duplicates:
                if True, the duplicates in the predicted structures will
                be removed
            remove_existing:
                if True, the predicted structures that already exist in the
                structures_list will be removed

        Returns:
            a list of TransformedStructure objects.
        """
        target_species = [get_el_sp(sp) for sp in target_species]
        transmuter = StandardTransmuter([])

        # The probability-model domain is fixed; compute it once instead of
        # rebuilding the set for every structure/permutation pair.
        allowed_species = set(self.get_allowed_species())

        if len(set(target_species) & allowed_species) != len(target_species):
            raise ValueError(
                "the species in target_species are not allowed " + "for the probability model you are using"
            )

        for permut in itertools.permutations(target_species):
            for s in structures_list:
                # Check that the structure has the same number of species,
                # that all of them are in the model's domain, and that the
                # substitution probability exceeds the threshold.
                els = s["structure"].composition.elements

                if (len(els) != len(permut)
                        or len(set(els) & allowed_species) != len(els)):
                    continue

                # Compute the probability once and reuse it below
                # (previously it was computed a second time for 'proba').
                proba = self._sp.cond_prob_list(permut, els)
                if proba <= self._threshold:
                    continue

                clean_subst = {
                    els[i]: permut[i]
                    for i in range(len(els))
                    if els[i] != permut[i]
                }

                if len(clean_subst) == 0:
                    # Identity substitution; would reproduce the input.
                    continue

                transf = SubstitutionTransformation(clean_subst)

                if Substitutor._is_charge_balanced(
                        transf.apply_transformation(s["structure"])):
                    ts = TransformedStructure(
                        s["structure"],
                        [transf],
                        history=[{"source": s["id"]}],
                        other_parameters={
                            "type": "structure_prediction",
                            "proba": proba,
                        },
                    )
                    transmuter.append_transformed_structures([ts])

        if remove_duplicates:
            transmuter.apply_filter(RemoveDuplicatesFilter(symprec=self._symprec))
        if remove_existing:
            # Make the list of structures from structures_list that corresponds to the
            # target species
            chemsys = {sp.symbol for sp in target_species}
            structures_list_target = [
                st["structure"]
                for st in structures_list
                if Substitutor._is_from_chemical_system(chemsys, st["structure"])
            ]
            transmuter.apply_filter(RemoveExistingFilter(structures_list_target, symprec=self._symprec))
        return transmuter.transformed_structures

    @staticmethod
    def _is_charge_balanced(struct):
        """
        checks if the structure object is charge balanced
        """
        return sum(s.specie.oxi_state for s in struct.sites) == 0.0

    @staticmethod
    def _is_from_chemical_system(chemical_system, struct):
        """
        checks if the structure object is from the given chemical system
        """
        return {sp.symbol for sp in struct.composition} == set(chemical_system)

    def pred_from_list(self, species_list):
        """
        There are an exceptionally large number of substitutions to
        look at (260^n), where n is the number of species in the
        list. We need a more efficient than brute force way of going
        through these possibilities. The brute force method would be::

            output = []
            for p in itertools.product(self._sp.species_list
                                       , repeat = len(species_list)):
                if self._sp.conditional_probability_list(p, species_list)
                        > self._threshold:
                    output.append(dict(zip(species_list,p)))
            return output

        Instead of that we do a branch and bound.

        Args:
            species_list:
                list of species in the starting structure

        Returns:
            list of dictionaries, each including a substitutions
            dictionary, and a probability value
        """
        species_list = [get_el_sp(sp) for sp in species_list]
        # calculate the highest probabilities to help us stop the recursion
        max_probabilities = []
        for s2 in species_list:
            max_p = 0
            for s1 in self._sp.species:
                max_p = max([self._sp.cond_prob(s1, s2), max_p])
            max_probabilities.append(max_p)
        output = []

        def _recurse(output_prob, output_species):
            # Best case: probabilities found so far, padded with the maximum
            # achievable probability for each remaining position. If even
            # that cannot beat the threshold, prune this branch.
            best_case_prob = list(max_probabilities)
            best_case_prob[: len(output_prob)] = output_prob
            if functools.reduce(mul, best_case_prob) > self._threshold:
                if len(output_species) == len(species_list):
                    # best_case_prob == output_prob here (fully assigned).
                    odict = {
                        "substitutions": dict(zip(species_list, output_species)),
                        "probability": functools.reduce(mul, best_case_prob),
                    }
                    output.append(odict)
                    return
                for sp in self._sp.species:
                    i = len(output_prob)
                    prob = self._sp.cond_prob(sp, species_list[i])
                    _recurse(output_prob + [prob], output_species + [sp])

        _recurse([], [])
        logging.info(f"{len(output)} substitutions found")
        return output

    def pred_from_comp(self, composition):
        """
        Similar to pred_from_list except this method returns a list after
        checking that compositions are charge balanced.
        """
        output = []
        predictions = self.pred_from_list(composition.elements)
        for p in predictions:
            subs = p["substitutions"]
            charge = 0
            for i_el in composition.elements:
                f_el = subs[i_el]
                charge += f_el.oxi_state * composition[i_el]
            if charge == 0:
                output.append(p)
        logging.info(f"{len(output)} charge balanced compositions found")
        return output

    def as_dict(self):
        """
        Returns: MSONable dict
        """
        return {
            "name": self.__class__.__name__,
            "version": __version__,
            "kwargs": self._kwargs,
            "threshold": self._threshold,
            "@module": self.__class__.__module__,
            "@class": self.__class__.__name__,
        }

    @classmethod
    def from_dict(cls, d):
        """
        Args:
            d (dict): Dict representation

        Returns:
            Class
        """
        t = d["threshold"]
        kwargs = d["kwargs"]
        return cls(threshold=t, **kwargs)
|
vorwerkc/pymatgen
|
pymatgen/analysis/structure_prediction/substitutor.py
|
Python
|
mit
| 10,471
|
[
"pymatgen"
] |
0e54085c0a9a910ccbde93183074083bc3d8da860cde2bdf514578f967c2704a
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2009-2013 Zuza Software Foundation
# Copyright 2013 Evernote Corporation
#
# This file is part of Pootle.
#
# Pootle is free software; you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# Pootle is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# Pootle; if not, see <http://www.gnu.org/licenses/>.
from django.conf import settings
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.translation import ugettext_lazy as _
from pootle.core.managers import RelatedManager
from pootle.core.markup import get_markup_filter_name, MarkupField
from pootle.core.url_helpers import get_editor_filter
from pootle.i18n.gettext import tr_lang, language_dir
from pootle_misc.aggregate import max_column
from pootle_misc.baseurl import l
from pootle_misc.util import getfromcache
from pootle_store.models import Unit, Suggestion
from pootle_store.util import statssum, OBSOLETE
# FIXME: Generate key dynamically
# Cache key under which the list of live languages is stored.
CACHE_KEY = 'pootle-languages'
class LanguageManager(RelatedManager):
    # The unique language code acts as the natural key for
    # serialization/fixture loading.
    def get_by_natural_key(self, code):
        return self.get(code=code)
class LiveLanguageManager(models.Manager):
    """Manager that only considers `live` languages.
    A live language is any language other than the special `Templates`
    language that have any project with translatable files and is not a
    source language.
    Note that this doesn't inherit from :cls:`RelatedManager`.
    """
    def get_query_set(self):
        # Exclude the special 'templates' pseudo-language, require at
        # least one translation project, and exclude languages that are a
        # project source language (project__isnull=True).
        return super(LiveLanguageManager, self).get_query_set().filter(
            ~models.Q(code='templates'),
            translationproject__isnull=False,
            project__isnull=True,
        ).distinct()
    def cached(self):
        # NOTE(review): an empty result is falsy, so a site with no live
        # languages re-queries on every call -- confirm this is intended.
        languages = cache.get(CACHE_KEY)
        if not languages:
            languages = self.all()
            cache.set(CACHE_KEY, languages, settings.OBJECT_CACHE_TIMEOUT)
        return languages
class Language(models.Model):
    """A translation target language.

    Each language owns a corresponding :cls:`pootle_app.Directory`
    (created in :meth:`save`) and exposes cached, aggregated statistics
    over all of its translation projects.
    """

    # ISO 639 code, optionally suffixed with '_' plus an ISO 3166 country
    # code; unique and indexed since it is the primary lookup key.
    code = models.CharField(
        max_length=50,
        null=False,
        unique=True,
        db_index=True,
        verbose_name=_("Code"),
        help_text=_('ISO 639 language code for the language, possibly '
                    'followed by an underscore (_) and an ISO 3166 country '
                    'code. <a href="http://www.w3.org/International/articles/'
                    'language-tags/">More information</a>'),
    )
    fullname = models.CharField(
        max_length=255,
        null=False,
        verbose_name=_("Full Name"),
    )
    description = MarkupField(
        blank=True,
        help_text=_('A description of this language. This is useful to give '
                    'more information or instructions. Allowed markup: %s',
                    get_markup_filter_name()),
    )
    specialchars = models.CharField(
        max_length=255,
        blank=True,
        verbose_name=_("Special Characters"),
        help_text=_('Enter any special characters that users might find '
                    'difficult to type'),
    )
    nplurals = models.SmallIntegerField(
        default=0,
        choices=(
            (0, _('Unknown')), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)
        ),
        verbose_name=_("Number of Plurals"),
        help_text=_('For more information, visit <a href="'
                    'http://docs.translatehouse.org/projects/'
                    'localization-guide/en/latest/l10n/pluralforms.html">our '
                    'page</a> on plural forms.'),
    )
    pluralequation = models.CharField(
        max_length=255,
        blank=True,
        verbose_name=_("Plural Equation"),
        help_text=_('For more information, visit <a href="'
                    'http://docs.translatehouse.org/projects/'
                    'localization-guide/en/latest/l10n/pluralforms.html">our '
                    'page</a> on plural forms.'),
    )
    # Directory backing this language; maintained by save()/delete() below,
    # hence not editable in the admin.
    directory = models.OneToOneField(
        'pootle_app.Directory',
        db_index=True,
        editable=False,
    )

    objects = LanguageManager()
    live = LiveLanguageManager()

    class Meta:
        ordering = ['code']
        db_table = 'pootle_app_language'

    def natural_key(self):
        return (self.code,)
    # Directories must be serialized before the languages that reference them.
    natural_key.dependencies = ['pootle_app.Directory']

    ############################ Properties ###################################

    @property
    def pootle_path(self):
        # Virtual path of this language inside Pootle, e.g. '/fr/'.
        return '/%s/' % self.code

    @property
    def name(self):
        """Localized fullname for the language."""
        return tr_lang(self.fullname)

    @property
    def direction(self):
        """Return the language direction."""
        return language_dir(self.code)

    ############################ Methods ######################################

    def __repr__(self):
        return u'<%s: %s>' % (self.__class__.__name__, self.fullname)

    def __unicode__(self):
        return u"%s - %s" % (self.name, self.code)

    def save(self, *args, **kwargs):
        # create corresponding directory object.
        from pootle_app.models.directory import Directory
        self.directory = Directory.objects.root.get_or_make_subdir(self.code)
        super(Language, self).save(*args, **kwargs)
        # FIXME: far from ideal, should cache at the manager level instead.
        cache.delete(CACHE_KEY)

    def delete(self, *args, **kwargs):
        # Keep a reference so the directory can be removed after the row.
        directory = self.directory
        super(Language, self).delete(*args, **kwargs)
        directory.delete()
        # FIXME: far from ideal, should cache at the manager level instead.
        cache.delete(CACHE_KEY)

    def get_absolute_url(self):
        return l(self.pootle_path)

    def get_translate_url(self, **kwargs):
        # Editor URL for this language, optionally narrowed by filter kwargs.
        return u''.join([
            reverse('pootle-language-translate', args=[self.code]),
            get_editor_filter(**kwargs),
        ])

    @getfromcache
    def get_mtime(self):
        # Most recent modification time over all units in this language.
        return max_column(Unit.objects.filter(
            store__translation_project__language=self), 'mtime', None)

    @getfromcache
    def getquickstats(self):
        # Aggregate word/unit statistics over all translation projects.
        return statssum(self.translationproject_set.iterator())

    @getfromcache
    def get_suggestion_count(self):
        """Check the number of suggestions for this language.

        This checks all units in the stores for all the translation projects
        in this language.
        """
        criteria = {
            'unit__store__translation_project__language': self,
            'unit__state__gt': OBSOLETE,
        }
        return Suggestion.objects.filter(**criteria).count()

    def translated_percentage(self):
        qs = self.getquickstats()
        # Guard against division by zero for languages with no source words.
        word_count = max(qs['totalsourcewords'], 1)
        return int(100.0 * qs['translatedsourcewords'] / word_count)
|
arky/pootle-dev
|
pootle/apps/pootle_language/models.py
|
Python
|
gpl-2.0
| 7,236
|
[
"VisIt"
] |
cad3d33e3ac6a946d461ac61b98e15c088a64d2b9efc1c745d060e9610be5ed4
|
"""
This is a sample implementation for using DGL with DeepChem!
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from deepchem.models.losses import Loss, L2Loss, SparseSoftmaxCrossEntropy
from deepchem.models.torch_models.torch_model import TorchModel
class CGCNNLayer(nn.Module):
  """The convolutional layer of CGCNN.

  This class was implemented using DGLGraph methods.
  Please confirm how to use DGLGraph methods from below link.
  See: https://docs.dgl.ai/en/0.4.x/tutorials/models/1_gnn/9_gat.html

  Examples
  --------
  >>> import deepchem as dc
  >>> import pymatgen as mg
  >>> lattice = mg.Lattice.cubic(4.2)
  >>> structure = mg.Structure(lattice, ["Cs", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])
  >>> featurizer = dc.feat.CGCNNFeaturizer()
  >>> cgcnn_graph = featurizer.featurize([structure])[0]
  >>> cgcnn_graph.num_node_features
  92
  >>> cgcnn_graph.num_edge_features
  41
  >>> cgcnn_dgl_graph = cgcnn_graph.to_dgl_graph()
  >>> print(type(cgcnn_dgl_graph))
  <class 'dgl.heterograph.DGLHeteroGraph'>
  >>> layer = CGCNNLayer(hidden_node_dim=92, edge_dim=41)
  >>> node_feats = cgcnn_dgl_graph.ndata.pop('x')
  >>> edge_feats = cgcnn_dgl_graph.edata.pop('edge_attr')
  >>> new_node_feats = layer(cgcnn_dgl_graph, node_feats, edge_feats)

  Notes
  -----
  This class requires DGL and PyTorch to be installed.
  """

  def __init__(self,
               hidden_node_dim: int,
               edge_dim: int,
               batch_norm: bool = True):
    """
    Parameters
    ----------
    hidden_node_dim: int
      The length of the hidden node feature vectors.
    edge_dim: int
      The length of the edge feature vectors.
    batch_norm: bool, default True
      Whether to apply batch normalization or not.
    """
    super(CGCNNLayer, self).__init__()
    # The linear layer consumes the concatenation of the source node,
    # destination node and edge feature vectors...
    z_dim = 2 * hidden_node_dim + edge_dim
    # ...and produces two hidden_node_dim-wide chunks: a gate and a message.
    linear_out_dim = 2 * hidden_node_dim
    self.linear = nn.Linear(z_dim, linear_out_dim)
    self.batch_norm = nn.BatchNorm1d(linear_out_dim) if batch_norm else None

  def message_func(self, edges):
    """DGL message UDF: compute a sigmoid-gated softplus message per edge."""
    z = torch.cat(
        [edges.src['x'], edges.dst['x'], edges.data['edge_attr']], dim=1)
    z = self.linear(z)
    if self.batch_norm is not None:
      z = self.batch_norm(z)
    gated_z, message_z = z.chunk(2, dim=1)
    gated_z = torch.sigmoid(gated_z)
    message_z = F.softplus(message_z)
    return {'message': gated_z * message_z}

  def reduce_func(self, nodes):
    """DGL reduce UDF: sum incoming messages and update node features."""
    nbr_sumed = torch.sum(nodes.mailbox['message'], dim=1)
    new_x = F.softplus(nodes.data['x'] + nbr_sumed)
    return {'new_x': new_x}

  def forward(self, dgl_graph, node_feats, edge_feats):
    """Update node representations.

    Parameters
    ----------
    dgl_graph: DGLGraph
      DGLGraph for a batch of graphs.
    node_feats: torch.Tensor
      The node features. The shape is `(N, hidden_node_dim)`.
    edge_feats: torch.Tensor
      The edge features. The shape is `(E, edge_dim)` where `E` is the
      number of edges in the batch.

    Returns
    -------
    node_feats: torch.Tensor
      The updated node features. The shape is `(N, hidden_node_dim)`.
    """
    dgl_graph.ndata['x'] = node_feats
    dgl_graph.edata['edge_attr'] = edge_feats
    dgl_graph.update_all(self.message_func, self.reduce_func)
    node_feats = dgl_graph.ndata.pop('new_x')
    return node_feats
class CGCNN(nn.Module):
  """Crystal Graph Convolutional Neural Network (CGCNN).

  This model takes arbitrary crystal structures as an input, and predicts material properties
  using the element information and connection of atoms in the crystal. If you want to get
  some material properties which has a high computational cost like band gap in the case
  of DFT, this model may be useful. This model is one of variants of Graph Convolutional
  Networks. The main differences between other GCN models are how to construct graphs and
  how to update node representations. This model defines the crystal graph from structures
  using distances between atoms. The crystal graph is an undirected multigraph which is defined
  by nodes representing atom properties and edges representing connections between atoms
  in a crystal. And, this model updates the node representations using both neighbor node
  and edge representations. Please confirm the detail algorithms from [1]_.

  Examples
  --------
  >>> import deepchem as dc
  >>> import pymatgen as mg
  >>> lattice = mg.Lattice.cubic(4.2)
  >>> structure = mg.Structure(lattice, ["Cs", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])
  >>> featurizer = dc.feat.CGCNNFeaturizer()
  >>> cgcnn_feat = featurizer.featurize([structure])[0]
  >>> print(type(cgcnn_feat))
  <class 'deepchem.feat.graph_data.GraphData'>
  >>> cgcnn_dgl_feat = cgcnn_feat.to_dgl_graph()
  >>> print(type(cgcnn_dgl_feat))
  <class 'dgl.heterograph.DGLHeteroGraph'>
  >>> model = dc.models.CGCNN(mode='regression', n_tasks=2)
  >>> out = model(cgcnn_dgl_feat)
  >>> print(type(out))
  <class 'torch.Tensor'>
  >>> out.shape == (1, 2)
  True

  References
  ----------
  .. [1] Xie, Tian, and Jeffrey C. Grossman. "Crystal graph convolutional neural networks
     for an accurate and interpretable prediction of material properties." Physical review letters
     120.14 (2018): 145301.

  Notes
  -----
  This class requires DGL and PyTorch to be installed.
  """

  def __init__(
      self,
      in_node_dim: int = 92,
      hidden_node_dim: int = 64,
      in_edge_dim: int = 41,
      num_conv: int = 3,
      predictor_hidden_feats: int = 128,
      n_tasks: int = 1,
      mode: str = 'regression',
      n_classes: int = 2,
  ):
    """
    Parameters
    ----------
    in_node_dim: int, default 92
      The length of the initial node feature vectors. The 92 is
      based on length of vectors in the atom_init.json.
    hidden_node_dim: int, default 64
      The length of the hidden node feature vectors.
    in_edge_dim: int, default 41
      The length of the initial edge feature vectors. The 41 is
      based on default setting of CGCNNFeaturizer.
    num_conv: int, default 3
      The number of convolutional layers.
    predictor_hidden_feats: int, default 128
      The size for hidden representations in the output MLP predictor.
    n_tasks: int, default 1
      The number of the output size.
    mode: str, default 'regression'
      The model type, 'classification' or 'regression'.
    n_classes: int, default 2
      The number of classes to predict (only used in classification mode).
    """
    try:
      import dgl
    except ImportError:
      # Narrow except: a bare `except:` would also swallow KeyboardInterrupt.
      raise ImportError("This class requires DGL to be installed.")

    super(CGCNN, self).__init__()
    if mode not in ['classification', 'regression']:
      raise ValueError("mode must be either 'classification' or 'regression'")

    self.n_tasks = n_tasks
    self.mode = mode
    self.n_classes = n_classes
    self.embedding = nn.Linear(in_node_dim, hidden_node_dim)
    self.conv_layers = nn.ModuleList([
        CGCNNLayer(
            hidden_node_dim=hidden_node_dim,
            edge_dim=in_edge_dim,
            batch_norm=True) for _ in range(num_conv)
    ])
    # Graph-level readout: mean over node features per graph.
    self.pooling = dgl.mean_nodes
    self.fc = nn.Linear(hidden_node_dim, predictor_hidden_feats)
    if self.mode == 'regression':
      self.out = nn.Linear(predictor_hidden_feats, n_tasks)
    else:
      self.out = nn.Linear(predictor_hidden_feats, n_tasks * n_classes)

  def forward(self, dgl_graph):
    """Predict labels

    Parameters
    ----------
    dgl_graph: DGLGraph
      DGLGraph for a batch of graphs. The graph expects that the node features
      are stored in `ndata['x']`, and the edge features are stored in `edata['edge_attr']`.

    Returns
    -------
    out: torch.Tensor
      The output values of this model.
      If mode == 'regression', the shape is `(batch_size, n_tasks)`.
      If mode == 'classification', the shape is `(batch_size, n_tasks, n_classes)` (n_tasks > 1)
      or `(batch_size, n_classes)` (n_tasks == 1) and the output values are probabilities of each class label.
    """
    graph = dgl_graph

    # embedding node features
    node_feats = graph.ndata.pop('x')
    edge_feats = graph.edata.pop('edge_attr')
    node_feats = self.embedding(node_feats)

    # convolutional layer
    for conv in self.conv_layers:
      node_feats = conv(graph, node_feats, edge_feats)

    # pooling
    graph.ndata['updated_x'] = node_feats
    graph_feat = F.softplus(self.pooling(graph, 'updated_x'))
    graph_feat = F.softplus(self.fc(graph_feat))
    out = self.out(graph_feat)

    if self.mode == 'regression':
      return out
    else:
      logits = out.view(-1, self.n_tasks, self.n_classes)
      # Collapse only the task dimension when n_tasks == 1 so the output is
      # (batch_size, n_classes). A blanket squeeze() would also drop the
      # batch dimension when batch_size == 1, breaking the documented shape.
      if self.n_tasks == 1:
        logits = logits.squeeze(1)
      # Normalize over the class dimension explicitly: the implicit-dim form
      # of softmax is deprecated and selects dim 0 for 3-D input, which would
      # normalize across the batch instead of across classes.
      proba = F.softmax(logits, dim=-1)
      return proba, logits
class CGCNNModel(TorchModel):
  """Crystal Graph Convolutional Neural Network (CGCNN).

  Here is a simple example of code that uses the CGCNNModel with
  materials dataset.

  >> import deepchem as dc
  >> dataset_config = {"reload": False, "featurizer": dc.feat.CGCNNFeaturizer, "transformers": []}
  >> tasks, datasets, transformers = dc.molnet.load_perovskite(**dataset_config)
  >> train, valid, test = datasets
  >> model = dc.models.CGCNNModel(mode='regression', batch_size=32, learning_rate=0.001)
  >> model.fit(train, nb_epoch=50)

  This model takes arbitrary crystal structures as an input, and predicts material properties
  using the element information and connection of atoms in the crystal. If you want to get
  some material properties which has a high computational cost like band gap in the case
  of DFT, this model may be useful. This model is one of variants of Graph Convolutional
  Networks. The main differences between other GCN models are how to construct graphs and
  how to update node representations. This model defines the crystal graph from structures
  using distances between atoms. The crystal graph is an undirected multigraph which is defined
  by nodes representing atom properties and edges representing connections between atoms
  in a crystal. And, this model updates the node representations using both neighbor node
  and edge representations. Please confirm the detail algorithms from [1]_.

  References
  ----------
  .. [1] Xie, Tian, and Jeffrey C. Grossman. "Crystal graph convolutional neural networks
     for an accurate and interpretable prediction of material properties." Physical review letters
     120.14 (2018): 145301.

  Notes
  -----
  This class requires DGL and PyTorch to be installed.
  """

  def __init__(self,
               in_node_dim: int = 92,
               hidden_node_dim: int = 64,
               in_edge_dim: int = 41,
               num_conv: int = 3,
               predictor_hidden_feats: int = 128,
               n_tasks: int = 1,
               mode: str = 'regression',
               n_classes: int = 2,
               **kwargs):
    """
    This class accepts all the keyword arguments from TorchModel.

    Parameters
    ----------
    in_node_dim: int, default 92
      The length of the initial node feature vectors. The 92 is
      based on length of vectors in the atom_init.json.
    hidden_node_dim: int, default 64
      The length of the hidden node feature vectors.
    in_edge_dim: int, default 41
      The length of the initial edge feature vectors. The 41 is
      based on default setting of CGCNNFeaturizer.
    num_conv: int, default 3
      The number of convolutional layers.
    predictor_hidden_feats: int, default 128
      The size for hidden representations in the output MLP predictor.
    n_tasks: int, default 1
      The number of the output size.
    mode: str, default 'regression'
      The model type, 'classification' or 'regression'.
    n_classes: int, default 2
      The number of classes to predict (only used in classification mode).
    kwargs: Dict
      This class accepts all the keyword arguments from TorchModel.
    """
    model = CGCNN(in_node_dim, hidden_node_dim, in_edge_dim, num_conv,
                  predictor_hidden_feats, n_tasks, mode, n_classes)
    if mode == "regression":
      loss: Loss = L2Loss()
      output_types = ['prediction']
    else:
      loss = SparseSoftmaxCrossEntropy()
      # The classification forward returns (proba, logits); the second
      # output feeds the cross-entropy loss.
      output_types = ['prediction', 'loss']
    super(CGCNNModel, self).__init__(
        model, loss=loss, output_types=output_types, **kwargs)

  def _prepare_batch(self, batch):
    """Create batch data for CGCNN.

    Parameters
    ----------
    batch: Tuple
      The tuple are `(inputs, labels, weights)`.

    Returns
    -------
    inputs: DGLGraph
      DGLGraph for a batch of graphs.
    labels: List[torch.Tensor] or None
      The labels converted to torch.Tensor
    weights: List[torch.Tensor] or None
      The weights for each sample or sample/task pair converted to torch.Tensor
    """
    try:
      import dgl
    except ImportError:
      # Narrow except: a bare `except:` would also swallow KeyboardInterrupt.
      raise ImportError("This class requires DGL to be installed.")

    inputs, labels, weights = batch
    dgl_graphs = [graph.to_dgl_graph() for graph in inputs[0]]
    # Merge per-sample graphs into one batched graph on the model device.
    inputs = dgl.batch(dgl_graphs).to(self.device)
    # Delegate label/weight tensor conversion to the base implementation.
    _, labels, weights = super(CGCNNModel, self)._prepare_batch(([], labels,
                                                                 weights))
    return inputs, labels, weights
|
lilleswing/deepchem
|
deepchem/models/torch_models/cgcnn.py
|
Python
|
mit
| 13,202
|
[
"CRYSTAL",
"pymatgen"
] |
13d2de6ffd904b79d323d73f59c4ed5bd6c859b512994678eb63d9ac68340176
|
"""
===============================================
Compute all-to-all connectivity in sensor space
===============================================
Computes the Phase Lag Index (PLI) between all gradiometers and shows the
connectivity in 3D using the helmet geometry. The left visual stimulation data
are used, which produce strong connectivity in the right occipital sensors.
"""
# Author: Martin Luessi <mluessi@nmr.mgh.harvard.edu>
#
# License: BSD (3-clause)
import numpy as np
from scipy import linalg

import mne
from mne import io
from mne.connectivity import spectral_connectivity
from mne.datasets import sample

print(__doc__)

###############################################################################
# Set parameters
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'

# Setup for reading the raw data
raw = io.Raw(raw_fname)
events = mne.read_events(event_fname)

# Add a bad channel
raw.info['bads'] += ['MEG 2443']

# Pick MEG gradiometers (EOG is kept only for artifact rejection below)
picks = mne.pick_types(raw.info, meg='grad', eeg=False, stim=False, eog=True,
                       exclude='bads')

# Create epochs for the visual condition
# NOTE(review): event_id 3 is presumably the left-visual stimulus mentioned
# in the module docstring — confirm against the sample dataset's event table.
event_id, tmin, tmax = 3, -0.2, 0.5
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), reject=dict(grad=4000e-13, eog=150e-6))

# Compute connectivity for band containing the evoked response.
# We exclude the baseline period
fmin, fmax = 3., 9.
sfreq = raw.info['sfreq']  # the sampling frequency
tmin = 0.0  # exclude the baseline period
# Phase Lag Index, averaged over the 3-9 Hz band (faverage=True).
con, freqs, times, n_epochs, n_tapers = spectral_connectivity(
    epochs, method='pli', mode='multitaper', sfreq=sfreq, fmin=fmin, fmax=fmax,
    faverage=True, tmin=tmin, mt_adaptive=False, n_jobs=2)

# the epochs contain an EOG channel, which we remove now
ch_names = epochs.ch_names
idx = [ch_names.index(name) for name in ch_names if name.startswith('MEG')]
con = con[idx][:, idx]

# con is a 3D array where the last dimension is size one since we averaged
# over frequencies in a single band. Here we make it 2D
con = con[:, :, 0]

# Now, visualize the connectivity in 3D
from mayavi import mlab  # noqa

mlab.figure(size=(600, 600), bgcolor=(0.5, 0.5, 0.5))

# Plot the sensor locations (first 3 'loc' entries are the x/y/z position)
sens_loc = [raw.info['chs'][picks[i]]['loc'][:3] for i in idx]
sens_loc = np.array(sens_loc)

pts = mlab.points3d(sens_loc[:, 0], sens_loc[:, 1], sens_loc[:, 2],
                    color=(1, 1, 1), opacity=1, scale_factor=0.005)

# Get the strongest connections
n_con = 20  # show up to 20 connections
min_dist = 0.05  # exclude sensors that are less than 5cm apart
# Threshold at the n_con-th largest connectivity value over the whole matrix.
threshold = np.sort(con, axis=None)[-n_con]
ii, jj = np.where(con >= threshold)

# Remove close connections
con_nodes = list()
con_val = list()
for i, j in zip(ii, jj):
    if linalg.norm(sens_loc[i] - sens_loc[j]) > min_dist:
        con_nodes.append((i, j))
        con_val.append(con[i, j])

con_val = np.array(con_val)

# Show the connections as tubes between sensors, colored by PLI strength.
vmax = np.max(con_val)
vmin = np.min(con_val)
for val, nodes in zip(con_val, con_nodes):
    x1, y1, z1 = sens_loc[nodes[0]]
    x2, y2, z2 = sens_loc[nodes[1]]
    points = mlab.plot3d([x1, x2], [y1, y2], [z1, z2], [val, val],
                         vmin=vmin, vmax=vmax, tube_radius=0.001,
                         colormap='RdBu')
    # Reverse the colormap so strong connections appear in red.
    points.module_manager.scalar_lut_manager.reverse_lut = True

mlab.scalarbar(title='Phase Lag Index (PLI)', nb_labels=4)

# Add the sensor names for the connections shown
nodes_shown = list(set([n[0] for n in con_nodes] +
                       [n[1] for n in con_nodes]))

for node in nodes_shown:
    x, y, z = sens_loc[node]
    mlab.text3d(x, y, z, raw.ch_names[picks[node]], scale=0.005,
                color=(0, 0, 0))

# Fixed camera position (azimuth, elevation, distance, focal point).
view = (-88.7, 40.8, 0.76, np.array([-3.9e-4, -8.5e-3, -1e-2]))
mlab.view(*view)
|
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated
|
python-packages/mne-python-0.10/examples/connectivity/plot_sensor_connectivity.py
|
Python
|
bsd-3-clause
| 3,926
|
[
"Mayavi"
] |
d9ba2b15a2a94b5b68c333bcb378b1dacb119f81e10422feb85ed56deef6547c
|
# Atom-type template definitions for (a subset of) the OPLS force field.
# NOTE(review): this string appears to be consumed by a template-matching
# typing engine ('type:'/'!'/'template:'/'precedence:' records); lines
# starting with '#' inside the string look like commented-out entries —
# confirm against the parser.
data = """
type: opls_111
! O TIP3P Water
template: [O (H)(H)]
type: opls_112
! H TIP3P Water
template: [H [O(H)]]
type: opls_135
! alkane CH3
template: [C (C)(H)(H)(H)]
type: opls_136
! alkane CH2
template: [C (C)(C)(H)(H)]
type: opls_137
! alkane CH
template: [C (C)(C)(C)(H)]
type: opls_138
! alkane CH4
template: [C (H)(H)(H)(H)]
# opls_139 12.01100 ; alkane C
type: opls_140
! alkane H
template: [H [C (*)(CH)(CH)]]
type: opls_141
! alkene C (R2-C=)
template: [C (^H)(^H)(C)]
type: opls_142
! alkene C (RH-C=)
template: [C (^H)(^H)(H)]
type: opls_143
! alkene C (H2-C=)
template: [C (^H)(H)(H)]
type: opls_144
! alkene H (H-C=)
template: [H [C(*)(*)]]
type: opls_145
! Benzene C - 12 site JACS,112,4768-90
template: [CR6 (C)(C)(H)]
# This type doesn't make sense, it should not have a charge...
# opls_145B 12.01100 ; Biphenyl C1
type: opls_146
! Benzene H - 12 site.
template: [H [CR6 (C)(C)]]
type: opls_147
! Naphthalene fusion C (C9)
template: [CR6 (CR6)(CR6)(CR6)]
type: opls_148
! C: CH3, toluene
template: [C (CR6)(H)(H)(H)]
# hydrogens
precedence: ((opls_146) (opls_144))
type: opls_149
! C: CH2, ethyl benzene
template: [C (CR6)[C(H)(H)(H)](H)]
type: opls_150
! diene =CH-CH=; use #178 for =CR-CR=
template: [C (H)[C(*)(*)][C(C)(H)]]
# opls_151 35.45300 ; Cl in alkyl chlorides
# opls_152 12.01100 ; RCH2Cl in alkyl chlorides
# opls_153 1.00800 ; H in RCH2Cl in alkyl chlorides
type: opls_154
! all-atom O: mono alcohols
template: [O (H)(C)]
type: opls_155
! all-atom H(O): mono alcohols, OP(=O)2
template: [H [O(C)]]
type: opls_156
! all-atom H(C): methanol
template: [H [C[O(H)](H)(H)]]
type: opls_157
! all-atom C: CH3 & CH2, alcohols
template: [C [O(H)](CH)(H)(H)]
type: opls_158
! all-atom C: CH, alcohols
template: [C [O(H)](C)(C)(H)]
type: opls_159
! all-atom C: C, alcohols
template: [C [O(H)](C)(C)(C)]
#opls_160 12.01100 ; CH2 Trifluoroethanol
#opls_161 12.01100 ; CF3 Trifluoroethanol
#opls_162 15.99940 ; OH Trifluoroethanol
#opls_163 1.00800 ; HO Trifluoroethanol
#opls_164 18.99840 ; F Trifluoroethanol
#opls_165 1.00800 ; H Trifluoroethanol
type: opls_166
! C(OH) phenol Use with all
template: [CR6 [O(H)](C)(C)]
# sp2 carbons
precedence: (
(opls_166) (opls_147) (opls_145) # Aromatics
(opls_178) (opls_150) # Dienes
(opls_141) (opls_142) # Alkanes
)
type: opls_167
!O phenol
template: [O (H)(CR6)]
# hydroxyl oxygens
precedence: ((opls_167) (opls_154))
# hydroxyl hydrogens
precedence: ((opls_168) (opls_155))
type: opls_168
! H phenol
template: [H [O(CR6)]]
# *** It is impossible to detect arbitrary polyols with the template syntax ***
# opls_169 15.99940 ; O: diols
# opls_170 1.00800 ; H(O): diols
# opls_171 15.99940 ; O: triols
# opls_172 1.00800 ; H(O): triols
# opls_173 12.01100 ; C(H2OH): triols
# opls_174 12.01100 ; C(HROH): triols
# opls_175 12.01100 ; C(R2OH): triols
# opls_176 1.00800 ; H(CXOH): triols
type: opls_178
! diene =CR-CR=; use #150 for =CH-CH=
template: [C (C)[C(*)(*)][C(C)(C)]]
# opls_179 15.99940 ; O: anisole
type: opls_180
! O: dialkyl ether
template: [O (C)(C)]
type: opls_181
! C(H3OR): methyl ether
template: [C (O(C))(H)(H)(H)]
type: opls_182
! C(H2OR): ethyl ether
template: [C (O(C))(C)(H)(H)]
type: opls_183
! C(HOR): i-Pr ether, allose
template: [C (O(C))(C)(C)(H)]
type: opls_184
! C(OR): t-Bu ether
template: (C (O(C))(C)(C)(C))
opls_185 1.00800 ; H(COR): alpha H ether
"""
"""
opls_186 15.99940 ; O: acetal ether
opls_187 15.99940 ; O(H): hemiacetal
opls_188 1.00800 ; H(O): hemiacetal
opls_189 12.01100 ; C(H2O2): acetal OCH2O
opls_190 1.00800 ; H(CHO2): acetal OCH2O
opls_191 12.01100 ; C(H2O2): hemiacetal OCH2OH
opls_192 1.00800 ; H(CHO2): hemiacetal OCH2OH
opls_193 12.01100 ; C(HCO2): acetal OCHRO
opls_194 1.00800 ; H(CHO2): acetal OCHRO
opls_195 12.01100 ; C(HCO2): hemiacetal OCHROH
opls_196 1.00800 ; H(C2O2): hemiacetal OCHROH
opls_197 12.01100 ; C(C2O2): acetal OCRRO
opls_198 12.01100 ; C(C2O2): hemiacetal OCRROH
opls_199 12.01100 ; C(O,Me): anisole
opls_200 32.06000 ; all-atom S: thiols
opls_201 32.06000 ; S IN H2S JPC,90,6379 (1986)
opls_202 32.06000 ; all-atom S: sulfides, S=C
opls_203 32.06000 ; all-atom S: disulfides
opls_204 1.00800 ; all-atom H(S): thiols
opls_205 1.00800 ; H IN H2S JPC,90,6379 (1986)
opls_206 12.01100 ; all-atom C: CH2, thiols
opls_207 12.01100 ; all-atom C: CH, thiols
opls_208 12.01100 ; all-atom C: C, thiols
opls_209 12.01100 ; all-atom C: CH3, sulfides
opls_210 12.01100 ; all-atom C: CH2, sulfides
opls_211 12.01100 ; all-atom C: CH, sulfides
opls_212 12.01100 ; all-atom C: C, sulfides
opls_213 12.01100 ; all-atom C: CH3, disulfides
opls_214 12.01100 ; all-atom C: CH2, disulfides
opls_215 12.01100 ; all-atom C: CH, disulfides
opls_216 12.01100 ; all-atom C: C, disulfides
opls_217 12.01100 ; all-atom C: CH3, methanethiol
opls_218 12.01100 ; C in CH2OH - benzyl alcohols
opls_219 12.01100 ; C in CHROH - benzyl alcohols
opls_220 12.01100 ; C in CR2OH - benzyl alcohols
opls_221 12.01100 ; C(CH2OH) - benzyl alcohols
opls_222 32.06000 ; S in thioanisoles
opls_223 12.01100 ; C in RCH2NH2. Use #223B for AA Calpha.
opls_223B 12.01100 ; Gly Calpha
opls_224 12.01100 ; C in R2CHNH2. Use #224B for AA Calpha.
opls_224B 12.01100 ; Calpha in most AA (except Gly,Pro,Aib)
opls_225 12.01100 ; C in R3CNH2. Use #225B for AA Calpha.
opls_225B 12.01100 ; Aib Calpha.
opls_226 35.45300 ; chloroalkene Cl (ClH-C=) - see also #398
opls_227 12.01100 ; chloroalkene C (ClH-C=)
opls_228 12.01100 ; C(SMe) thioanisole
opls_229 12.01100 ; C on N: secondary N-CHR2 amide
opls_230 12.01100 ; C on N: secondary N-CR3 amide
opls_231 12.01100 ; C: C=O in benzophenone
opls_232 12.01100 ; C: C=O in benzaldehyde,acetophenone (CH)
opls_233 12.01100 ; C: C=O in acetophenone (CMe)
opls_234 12.01100 ; C: C=O in benzamide
opls_235 12.01100 ; C=O in amide, dmf, peptide bond
opls_236 15.99940 ; O: C=O in amide. Acyl R on C in amide is neutral -
opls_237 14.00670 ; N: primary amide. use alkane parameters.
opls_238 14.00670 ; N: secondary amide, peptide bond (see #279 for formyl H)
opls_239 14.00670 ; N: tertiary amide
opls_240 1.00800 ; H on N: primary amide
opls_241 1.00800 ; H on N: secondary amide
opls_242 12.01100 ; C on N: secondary N-Me amide
opls_243 12.01100 ; C on N: tertiary N-Me amide
opls_244 12.01100 ; C on N: secondary N-CH2R amide
opls_245 12.01100 ; C on N: tertiary N-CH2R amide, Pro CD
opls_246 12.01100 ; C on N: tertiary N-CHR2 amide, Pro CA
opls_247 12.01100 ; C in O=C(NH2)2 Urea
opls_248 15.99940 ; O in O=C(NH2)2 Urea Isr. J. Chem
opls_249 14.00670 ; N in O=C(NH2)2 Urea 33, 323 (93)
opls_250 1.00800 ; H in O=C(NH2)2 Urea
opls_251 14.00670 ; N in imide
opls_252 12.01100 ; C(=O) in imide
opls_253 15.99940 ; O in imide
opls_254 1.00800 ; H(N) in imide
opls_255 1.00800 ; H(C) in formimide
opls_256 12.01100 ; C in CH3 imide
opls_257 12.01100 ; C in RCH2 imide
opls_258 12.01100 ; C in R2CH imide
opls_259 12.01100 ; C in R3C imide
opls_260 12.01100 ; C(CN) benzonitrile
opls_261 12.01100 ; C(N) benzonitrile
opls_262 14.00670 ; N benzonitrile
opls_263 12.01100 ; C(Cl) chlorobenzene
opls_264 35.45300 ; Cl chlorobenzene
opls_265 14.00670 ; N: N-phenylacetamide
opls_266 12.01100 ; ipso C in N-phenylacetamide
opls_267 12.01100 ; Co in CCOOH carboxylic acid
opls_268 15.99940 ; Oh in CCOOH R in RCOOH is
opls_269 15.99940 ; Oc in CCOOH neutral; use #135-#140
opls_270 1.00800 ; H in CCOOH
opls_271 12.01100 ; C in COO- carboxylate
opls_272 15.99940 ; O: O in COO- carboxylate,peptide terminus
opls_273 12.01100 ; C: CH3, carboxylate ion
opls_274 12.01100 ; C: CH2, carboxylate ion
opls_275 12.01100 ; C: CH, carboxylate ion
opls_276 12.01100 ; C: C, carboxylate ion
opls_277 12.01100 ; AA C: aldehyde - for C-alpha use #135-#139
opls_278 15.99940 ; AA O: aldehyde
opls_279 1.00800 ; AA H-alpha in aldehyde & formamide
opls_280 12.01100 ; AA C: ketone - for C-alpha use #135-#139
opls_281 15.99940 ; AA O: ketone
opls_282 1.00800 ; AA H on C-alpha in ketone & aldehyde
opls_283 12.01100 ; CA on C-terminal ALA,CYS,SER,THR,HIS,ASP,ASN
opls_284 12.01100 ; CA on C-terminal GLY
opls_285 12.01100 ; CA on C-terminal PRO
opls_286 14.00670 ; N (NH4+) JPC,90,2174 (1986)
opls_287 14.00670 ; N (RNH3+) JPC,90,2174 (1986)
opls_288 14.00670 ; N (R4N+) JPC,90,2174 (1986)
opls_289 1.00800 ; H (NH4+) JPC,90,2174 (1986)
opls_290 1.00800 ; H (RNH3+) JPC,90,2174 (1986)
opls_291 12.01100 ; C in CH3NH3+
opls_292 12.01100 ; C in RCH2NH3+
opls_292B 12.01100 ; CA in GLY-NH3+ N-term.
opls_293 12.01100 ; C in R2CHNH3+
opls_293B 12.01100 ; CA in NH3+ N-term, All AA except GLY & PRO
opls_294 12.01100 ; C in R3CNH3+
opls_295 12.01100 ; AA C-alpha on N-term PRO
opls_296 12.01100 ; AA:C-delta in N-term PRO NH2+
opls_297 12.01100 ; CT in CH3NH2+R
opls_298 12.01100 ; AA C-alpha in Gly zwitterion
opls_299 12.01100 ; AA C-alpha in Ala zwitterion
opls_300 14.00670 ; N: guanidinium NH2
opls_301 1.00800 ; H: guanidinium NH2
opls_302 12.01100 ; C: guanidinium C+
opls_303 14.00670 ; N: guanidinium NHR
opls_304 1.00800 ; H: guanidinium NHR
opls_305 12.01100 ; C: CH3, methylguanidinium
opls_306 12.01100 ; C: CH3, ethylguanidinium
opls_307 12.01100 ; C: CH2(D), ARG, ethylguanidinium
opls_308 12.01100 ; C: CH2(G), ARG
opls_309 14.00670 ; N (R2NH2+), N-terminal PRO NH2+
opls_310 1.00800 ; H (R2NH2+)
opls_311 14.00670 ; DAP N1 (Diaminopyridine)
opls_312 12.01100 ; DAP C2
opls_313 14.00670 ; DAP N-amine
opls_314 1.00800 ; DAP H-amine
opls_315 12.01100 ; DAP C3
opls_316 1.00800 ; DAP H3
opls_317 12.01100 ; DAP C4
opls_318 1.00800 ; DAP H4
opls_319 14.00670 ; Uracil & Thymine N1 - use #319B for nucleoside
opls_319B 14.00670 ; Uracil & Thymine N1 - only for nucleoside
opls_320 12.01100 ; Uracil & Thymine C2
opls_321 14.00670 ; Uracil & Thymine N3
opls_322 12.01100 ; Uracil & Thymine C4
opls_323 12.01100 ; Uracil & Thymine C5
opls_324 12.01100 ; Uracil & Thymine C6
opls_325 1.00800 ; Uracil & Thymine H-N1
opls_326 15.99940 ; Uracil O-C2
opls_327 1.00800 ; Uracil H-N3
opls_328 15.99940 ; Uracil O-C4
opls_329 1.00800 ; Uracil H-C5
opls_330 1.00800 ; Uracil H-C6
opls_331 12.01100 ; Thymine C-C5
opls_332 1.00800 ; Thymine H-CC5
opls_333 14.00670 ; Cytosine N1 -use #333B for nucleoside
opls_333B 14.00670 ; Cytosine N1 - for nucleoside
opls_334 12.01100 ; Cytosine C2
opls_335 14.00670 ; Cytosine N3
opls_336 12.01100 ; Cytosine C4 Nucleotide base
opls_337 12.01100 ; Cytosine C5 parameters:
opls_338 12.01100 ; Cytosine C6 JACS,113,2810(1991)
opls_339 1.00800 ; Cytosine H-N1
opls_340 15.99940 ; Cytosine O-C2
opls_341 14.00670 ; Cytosine N-C4
opls_342 1.00800 ; Cytosine H-NC4/N3
opls_343 1.00800 ; Cytosine H-NC4/C5
opls_344 1.00800 ; Cytosine H-C5
opls_345 1.00800 ; Cytosine H-C6
opls_346 14.00670 ; Adenine N1
opls_347 12.01100 ; Adenine C2
opls_348 14.00670 ; Adenine N3
opls_349 12.01100 ; Adenine C4
opls_350 12.01100 ; Adenine C5
opls_351 12.01100 ; Adenine C6
opls_352 14.00670 ; Adenine & Guanine N7
opls_353 12.01100 ; Adenine & Guanine C8
opls_354 14.00670 ; Adenine & Guanine N9 - use #354B for nucleoside
opls_354B 14.00670 ; Adenine & Guanine N9 - nucleoside only
opls_355 1.00800 ; Adenine & Guanine H-C2
opls_356 14.00670 ; Adenine & Guanine N-C6
opls_357 1.00800 ; Adenine & Guanine H-NC6/N1
opls_358 1.00800 ; Adenine & Guanine H-NC6/C5
opls_359 1.00800 ; Adenine & Guanine H-C8 Guanine
opls_360 1.00800 ; Adenine & Guanine H-N9 Guanine
opls_361 14.00670 ; Guanine N1
opls_362 12.01100 ; Guanine C2
opls_363 14.00670 ; Guanine N3
opls_364 12.01100 ; Guanine C4
opls_365 12.01100 ; Guanine C5
opls_366 12.01100 ; Guanine C6
opls_367 1.00800 ; Guanine H-N1
opls_368 14.00670 ; Guanine N-C2
opls_369 1.00800 ; Guanine H-NC2
opls_370 15.99940 ; Guanine O-C6
opls_371 12.01100 ; 9-Me Adenine or Guanine C-N9
opls_372 1.00800 ; 9-Me Adenine or Guanine H-CN9
opls_373 12.01100 ; 1-Me Uracil or Thymine C-N1
opls_374 1.00800 ; 1-Me Uracil or Thymine H-CN1
opls_375 12.01100 ; 1-Me Cytosine C-N1
opls_376 1.00800 ; 1-Me Cytosine H-CN1
opls_377 14.00670 ; CytH+ N1 Use #377B for nucleoside.
opls_377B 14.00670 ; CytH+ N1 - nucleoside only
opls_378 12.01100 ; CytH+ C2
opls_379 14.00670 ; CytH+ N3 Protonated cytosine.
opls_380 12.01100 ; CytH+ C4
opls_381 12.01100 ; CytH+ C5
opls_382 12.01100 ; CytH+ C6
opls_383 1.00800 ; CytH+ H-N1
opls_384 15.99940 ; CytH+ O-C2
opls_385 1.00800 ; CytH+ H-N3
opls_386 14.00670 ; CytH+ N-C4
opls_387 1.00800 ; CytH+ H-NC4/N3
opls_388 1.00800 ; CytH+ H-NC4/C5
opls_389 1.00800 ; CytH+ H-C5
opls_390 1.00800 ; CytH+ H-C6
opls_391 12.01100 ; 1-Me CytH+ C-N1
opls_392 1.00800 ; 1-Me CytH+ H-CN1
opls_393 30.97376 ; P dimethylphosphate anion UA - see #440 for AA
opls_394 15.99940 ; O(=) dimethylphosphate anion UA - see #440 for AA
opls_395 15.99940 ; O(-) dimethylphosphate anion UA - see #440 for AA
opls_396 12.01100 ; C in CH3 dimethylphosphate anion UA - see #440 for AA
opls_400 18.99840 ; F- JACS 106, 903 (1984)
opls_401 35.45300 ; Cl- JACS 106, 903 (1984)
opls_402 79.90400 ; Br- JACS 107, 7793(1985)
opls_403 126.90450 ; I- JACS 120, 5104(1998)
opls_404 6.94100 ; Li+ JACS 106, 903 (1984)
opls_405 22.98977 ; Na+ JACS 106, 903 (1984)
opls_406 6.94100 ; Li+
opls_407 22.98977 ; Na+ Aqvists cation
opls_408 39.09830 ; K+ parameters:
opls_409 85.46780 ; Rb+ JPC,94, 8021 (90)
opls_410 132.90540 ; Cs+
opls_411 24.30500 ; Mg++
opls_412 40.08000 ; Ca++
opls_413 87.62000 ; Sr++
opls_414 137.33000 ; Ba++
opls_415 12.01100 ; C in CH3S- thiolate
opls_416 1.00800 ; H in CH3S-
opls_417 32.06000 ; S in CH3S-
opls_418 12.01100 ; C in CH3O- alkoxide
opls_419 1.00800 ; H in CH3O-
opls_420 15.99940 ; O in CH3O-
opls_421 12.01100 ; C1 in CH2CN- RCN-
opls_422 1.00800 ; H in CH2CN-
opls_423 12.01100 ; C2 in CH2CN- JACS 111,4190 (89)
opls_424 14.00670 ; N in CH2CN-
opls_425 12.01100 ; C in CH3NH-
opls_426 1.00800 ; HC in CH3NH- RNH-
opls_427 14.00670 ; N in CH3NH-
opls_428 1.00800 ; HN in CH3NH-
opls_429 12.01100 ; C2 in CH3CH2- RCH2-
opls_430 1.00800 ; H in CH3CH2-
opls_431 12.01100 ; C1 in CH3CH2-
opls_432 1.00800 ; H1 in CH3CH2-
opls_433 0.00000 ; LP in CH3CH2-
opls_434 15.99940 ; O in OH- Hyroxide O-H = 0.953 A
opls_435 1.00800 ; H in OH- JACS 108, 2517 (86)
opls_436 0.00000 ; U in UO2+ J Mol Struct 366, 55 (96)
opls_437 15.99940 ; O in UO2+ r(U-O) = 1.80 A
opls_440 30.97376 ; P in Me2PO4-, Me2PO4H
opls_441 15.99940 ; O= in Me2PO4-, Me2PO4H
opls_442 15.99940 ; OMe in Me2PO4-, Me2PO4H dimethylphosphate
opls_443 12.01100 ; C in Me2PO4-, Me2PO4H dimetylphosphate
opls_444 1.00800 ; H in Me2PO4-, Me2PO4H 6-31+G* CHELPG
opls_445 30.97376 ; P in MeOPO3--, MeOPO3H2
opls_446 15.99940 ; O= in MeOPO3--, MeOPO3H2
opls_447 15.99940 ; OMe in MeOPO3--, MeOPO3H2 methyl phosphate
opls_448 12.01100 ; C in MeOPO3--, MeOPO3H2 6-31+G* CHELPG
opls_449 1.00800 ; H in MeOPO3--, MeOPO3H2
opls_450 30.97376 ; P in MePO3Me-, MePO3HMe
opls_451 15.99940 ; O= in MePO3Me-, MePO3HMe
opls_452 15.99940 ; OMe in MePO3Me-, MePO3HMe methyl
opls_453 12.01100 ; C(O) MePO3Me-, MePO3HMe methylphosphonate
opls_454 1.00800 ; H(CO) MePO3Me-, MePO3HMe 6-31+G* CHELPG
opls_455 12.01100 ; C(P) MePO3Me-, MePO3HMe
opls_456 1.00800 ; H(CP) MePO3Me-, MePO3HMe
opls_457 12.01100 ; Cipso benzyl methylphosphonate
opls_458 12.01100 ; C(O) benzyl methylphosphonate
opls_459 1.00800 ; H(CO) benzyl methylphosphonate
opls_460 12.01100 ; Cipso methyl benzylphosphonate
opls_461 12.01100 ; C(P) methyl benzylphosphonate
opls_462 1.00800 ; H(CP) methyl benzylphosphonate
opls_463 12.01100 ; Cipso C6H5OPO3(2-) use with #445-#447
opls_465 12.01100 ; AA C: esters - for R on C=O, use #280-#282
opls_466 15.99940 ; AA =O: esters
opls_467 15.99940 ; AA -OR: ester
opls_468 12.01100 ; methoxy C in esters - see also #490-#492
opls_469 1.00800 ; methoxy Hs in esters
opls_470 12.01100 ; Co in benzoic acid
opls_471 12.01100 ; Co in methyl benzoate, aryl ester
opls_472 12.01100 ; Cipso phenyl ester
opls_473 15.99940 ; AA -OR phenyl ester
opls_474 32.06000 ; S in sulfonamide, S(=O)2(OR)
opls_475 15.99940 ; O in sulfonamide, S(=O)2(OR)
opls_476 12.01100 ; CH3 attached to S of sulfonamide
opls_477 1.00800 ; H of Me attached to S of sulfonamide
opls_478 14.00670 ; N: primary amide of sulfonamide
opls_479 1.00800 ; H on N: primary sulfonamide
opls_480 14.00670 ; N secondary amide of sulfonamide
opls_481 1.00800 ; H on N: secondary sulfonamide
opls_482 12.01100 ; alpha CH3-N of sulfonamide
opls_483 1.00800 ; H of alpha CH3-N of sulfonamide
opls_484 12.01100 ; alpha CH2-N of sulfonamide. Use q=0.45 for CRH-N, q=0.65 for O=N-C-CH-N.
opls_485 1.00800 ; H of alpha CH2-N of sulfonamide
opls_486 12.01100 ; beta CH3 of N-ethyl sulfonamide
opls_487 1.00800 ; H of beta CH3 of N-ethyl sulfonamide
opls_488 12.01100 ; benzene C attached to S of sulfonamide
opls_490 12.01100 ; C(H2OS) ethyl ester
opls_491 12.01100 ; C(HOS) i-pr ester
opls_492 12.01100 ; C(OS) t-bu ester
opls_493 32.06000 ; S in sulfone
opls_494 15.99940 ; O in sulfone
opls_496 32.06000 ; sulfoxide - all atom
opls_497 15.99940 ; sulfoxide - all atom
opls_498 12.01100 ; CH3 all-atom C: sulfoxide
opls_499 12.01100 ; CH2 all-atom C: sulfoxide
opls_500 12.01100 ; CG in Trp
opls_501 12.01100 ; CD C in Trp
opls_502 12.01100 ; CE C in Trp
opls_503 14.00670 ; NE in Trp
opls_504 1.00800 ; H on NE in Trp
opls_505 12.01100 ; CB in His
opls_506 12.01100 ; CE1 in HID, HIE
opls_507 12.01100 ; CD2 in HID, CG in HIE
opls_508 12.01100 ; CG in HID, CD2 in HIE
opls_509 12.01100 ; CE1 in HIP
opls_510 12.01100 ; CG, CD2 in HIP
opls_511 14.00670 ; NE in HID, ND in HIE
opls_512 14.00670 ; N in HIP
opls_513 1.00800 ; H on N in HIP
opls_514 12.01100 ; CD1 in TRP
opls_515 12.01100 ; all-atom C: CH, isopropyl benzene
opls_516 12.01100 ; all-atom C: C, t-butyl benzene
opls_517 12.01100 ; vinyl ether HCOR
opls_518 12.01100 ; vinyl ether RCOR
opls_520 14.00670 ; N in pyridine 6-31G*
opls_521 12.01100 ; C1 in pyridine CHELPG
opls_522 12.01100 ; C2 in pyridine charges
opls_523 12.01100 ; C3 in pyridine for
opls_524 1.00800 ; H1 in pyridine 520-619
opls_525 1.00800 ; H2 in pyridine
opls_526 1.00800 ; H3 in pyridine
opls_527 14.00670 ; N in pyrazine
opls_528 12.01100 ; C in pyrazine
opls_529 1.00800 ; H in pyrazine
opls_530 14.00670 ; N in pyrimidine
opls_531 12.01100 ; C2 in pyrimidine
opls_532 12.01100 ; C4 in pyrimidine
opls_533 12.01100 ; C5 in pyrimidine
opls_534 1.00800 ; H2 in pyrimidine
opls_535 1.00800 ; H4 in pyrimidine
opls_536 1.00800 ; H5 in pyrimidine
opls_537 14.00670 ; N in pyridazine
opls_538 12.01100 ; C3 in pyridazine
opls_539 12.01100 ; C4 in pyridazine
opls_540 1.00800 ; H3 in pyridazine
opls_541 1.00800 ; H4 in pyridazine
opls_542 14.00670 ; N in pyrrole
opls_543 12.01100 ; C2 in pyrrole
opls_544 12.01100 ; C3 in pyrrole
opls_545 1.00800 ; H1 in pyrrole
opls_546 1.00800 ; H2 in pyrrole
opls_547 1.00800 ; H3 in pyrrole
opls_548 14.00670 ; N1 in pyrazole
opls_549 14.00670 ; N2 in pyrazole
opls_550 12.01100 ; C3 in pyrazole
opls_551 12.01100 ; C4 in pyrazole
opls_552 12.01100 ; C5 in pyrazole
opls_553 1.00800 ; H1 in pyrazole
opls_554 1.00800 ; H3 in pyrazole
opls_555 1.00800 ; H4 in pyrazole
opls_556 1.00800 ; H5 in pyrazole
opls_557 14.00670 ; N1 in imidazole
opls_558 12.01100 ; C2 in imidazole
opls_559 14.00670 ; N3 in imidazole
opls_560 12.01100 ; C4 in imidazole
opls_561 12.01100 ; C5 in imidazole
opls_562 1.00800 ; H1 in imidazole
opls_563 1.00800 ; H2 in imidazole
opls_564 1.00800 ; H4 in imidazole
opls_565 1.00800 ; H5 in imidazole
opls_566 15.99940 ; O in furan
opls_567 12.01100 ; C2 in furan
opls_568 12.01100 ; C3 in furan
opls_569 1.00800 ; H2 in furan
opls_570 1.00800 ; H3 in furan
opls_571 15.99940 ; O in oxazole
opls_572 12.01100 ; C2 in oxazole
opls_573 14.00670 ; N in oxazole
opls_574 12.01100 ; C4 in oxazole
opls_575 12.01100 ; C5 in oxazole
opls_576 1.00800 ; H2 in oxazole
opls_577 1.00800 ; H4 in oxazole
opls_578 1.00800 ; H5 in oxazole
opls_579 15.99940 ; O in isoxazole
opls_580 14.00670 ; N in isoxazole
opls_581 12.01100 ; C3 in isoxazole
opls_582 12.01100 ; C4 in isoxazole
opls_583 12.01100 ; C5 in isoxazole
opls_584 1.00800 ; H3 in isoxazole
opls_585 1.00800 ; H4 in isoxazole
opls_586 1.00800 ; H5 in isoxazole
opls_587 14.00670 ; N1 in indole
opls_588 12.01100 ; C2 in indole
opls_589 12.01100 ; C3 in indole
opls_590 12.01100 ; C4 in indole
opls_591 12.01100 ; C5 in indole
opls_592 12.01100 ; C6 in indole
opls_593 12.01100 ; C7 in indole
opls_594 12.01100 ; C8 in indole
opls_595 12.01100 ; C9 in indole
opls_596 1.00800 ; H1 in indole
opls_597 1.00800 ; H2 in indole
opls_598 1.00800 ; H3 in indole
opls_599 1.00800 ; H4 in indole
opls_600 1.00800 ; H5 in indole
opls_601 1.00800 ; H6 in indole
opls_602 1.00800 ; H7 in indole
opls_603 14.00670 ; N1 in quinoline
opls_604 12.01100 ; C2 in quinoline
opls_605 12.01100 ; C3 in quinoline
opls_606 12.01100 ; C4 in quinoline
opls_607 12.01100 ; C5 in quinoline
opls_608 12.01100 ; C6 in quinoline
opls_609 12.01100 ; C7 in quinoline
opls_610 12.01100 ; C8 in quinoline
opls_611 12.01100 ; C9 in quinoline
opls_612 12.01100 ; C10 in quinoline
opls_613 1.00800 ; H2 in quinoline
opls_614 1.00800 ; H3 in quinoline
opls_615 1.00800 ; H4 in quinoline
opls_616 1.00800 ; H5 in quinoline
opls_617 1.00800 ; H6 in quinoline
opls_618 1.00800 ; H7 in quinoline
opls_619 1.00800 ; H8 in quinoline
opls_620 14.00670 ; N1 in purine
opls_621 12.01100 ; C2 in purine
opls_622 14.00670 ; N3 in purine
opls_623 12.01100 ; C4 in purine
opls_624 12.01100 ; C5 in purine
opls_625 12.01100 ; C6 in purine
opls_626 14.00670 ; N7 in purine
opls_627 12.01100 ; C8 in purine
opls_628 14.00670 ; N9 in purine
opls_629 1.00800 ; H2 in purine
opls_630 1.00800 ; H6 in purine
opls_631 1.00800 ; H8 in purine
opls_632 1.00800 ; H9 in purine
opls_633 32.06000 ; S in thiazole
opls_634 12.01100 ; C2 in thiazole
opls_635 14.00670 ; N in thiazole
opls_636 12.01100 ; C4 in thiazole
opls_637 12.01100 ; C5 in thiazole
opls_638 1.00800 ; H2 in thiazole
opls_639 1.00800 ; H4 in thiazole
opls_640 1.00800 ; H5 in thiazole
opls_641 14.00670 ; N in 1,3,5-triazine
opls_642 12.01100 ; C in 1,3,5-triazine
opls_643 1.00800 ; H in 1,3,5-triazine
opls_644 12.01100 ; C5 in serotonin
opls_645 12.01100 ; C on C3 in serotonin
opls_646 14.00670 ; N1,N10 in 1,10-phenanthroline
opls_647 12.01100 ; C2,C9 in 1,10-phenanthroline
opls_648 12.01100 ; C3,C8 in 1,10-phenanthroline
opls_649 12.01100 ; C4,C7 in 1,10-phenanthroline
opls_650 12.01100 ; C12,C14 in 1,10-phenanthroline
opls_651 12.01100 ; C11,C13 in 1,10-phenanthroline
opls_652 12.01100 ; C5 in 1,10-phenanthroline
opls_653 1.00800 ; H2,H9 in 1,10-phenanthroline
opls_654 1.00800 ; H3,H8 in 1,10-phenanthroline
opls_655 1.00800 ; H4,H7 in 1,10-phenanthroline
opls_656 1.00800 ; H5,H6 in 1,10-phenanthroline
opls_670 12.01100 ; CH3, 2-methyl pyridine
opls_671 12.01100 ; CH2, 2-ethyl pyridine
opls_672 12.01100 ; CH3, 3-methyl pyridazine
opls_673 12.01100 ; CH2, 3-ethyl pyridazine
opls_674 12.01100 ; CH3, 4-methyl pyrimidine
opls_675 12.01100 ; CH2, 4-ethyl pyrimidine
opls_676 12.01100 ; CH3, 2-methyl pyrazine
opls_677 12.01100 ; CH2, 2-ethyl pyrazine
opls_678 12.01100 ; CH3, 2-methyl pyrrole
opls_679 12.01100 ; CH2, 2-ethyl pyrrole
opls_680 12.01100 ; CH3, 2-methyl furan
opls_681 12.01100 ; CH2, 2-ethyl furan
opls_697 0.00000 ; Ac+3 Actinide params -
opls_698 0.00000 ; Th+4
opls_699 0.00000 ; Am+3 F. van Veggel
opls_700 12.01100 ; C+ in t-butyl+ B3LYP/6-31G*
opls_701 12.01100 ; C in t-butyl+ charges
opls_702 1.00800 ; H in t-butyl+
opls_703 0.00000 ; La+3
opls_704 0.00000 ; Nd+3 Lanthanide params -
opls_705 0.00000 ; Eu+3 F. van Veggel, Chem Eur J 5, 90 (1999).
opls_706 0.00000 ; Gd+3
opls_707 0.00000 ; Yb+3 see also JPC-A 104, 7659 (2000)
opls_708 12.01100 ; C in Cl..CH3..Cl- TS
opls_709 35.45300 ; Cl charges: JACS 117,2024 (95)
opls_710 1.00800 ; H in Cl..CH3..Cl- TS
opls_711 12.01100 ; CH2 C: cyclopropane
opls_712 12.01100 ; CHR C: cyclopropane
opls_713 12.01100 ; CR2 C: cyclopropane
opls_714 12.01100 ; C in C5H5- cyclopentadienyl anion
opls_715 1.00800 ; H in C5H5- cyclopentadienyl anion
opls_716 12.01100 ; C in C5H5 cyclopentadienyl radical
opls_717 1.00800 ; H in C5H5 cyclopentadienyl radical
opls_718 12.01100 ; C(F) fluorobenzene
opls_719 18.99840 ; F fluorobenzene
opls_720 12.01100 ; C(F) hexafluorobenzene
opls_721 18.99840 ; F hexafluorobenzene
opls_722 79.90400 ; Br alkyl bromide (UA, but probably ok for AA)
opls_724 12.01100 ; C(CF3) trifluoromethylbenzene
opls_725 12.01100 ; CF3 trifluoromethylbenzene
opls_726 18.99840 ; F trifluoromethylbenzene
opls_727 12.01100 ; C(F) difluorobenzenes
opls_728 18.99840 ; F difluorobenzenes
opls_729 12.01100 ; C(Br) bromobenzene
opls_730 79.90400 ; Br bromobenzene
opls_731 12.01100 ; C(I) iodobenzene - tentative
opls_732 126.90450 ; I iodobenzene - tentative
opls_733 12.01100 ; all-atom C: CH, cyclopropyl benzene
opls_734 32.06000 ; all-atom S: thiophenol (HS is #204)
opls_735 12.01100 ; C(S) thiophenol
opls_736 12.01100 ; CG of Benzamidine
opls_737 12.01100 ; CD of Benzamidine
opls_738 12.01100 ; CE of Benzamidine
opls_739 12.01100 ; CZ of Benzamidine
opls_740 1.00800 ; HD of Benzamidine
opls_741 1.00800 ; HE of Benzamidine
opls_742 12.01100 ; C+ of Benzamidine
opls_743 14.00670 ; N-H2 of Benzamidine
opls_744 1.00800 ; H1-N of Benzamidine
opls_745 1.00800 ; H2-N of Benzamidine
opls_746 1.00800 ; H-CG of Benzamidine
opls_747 12.01100 ; CH3 in neutral MeGDN
opls_748 12.01100 ; CD of neutral ARG
opls_749 14.00670 ; NE of neutral ARG
opls_750 14.00670 ; N1 of neutral ARG (HN=CZ)
opls_751 14.00670 ; N2 of neutral ARG (H2N-CZ)
opls_752 12.01100 ; CZ of neutral ARG
opls_753 14.00670 ; N IN RCN nitriles
opls_754 12.01100 ; C IN RCN nitriles
opls_755 12.01100 ; C of CH3 in CH3CN
opls_756 12.01100 ; C of CH2 in RCH2CN
opls_757 12.01100 ; C of CH in R2CHCN
opls_758 12.01100 ; C of C in R3CCN
opls_759 1.00800 ; HC-CT-CN alpha-H in nitriles
opls_760 14.00670 ; N in nitro R-NO2
opls_761 15.99940 ; O in nitro R-NO2
opls_762 12.01100 ; CT-NO2 nitromethane
opls_763 1.00800 ; HC-CT-NO2 alpha-H in nitroalkanes
opls_764 12.01100 ; CT-NO2 nitroethane
opls_765 12.01100 ; CT-NO2 2-nitropropane
opls_766 12.01100 ; CT-NO2 2-methyl-2-nitropropane
opls_767 14.00670 ; N in nitro Ar-NO2
opls_768 12.01100 ; C(NO2) nitrobenzene
opls_771 15.99940 ; propylene carbonate O (Luciennes param.)
opls_772 12.01100 ; propylene carbonate C=O
opls_773 15.99940 ; propylene carbonate OS
opls_774 12.01100 ; propylene carbonate C in CH2
opls_775 12.01100 ; propylene carbonate C in CH
opls_776 12.01100 ; propylene carbonate C in CH3
opls_777 1.00800 ; propylene carbonate H in CH2
opls_778 1.00800 ; propylene carbonate H in CH
opls_779 1.00800 ; propylene carbonate H in CH3
opls_781 30.97376 ; phosphonium R4P+
opls_782 12.01100 ; CH3PR3+ 6-31G* CHELPG
opls_783 12.01100 ; RCH2PR3+
opls_784 1.00800 ; H in CH3PR3+
opls_785 30.97376 ; P in PF6-
opls_786 18.99840 ; F in PF6-
opls_787 14.00670 ; N in NO3-
opls_788 15.99940 ; O in NO3-
opls_795 15.99940 ; O TIP4F Water
opls_796 1.00800 ; H TIP4F Water
opls_797 0.00000 ; M TIP4F Water
opls_900 14.00670 ; N primary amines
opls_901 14.00670 ; N secondary amines, aziridine N1
opls_902 14.00670 ; N tertiary amines
opls_903 12.01100 ; CH3(N) primary aliphatic amines, H(C) is #911
opls_904 12.01100 ; CH3(N) secondary aliphatic amines, H(C) is #911
opls_905 12.01100 ; CH3(N) tertiary aliphatic amines, H(C) is #911
opls_906 12.01100 ; CH2(N) primary aliphatic amines, H(C) is #911
opls_906B 12.01100 ; CA in GLY-NH2 N-terminus
opls_907 12.01100 ; CH2(N) secondary aliphatic amines, aziridine C2,C3H
opls_908 12.01100 ; CH2(N) tertiary aliphatic amines, H(C) is #911
opls_909 1.00800 ; H(N) primary amines
opls_910 1.00800 ; H(N) secondary amines
opls_911 1.00800 ; H(C) for C bonded to N in amines, diamines (aziridine H2,H3)
opls_912 12.01100 ; CH primary isopropyl amine
opls_912B 12.01100 ; CA in NH2 N-terminus. All AA except GLY, PRO
opls_913 12.01100 ; C primary t-butyl amine
opls_914 12.01100 ; CH secondary isopropyl amine
opls_915 12.01100 ; CH tertiary isopropyl amine
opls_916 12.01100 ; C(NH2) aniline
opls_917 12.01100 ; C(NH2) N-methylaniline
opls_918 12.01100 ; C(NH2) N,N-dimethylaniline
opls_925 12.01100 ; alkyne RC%CH terminal C acetylene
opls_926 1.00800 ; alkyne RC%CH terminal H
opls_927 12.01100 ; alkyne RC%CH C2 R-with 2 or 3 H
opls_928 12.01100 ; alkyne RC%CH C2 R-with 1 H
opls_929 12.01100 ; alkyne RC%CH C2 R-with no H or R=Phenyl
opls_930 1.00800 ; alkyne RC%CH H on C3 (for C3 use #135-#139)
opls_931 12.01100 ; alkyne RC%CR
opls_940 14.00670 ; N (R3NH+)
opls_941 1.00800 ; H (R3NH+)
opls_942 12.01100 ; C in CH3NHR2+
opls_943 12.01100 ; C in RCH2NHR2+
opls_944 12.01100 ; C in R2CHNHR2+
opls_945 12.01100 ; C in R3CNHR2+
opls_950 1.00800 ; glycine zwit. 6-31G* CHELPG charges
opls_951 12.01100 ; glycine zwit. 6-31G* CHELPG charges
opls_952 12.01100 ; glycine zwit. 6-31G* CHELPG charges
opls_953 14.00670 ; glycine zwit. 6-31G* CHELPG charges
opls_954 15.99940 ; glycine zwit. 6-31G* CHELPG charges
opls_955 1.00800 ; glycine zwit. 6-31G* CHELPG charges
opls_956 18.99840 ; F in monoalkyl fluorides (tentative)
opls_957 12.01100 ; RCH2F in monoalkyl fluorides (tentative)
opls_958 1.00800 ; H in RCHF in monoalkyl fluorides (tentative)
opls_959 12.01100 ; R2CHF in monoalkyl fluorides (tentative)
opls_960 12.01100 ; R3CF in monoalkyl fluorides (tentative)
opls_961 12.01100 ; CF3 perfluoroalkanes
opls_962 12.01100 ; CF2 perfluoroalkanes
opls_963 12.01100 ; CF perfluoroalkanes
opls_964 12.01100 ; CF4
opls_965 18.99840 ; F: perfluoroalkanes
MNH3 0.0 ; Dummy mass in rigid tetraedrical NH3 group
MNH2 0.0 ; Dummy mass in rigid umbrella-shaped NH2 group
MCH3A 0.0 ; Dummy mass in rigid tetraedrical CH3 group
MCH3B 0.0 ; Dummy mass in rigid tetraedrical CH3 group
MW 0.0 ; Dummy mass in rigid tyrosine rings
DUM 0.0 ; Dummy mass in TIP4P etc.
; These ion atomtypes are NOT part of OPLS, but since they are
; needed for some proteins we have added them.
Cu2+ 63.546 ; Copper. See Inorg. Chem. 40, 5223 (2001).
Fe2+ 55.847 ; Iron
Zn2+ 65.370 ; Zinc
Ar 39.948 ; Argon
; Added by DvdS 05/2005 copied from GROMACS force field.
SI 28.080 ; Silicium in Glass etc.
"""
|
csmm/multiase
|
multiasecalc/lammps/oplsaatypes.py
|
Python
|
gpl-2.0
| 33,538
|
[
"Gromacs"
] |
ba3256bac192b83228d7706361b248acba86f327606bf43d3e0b34e356268fa2
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2012 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
from stoqlib.gui.search.sellableunitsearch import SellableUnitSearch
from stoqlib.gui.test.uitestutils import GUITest
class TestSellableUnitSearchSearch(GUITest):
    """GUI snapshot tests for the sellable-unit search dialog."""

    def test_search(self):
        """The dialog must match its snapshot with and without a filter."""
        dialog = SellableUnitSearch(self.store)

        # Unfiltered view: every sellable unit is listed.
        dialog.search.refresh()
        self.check_search(dialog, 'sellable-unit-no-filter')

        # Filtered view: restrict results by the string 'kg'.
        dialog.set_searchbar_search_string('kg')
        dialog.search.refresh()
        self.check_search(dialog, 'sellable-unit-string-filter')
|
andrebellafronte/stoq
|
stoqlib/gui/test/test_sellableunitsearch.py
|
Python
|
gpl-2.0
| 1,373
|
[
"VisIt"
] |
f305812815a00ac393e1bb97c829bb11e69253e992fc2f7e55f6b0a626f3c213
|
########################################################################
#
# (C) 2013, James Cammarata <jcammarata@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import os.path
import sys
import yaml
from collections import defaultdict
from distutils.version import LooseVersion
from jinja2 import Environment
import ansible.constants as C
from ansible.cli import CLI
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.galaxy import Galaxy
from ansible.galaxy.api import GalaxyAPI
from ansible.galaxy.role import GalaxyRole
from ansible.playbook.role.requirement import RoleRequirement
class GalaxyCLI(CLI):
VALID_ACTIONS = ("init", "info", "install", "list", "remove", "search")
SKIP_INFO_KEYS = ("name", "description", "readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url" )
def __init__(self, args, display=None):
    """Initialize galaxy-specific state, then defer to the base CLI.

    Both attributes are populated later: ``self.galaxy`` in parse()
    and ``self.api`` in run().
    """
    self.galaxy = None
    self.api = None
    super(GalaxyCLI, self).__init__(args, display)
def parse(self):
    ''' create an options parser for bin/ansible '''
    # Base parser shared by all ansible CLIs; the usage line enumerates
    # the valid galaxy sub-commands (init/info/install/list/remove/search).
    self.parser = CLI.base_parser(
        usage = "usage: %%prog [%s] [--help] [options] ..." % "|".join(self.VALID_ACTIONS),
        epilog = "\nSee '%s <command> --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0])
    )

    # Pops the sub-command off self.args and stores it as self.action.
    self.set_action()

    # options specific to actions
    if self.action == "info":
        self.parser.set_usage("usage: %prog info [options] role_name[,version]")
    elif self.action == "init":
        self.parser.set_usage("usage: %prog init [options] role_name")
        self.parser.add_option('-p', '--init-path', dest='init_path', default="./",
            help='The path in which the skeleton role will be created. The default is the current working directory.')
        self.parser.add_option(
            '--offline', dest='offline', default=False, action='store_true',
            help="Don't query the galaxy API when creating roles")
    elif self.action == "install":
        self.parser.set_usage("usage: %prog install [options] [-r FILE | role_name(s)[,version] | scm+role_repo_url[,version] | tar_file(s)]")
        self.parser.add_option('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
            help='Ignore errors and continue with the next specified role.')
        self.parser.add_option('-n', '--no-deps', dest='no_deps', action='store_true', default=False,
            help='Don\'t download roles listed as dependencies')
        self.parser.add_option('-r', '--role-file', dest='role_file',
            help='A file containing a list of roles to be imported')
    elif self.action == "remove":
        self.parser.set_usage("usage: %prog remove role1 role2 ...")
    elif self.action == "list":
        self.parser.set_usage("usage: %prog list [role_name]")
    elif self.action == "search":
        self.parser.add_option('--platforms', dest='platforms',
            help='list of OS platforms to filter by')
        self.parser.add_option('--galaxy-tags', dest='tags',
            help='list of galaxy tags to filter by')
        self.parser.set_usage("usage: %prog search [<search_term>] [--galaxy-tags <galaxy_tag1,galaxy_tag2>] [--platforms platform]")

    # options that apply to more than one action
    if self.action != "init":
        self.parser.add_option('-p', '--roles-path', dest='roles_path', default=C.DEFAULT_ROLES_PATH,
            help='The path to the directory containing your roles. '
                 'The default is the roles_path configured in your '
                 'ansible.cfg file (/etc/ansible/roles if not configured)')

    if self.action in ("info","init","install","search"):
        # Actions that may talk to the galaxy API server.
        self.parser.add_option('-s', '--server', dest='api_server', default="https://galaxy.ansible.com",
            help='The API server destination')
        self.parser.add_option('-c', '--ignore-certs', action='store_false', dest='validate_certs', default=True,
            help='Ignore SSL certificate validation errors.')

    if self.action in ("init","install"):
        self.parser.add_option('-f', '--force', dest='force', action='store_true', default=False,
            help='Force overwriting an existing role')

    # get options, args and galaxy object
    self.options, self.args = self.parser.parse_args()
    self.display.verbosity = self.options.verbosity
    self.galaxy = Galaxy(self.options, self.display)

    return True
def run(self):
    """Connect to the galaxy API if the action requires it, then dispatch.

    Dispatch happens via self.execute(), which routes to the
    execute_<action> method selected during parse().
    """
    super(GalaxyCLI, self).run()

    # if not offline, get connect to galaxy api
    if self.action in ("info","install", "search") or (self.action == 'init' and not self.options.offline):
        api_server = self.options.api_server
        self.api = GalaxyAPI(self.galaxy, api_server)
        # NOTE(review): GalaxyAPI() returns an object, which is always
        # truthy, so this guard looks unreachable — confirm whether the
        # intent was to probe the server here instead.
        if not self.api:
            raise AnsibleError("The API server (%s) is not responding, please try again later." % api_server)

    self.execute()
def exit_without_ignore(self, rc=1):
    """
    Exits with the specified return code unless the
    option --ignore-errors was specified

    The abort is signalled by raising AnsibleError; *rc* is kept for
    interface compatibility.
    """
    if self.get_opt("ignore_errors", False):
        return
    raise AnsibleError('- you can use --ignore-errors to skip failed roles and finish processing the list.')
def _display_role_info(self, role_info):
    """Render a role-info dict as an indented, human-readable text block.

    Keys listed in SKIP_INFO_KEYS are omitted at both nesting levels;
    dict-valued fields are expanded one level deep.
    """
    parts = [
        "\nRole: %s \n" % role_info['name'],
        "\tdescription: %s \n" % role_info.get('description', ''),
    ]
    for field in sorted(role_info.keys()):
        if field in self.SKIP_INFO_KEYS:
            continue
        value = role_info[field]
        if isinstance(value, dict):
            parts.append("\t%s: \n" % (field))
            for sub_key in sorted(value.keys()):
                if sub_key in self.SKIP_INFO_KEYS:
                    continue
                parts.append("\t\t%s: %s\n" % (sub_key, value[sub_key]))
        else:
            parts.append("\t%s: %s\n" % (field, value))
    return "".join(parts)
############################
# execute actions
############################
def execute_init(self):
    """
    Executes the init action, which creates the skeleton framework
    of a role that complies with the galaxy metadata format.

    Raises AnsibleOptionsError when no role name was given, and
    AnsibleError when the target path already exists (unless --force).
    """
    init_path = self.get_opt('init_path', './')
    force = self.get_opt('force', False)
    offline = self.get_opt('offline', False)

    role_name = self.args.pop(0).strip() if self.args else None
    if not role_name:
        raise AnsibleOptionsError("- no role name specified for init")
    role_path = os.path.join(init_path, role_name)
    if os.path.exists(role_path):
        if os.path.isfile(role_path):
            raise AnsibleError("- the path %s already exists, but is a file - aborting" % role_path)
        elif not force:
            raise AnsibleError("- the directory %s already exists." % role_path + \
                "you can use --force to re-initialize this directory,\n" + \
                "however it will reset any main.yml files that may have\n" + \
                "been modified there already.")

    # create the default README.md
    if not os.path.exists(role_path):
        os.makedirs(role_path)
    readme_path = os.path.join(role_path, "README.md")
    # 'with' guarantees the handle is closed even if the write fails
    with open(readme_path, "wb") as f:
        f.write(self.galaxy.default_readme)

    for dir in GalaxyRole.ROLE_DIRS:
        dir_path = os.path.join(init_path, role_name, dir)
        main_yml_path = os.path.join(dir_path, 'main.yml')
        # create the directory if it doesn't exist already
        if not os.path.exists(dir_path):
            os.makedirs(dir_path)

        # now create the main.yml file for that directory
        if dir == "meta":
            # create a skeleton meta/main.yml with a valid galaxy_info
            # datastructure in place, plus with all of the available
            # platforms included (but commented out), the galaxy_tags
            # list, and the dependencies section
            platforms = []
            if not offline and self.api:
                platforms = self.api.get_list("platforms") or []

            # group the list of platforms from the api based
            # on their names, with the release field being
            # appended to a list of versions
            platform_groups = defaultdict(list)
            for platform in platforms:
                platform_groups[platform['name']].append(platform['release'])
                platform_groups[platform['name']].sort()

            inject = dict(
                author = 'your name',
                company = 'your company (optional)',
                license = 'license (GPLv2, CC-BY, etc)',
                issue_tracker_url = 'http://example.com/issue/tracker',
                min_ansible_version = '1.2',
                platforms = platform_groups,
            )
            rendered_meta = Environment().from_string(self.galaxy.default_meta).render(inject)
            with open(main_yml_path, 'w') as f:
                f.write(rendered_meta)
        elif dir not in ('files','templates'):
            # just write a (mostly) empty YAML file for main.yml
            with open(main_yml_path, 'w') as f:
                f.write('---\n# %s file for %s\n' % (dir,role_name))
    self.display.display("- %s was created successfully" % role_name)
def execute_info(self):
    """
    Executes the info action. This action prints out detailed
    information about an installed role as well as info available
    from the galaxy API.
    """
    if len(self.args) == 0:
        # the user needs to specify a role
        raise AnsibleOptionsError("- you must specify a user/role name")

    roles_path = self.get_opt("roles_path")

    data = ''
    for role in self.args:
        role_info = {'path': roles_path}
        gr = GalaxyRole(self.galaxy, role)

        install_info = gr.install_info
        if install_info:
            if 'version' in install_info:
                # rename so the locally-recorded version does not clash
                # with the 'version' field coming back from the galaxy
                # API (was misspelled 'intalled_version')
                install_info['installed_version'] = install_info['version']
                del install_info['version']
            role_info.update(install_info)

        remote_data = False
        if self.api:
            remote_data = self.api.lookup_role_by_name(role, False)
            if remote_data:
                role_info.update(remote_data)

        if gr.metadata:
            role_info.update(gr.metadata)

        req = RoleRequirement()
        role_spec = req.role_yaml_parse({'role': role})
        if role_spec:
            role_info.update(role_spec)

        data += self._display_role_info(role_info)
        # nothing accumulated at all means the role was not found anywhere
        if not data:
            data += "\n- the role %s was not found" % role

    self.pager(data)
def execute_install(self):
    """
    Executes the installation action. The args list contains the
    roles to be installed, unless -f was specified. The list of roles
    can be a name (which will be downloaded via the galaxy API and github),
    or it can be a local .tar.gz file.
    """
    role_file = self.get_opt("role_file", None)

    if len(self.args) == 0 and role_file is None:
        # the user needs to specify one of either --role-file
        # or specify a single user/role name
        raise AnsibleOptionsError("- you must specify a user/role name or a roles file")
    elif len(self.args) == 1 and role_file is not None:
        # using a role file is mutually exclusive of specifying
        # the role name on the command line
        raise AnsibleOptionsError("- please specify a user/role name, or a roles file, but not both")

    no_deps = self.get_opt("no_deps", False)
    force = self.get_opt('force', False)

    roles_left = []
    if role_file:
        try:
            # 'with' closes the handle even when parsing below raises
            # an AnsibleError (the old open()/close() pair leaked it)
            with open(role_file, 'r') as f:
                if role_file.endswith('.yaml') or role_file.endswith('.yml'):
                    try:
                        required_roles = yaml.safe_load(f.read())
                    except Exception as e:
                        raise AnsibleError("Unable to load data from the requirements file: %s" % role_file)

                    if required_roles is None:
                        raise AnsibleError("No roles found in file: %s" % role_file)

                    for role in required_roles:
                        role = RoleRequirement.role_yaml_parse(role)
                        self.display.debug('found role %s in yaml file' % str(role))
                        if 'name' not in role and 'scm' not in role:
                            raise AnsibleError("Must specify name or src for role")
                        roles_left.append(GalaxyRole(self.galaxy, **role))
                else:
                    self.display.deprecated("going forward only the yaml format will be supported")
                    # roles listed in a file, one per line
                    for rline in f.readlines():
                        self.display.debug('found role %s in text file' % str(rline))
                        role = RoleRequirement.role_yaml_parse(rline.strip())
                        roles_left.append(GalaxyRole(self.galaxy, **role))
        except (IOError, OSError) as e:
            self.display.error('Unable to open %s: %s' % (role_file, str(e)))
    else:
        # roles were specified directly, so we'll just go out grab them
        # (and their dependencies, unless the user doesn't want us to).
        for rname in self.args:
            roles_left.append(GalaxyRole(self.galaxy, rname.strip()))

    for role in roles_left:
        self.display.debug('Installing role %s ' % role.name)
        # skip roles that are already installed unless --force was given
        if role.install_info is not None and not force:
            self.display.display('- %s is already installed, skipping.' % role.name)
            continue

        try:
            installed = role.install()
        except AnsibleError as e:
            self.display.warning("- %s was NOT installed successfully: %s " % (role.name, str(e)))
            self.exit_without_ignore()
            continue

        # install dependencies, if we want them
        if not no_deps and installed:
            role_dependencies = role.metadata.get('dependencies') or []
            for dep in role_dependencies:
                self.display.debug('Installing dep %s' % dep)
                dep_req = RoleRequirement()
                dep_info = dep_req.role_yaml_parse(dep)
                dep_role = GalaxyRole(self.galaxy, **dep_info)
                if '.' not in dep_role.name and '.' not in dep_role.src and dep_role.scm is None:
                    # we know we can skip this, as it's not going to
                    # be found on galaxy.ansible.com
                    continue
                if dep_role.install_info is None or force:
                    if dep_role not in roles_left:
                        self.display.display('- adding dependency: %s' % dep_role.name)
                        roles_left.append(dep_role)
                    else:
                        self.display.display('- dependency %s already pending installation.' % dep_role.name)
                else:
                    self.display.display('- dependency %s is already installed, skipping.' % dep_role.name)

        if not installed:
            self.display.warning("- %s was NOT installed successfully." % role.name)
            self.exit_without_ignore()

    return 0
def execute_remove(self):
    """
    Executes the remove action. The args list contains the list
    of roles to be removed. This list can contain more than one role.
    """
    if len(self.args) == 0:
        raise AnsibleOptionsError('- you must specify at least one role to remove.')
    for role_name in self.args:
        role = GalaxyRole(self.galaxy, role_name)
        try:
            # remove() reports whether anything was actually deleted
            if role.remove():
                self.display.display('- successfully removed %s' % role_name)
            else:
                self.display.display('- %s is not installed, skipping.' % role_name)
        except Exception as e:
            raise AnsibleError("Failed to remove role %s: %s" % (role_name, str(e)))
    return 0
def execute_list(self):
    """
    Executes the list action. The args list can contain zero
    or one role. If one is specified, only that role will be
    shown, otherwise all roles in the specified directory will
    be shown.
    """
    if len(self.args) > 1:
        raise AnsibleOptionsError("- please specify only one role to list, or specify no roles to see a full list")

    def _version_label(galaxy_role):
        # Pull the installed version from install_info, falling back to a placeholder.
        info = galaxy_role.install_info
        version = info.get("version", None) if info else None
        return version if version else "(unknown version)"

    if len(self.args) == 1:
        # show only the requested role, if it exists
        name = self.args.pop()
        gr = GalaxyRole(self.galaxy, name)
        if gr.metadata:
            # show some more info about single roles here
            self.display.display("- %s, %s" % (name, _version_label(gr)))
        else:
            self.display.display("- the role %s was not found" % name)
    else:
        # show all valid roles in the roles_path directory
        roles_path = os.path.expanduser(self.get_opt('roles_path'))
        if not os.path.exists(roles_path):
            raise AnsibleOptionsError("- the path %s does not exist. Please specify a valid path with --roles-path" % roles_path)
        if not os.path.isdir(roles_path):
            raise AnsibleOptionsError("- %s exists, but it is not a directory. Please specify a valid path with --roles-path" % roles_path)
        for entry in os.listdir(roles_path):
            gr = GalaxyRole(self.galaxy, entry)
            if gr.metadata:
                self.display.display("- %s, %s" % (entry, _version_label(gr)))
    return 0
def execute_search(self):
    """
    Searches the galaxy server for roles matching the (optional) single
    search term, filtered by the platforms/tags options, and pages the results.
    """
    if len(self.args) > 1:
        raise AnsibleOptionsError("At most a single search term is allowed.")
    # after the check above, args holds zero or one term
    search = self.args.pop() if self.args else None
    response = self.api.search_roles(search, self.options.platforms, self.options.tags)
    if 'count' in response:
        self.galaxy.display.display("Found %d roles matching your search:\n" % response['count'])
    data = ''
    if 'results' in response:
        data = ''.join(self._display_role_info(role) for role in response['results'])
    self.pager(data)
|
simobasso/ansible
|
lib/ansible/cli/galaxy.py
|
Python
|
gpl-3.0
| 20,751
|
[
"Galaxy"
] |
fd045f7025f5b00deee66d4c61ff6eef51b30ff1362415823cd501309faad673
|
"""
.. module: FSRStools.refind
:platform: Windows
.. moduleauthor:: Daniel Dietze <daniel.dietze@berkeley.edu>
A collection of functions related to the optical refractive index.
Apart from providing tabulated indices for commonly used optical materials including dielectrics, common solvents and metals,
this module defines functions for more general use. These include Kramers-Kronig transformations, the Lorentz oscillator model,
general support for Sellmeier equations, inhomogeneous broadening and first, second and third order derivatives with respect to
wavelength. Furthermore, functions for conversion from refractive index to group index and permittivity and vice versa are provided.
.. note:: All functions for tabulated refractive indices expect wavelength to be in um.
**Change log:**
*01-29-2016*:
- Changed syntax in `load_data` in response to numpy V 1.10 changes.
*02-09-2016*:
- Added gadolinium gallium garnet to `n_glass`.
- Added polystyrene to `n_polymer`.
- Updated `kramers_kronig`.
..
This file is part of the FSRStools python module.
The FSRStools python module is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
The FSRStools python module is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with the FSRStools python module. If not, see <http://www.gnu.org/licenses/>.
Copyright 2014, 2015 Daniel Dietze <daniel.dietze@berkeley.edu>.
"""
import numpy as np
from scipy.interpolate import interp1d
import FSRStools.fitting as ft
import pkgutil
from StringIO import StringIO
# #################################################################################################################
# helper function to load local data files as numpy arrays independent of the absolute/relative paths of the module
# and the calling script
def load_data(filename, **argv):
    """Load a data file bundled with this package as a numpy array,
    independent of the absolute/relative paths of the module and the
    calling script.

    :param str filename: Name of the data file, relative to the package.
    :param mixed argv: Extra keyword arguments passed on to :func:`numpy.genfromtxt`.
    :returns: Data as a float numpy array (columns unpacked).
    """
    s = pkgutil.get_data(__package__, filename)
    ss = StringIO(s)
    # BUGFIX: the old check `np.__version__.startswith('1.10')` silently failed
    # for numpy 1.11+, where the removed 'skiprows' keyword would then raise.
    # 'skip_header' has been the supported spelling since numpy 1.0, so
    # translate unconditionally.
    if 'skiprows' in argv:
        argv['skip_header'] = argv.pop('skiprows')
    return np.genfromtxt(ss, dtype='float', unpack=True, **argv)
# #################################################################################################################
# derivatives of refractive indices
# index N is function of wl and arguments *args
# wavelength in um
def dN(N, wl, *args):
    """First order derivative of the refractive index with respect to wavelength,
    computed with a central finite difference.

    :param func N: (Complex) Refractive index as function of wavelength and possible arguments: N(wl, args).
    :param array wl: Wavelength axis in um (can also be a single float).
    :param mixed args: Variable number of arguments to be provided to N.
    :returns: First order derivative of N with same shape as wl.
    """
    h = 1e-4  # step size in um
    return (N(wl + h, *args) - N(wl - h, *args)) / (2.0 * h)
def d2N(N, wl, *args):
    """Second order derivative of the refractive index with respect to wavelength,
    obtained by applying the central finite difference :func:`dN` twice.

    :param func N: (Complex) Refractive index as function of wavelength and possible arguments: N(wl, args).
    :param array wl: Wavelength axis in um (can also be a single float).
    :param mixed args: Variable number of arguments to be provided to N.
    :returns: Second order derivative of N with same shape as wl.
    """
    h = 1e-4  # step size in um
    return (dN(N, wl + h, *args) - dN(N, wl - h, *args)) / (2.0 * h)
def d3N(N, wl, *args):
    """Third order derivative of the refractive index with respect to wavelength,
    obtained by applying the central finite difference to :func:`d2N`.

    :param func N: (Complex) Refractive index as function of wavelength and possible arguments: N(wl, args).
    :param array wl: Wavelength axis in um (can also be a single float).
    :param mixed args: Variable number of arguments to be provided to N.
    :returns: Third order derivative of N with same shape as wl.
    """
    h = 1e-4  # step size in um
    return (d2N(N, wl + h, *args) - d2N(N, wl - h, *args)) / (2.0 * h)
# #################################################################################################################
# group velocity refractive index
# N is refractive index in the form N(l, *args)
def n_group(N, wl, *args):
    """Group velocity refractive index.

    :param func N: (Complex) Refractive index as function of wavelength and possible arguments: N(wl, args).
    :param array wl: Wavelength axis in um (can also be a single float).
    :param mixed args: Variable number of arguments to be provided to N.
    :returns: Group velocity refractive index Ng = N - wl * dN with same shape as wl.
    """
    n_phase = N(wl, *args)
    slope = dN(N, wl, *args)
    return n_phase - wl * slope
# #################################################################################################################
# convert complex refractive index to dielectric function
def n_to_eps(n):
    """Convert (complex) refractive index to (complex) permittivity.

    :param array n: Refractive index to be converted to permittivity.
    :returns: Permittivity eps = (Re n)^2 - (Im n)^2 + 2j Re(n) Im(n), same shape as n.
    """
    re_n = np.real(n)
    im_n = np.imag(n)
    return re_n**2 - im_n**2 + 2j * re_n * im_n
# and vice versa
def eps_to_n(eps):
    """Convert (complex) permittivity to (complex) refractive index.

    :param array eps: Permittivity to be converted to refractive index.
    :returns: Refractive index (same shape as eps).
    """
    # scimath.sqrt yields complex output for negative or complex input
    return np.lib.scimath.sqrt(eps)
#################################################################################################################
# inhomogeneous broadening function
def applyInhomogeneousBroadening(x, y, dx):
    """Convolute a refractive index / permittivity profile with a Gaussian to introduce inhomogeneous broadening.
    This broadened refractive index / permittivity still complies with Kramers Kronig relations.

    :param array x: Frequency axis in same units as dx.
    :param array y: Refractive index / permittivity profile to convolute (same shape as x).
    :param float dx: FWHM of Gaussian in same units as x.
    :returns: Convoluted / broadened refractive index / permittivity profile.
    """
    pad = int(len(y) / 2)
    step = x[1] - x[0]
    # odd reflection padding suppresses edge artifacts of the convolution
    padded = np.pad(y, pad, mode='reflect', reflect_type='odd')
    axis = step * (np.arange(len(padded)) - len(padded) / 2)
    kernel = ft.gauss(axis, 1, 0, dx)
    # np.convolve is a plain sum; dividing by sum(kernel) normalizes the
    # Gaussian to unit area so the result approximates the integral form
    smoothed = np.convolve(padded, kernel, 'same')
    return smoothed[pad:-pad] / np.sum(kernel)
# #################################################################################################################
# calculate the Kramers-Kronig transformation of absorption spectrum k as function of frequency w
# IMPORTANT: k and w have to be sampled on a uniform grid!!!
# this formulation is to be used for the refractive index!
def kramers_kronig(nu, k, dir='forward'):
    """Calculate the Kramers-Kronig transformation of extinction coefficient spectrum k as function of wavenumber nu (or frequency) and vice-versa.
    The extinction coefficient is the imaginary part of the refractive index, related to the absorption coefficient alpha through k = alpha x lambda0 / (4 pi), where lambda0 is the vacuum wavelength.
    Calculates dn = n(nu') - n(infinity) = 2/pi x Pint( nu x k / (nu^2 - nu'^2) dnu), where Pint is Cauchy's principal value integral.
    The method is based on Maclaurin's formula, see Ohta et al., Appl. Spectrosc. 42, 952 (1988).

    .. note:: The same function can be used for angular frequency instead of wavenumber and for susceptibility instead of refractive index.
    .. important:: k and nu have to be sampled on the same **uniform** grid!!!

    :param array nu: Wavenumber axis.
    :param array k: Extinction coefficient spectrum (same shape as nu).
    :param str dir: 'forward' (default) calculates the real part, 'backward' the imaginary part.
    :returns: Refractive index change dn from the Kramers-Kronig transform (same shape as nu).
    """
    step = np.absolute(nu[1] - nu[0])
    # the two directions differ by exactly two minus signs
    sign = -1.0 if dir == 'backward' else 1.0
    out = np.zeros(len(k))
    for i in range(len(k)):
        # Maclaurin's formula: odd output points sum over even-indexed input
        # samples and vice versa, which avoids the singular denominator.
        sel = slice(0, None, 2) if i % 2 == 1 else slice(1, None, 2)
        out[i] = np.sum(0.5 * (k[sel] / (nu[sel] - nu[i]) + sign * k[sel] / (nu[sel] + nu[i])))
    return sign * 2.0 / np.pi * 2.0 * step * out
# #################################################################################################################
# metals: type = gold, silver, copper, aluminum, chromium
# use interpolation of experimental data from Johnson and Christy for Ag, Au, Cu and Rakic for Al
# wavelength wl in um
def n_metal(wl, type='gold'):
    """Returns refractive index of some metals (Au, Ag, Cu, Al, Cr).
    Uses cubic interpolation of experimental values obtained from Johnson and Christy 1972 (Ag, Au, Cu) and Rakic 1998 (Al, Cr).

    :param array wl: Wavelength axis in um.
    :param str type: Type of metal ('Au'/'gold', 'Ag'/'silver', 'Cu'/'copper', 'Al'/'aluminum'/'aluminium', 'Cr'/'chromium').
    :returns: (Complex) Refractive index for given wavelength axis (same shape as wl).
    :raises ValueError: if the metal type is not recognized.

    .. note:: Interpolation is done via SciPy's interp1d function, which throws an exception when the new x-coordinate is out of range.
    """
    if type in ('gold', 'Au', 'copper', 'Cu', 'silver', 'Ag'):
        A = load_data("nobelmetals.dat")
        # convert photon energy (eV) to vacuum wavelength (um)
        w0 = 2.9979e8 / (A[0] * 1.6022e-19 / 6.6261e-34) * 1e6
        if type in ('silver', 'Ag'):
            n0 = A[3] + 1j * A[4]
        elif type in ('copper', 'Cu'):
            n0 = A[1] + 1j * A[2]
        else:  # gold
            n0 = A[5] + 1j * A[6]
        # flip so the wavelength axis is ascending for interp1d
        B = interp1d(np.flipud(w0), np.flipud(n0), kind='cubic')
    elif type in ('aluminum', 'aluminium', 'Al'):
        A = load_data("METALS_Aluminium_Rakic.txt", skiprows=1)
        n0 = A[1] + 1j * A[2]
        B = interp1d(np.flipud(A[0]), np.flipud(n0), kind='cubic')
    elif type in ('chromium', 'Cr'):  # filmetrics.com
        A = load_data("Cr.txt", skiprows=2)
        n0 = A[1] + 1j * A[2]
        B = interp1d(A[0] / 1000.0, n0, kind='cubic')
    else:
        # BUGFIX: previously an unknown type fell through to `return B(wl)`
        # and crashed with an UnboundLocalError instead of a clear message.
        raise ValueError("unknown metal type: %s" % type)
    return B(wl)
# use Etchegoin model: Drude term (epsinf, lp, gp) plus Lorentz poles (A, phi, lambda, gamma)
# wl is wavelength in um
def n_metal_model(wl, epsinf, lp, gp, *poles):
    """Use the model presented in Etchegoin et al., *J Chem Phys* **125**, 164705 (2006) to describe the refractive index of nobel metals using a combination of Drude and Lorentz models.

    :param array wl: Wavelength axis in um.
    :param float epsinf: High frequency limit of permittivity.
    :param float lp: Plasma wavelength for Drude model (same units as wl).
    :param float gp: Damping term for Drude model (same units as wl).
    :param mixed poles: Parameters for Lorentz poles. For each pole provide (amplitude, phase offset, resonance wavelength and width).
    :returns: Complex refractive index (same shape as wl).
    """
    eps = complex(epsinf)
    # Drude contribution
    eps = eps - 1.0 / (lp**2 * (1.0 / wl**2 + 1j / (gp * wl)))
    # Lorentz poles: four parameters each (A, phi, lambda0, gamma); any
    # trailing incomplete group is ignored (zip truncates).
    for A, phi, lam, gam in zip(poles[0::4], poles[1::4], poles[2::4], poles[3::4]):
        eps = eps + A / lam * (np.exp(1j * phi) / (1.0 / lam - 1.0 / wl - 1j / gam)
                               + np.exp(-1j * phi) / (1.0 / lam + 1.0 / wl + 1j / gam))
    return np.lib.scimath.sqrt(eps)
# general sellmeier type of refractive index
# p = B0, B1, B2, .. Bx, C0, C1, C2, .. Cx
def n_sellmeier(wl, *p):
    """General Sellmeier-type of refractive index.

    :param array wl: Wavelength axis in um.
    :param mixed p: Sellmeier coefficients. For each term provide two parameters Bi and Ci in the form p = B0, B1, B2, .. Bx, C0, C1, C2, .. Cx.
    :returns: Refractive index n^2 - 1 = sum(Bi * wl^2 / (wl^2 - Ci)) with same shape as wl.
    """
    nterms = int(len(p) / 2)
    eps = 1.0
    # first half of p holds the Bi, second half the Ci
    for Bi, Ci in zip(p[:nterms], p[nterms:]):
        eps = eps + Bi * wl**2 / (wl**2 - Ci)
    return np.sqrt(eps)
# #################################################################################################################
# refractive indices of standard air
# wl is wavelength in um, data taken from refractiveindex.com
# dry, 15degC, 101325 Pa with 450ppm CO2
def n_air(wl):
    """Refractive index of standard air (dry, 15degC, 101325 Pa with 450ppm CO2). Data taken from refractiveindex.info.

    :param array wl: Wavelength axis in um.
    :returns: Refractive index (same shape as wl).
    """
    inv_wl2 = 1.0 / wl**2
    return 1.0 + 0.05792105 / (238.0185 - inv_wl2) + 0.00167917 / (57.362 - inv_wl2)
# #################################################################################################################
# refractive indices of common glasses
# refractive index of common glasses
# type = SiO2, F2, NF2, SF11, BK7, SF10, Sapphire_E, Sapphire_O
# CoverGlass is siliconized 22x22mm thick cover glass, parameters from fit to absorption spectrum
# wl is in um
def n_glass(wl, type="SiO2"):
    """Refractive index of some common glasses using their respective Sellmeier coefficients.

    .. versionchanged:: 02-09-2016
    Added Gadolinium Gallium Garnet 'GGG'.

    :param array wl: Wavelength axis in um.
    :param str type: Type of glass ('SiO2', 'F2', 'NF2', 'SF10', 'SF11', 'BK7', 'Sapphire_E', 'Sapphire_O', 'CoverGlass', 'GGG'). 'CoverGlass' refers to silanized microscope cover glasses with parameters fitted to experimental data. 'GGG' is Gadolinium Gallium Garnet.
    :returns: Refractive index (same shape as wl).
    :raises ValueError: if the glass type is not recognized.
    """
    Bdict = {"SiO2": [0.696166300, 0.407942600, 0.897479400],
             "F2": [1.34533359, 0.209073118, 0.937357162],
             "NF2": [1.39757037, 0.159201403, 1.26865430],
             "SF11": [1.73759695, 0.313747346, 1.89878101],
             "BK7": [1.03961212, 0.231792344, 1.01046945],
             "SF10": [1.61625977, 0.259229334, 1.07762317],
             "Sapphire_E": [1.50397590, 0.550691410, 6.5927379],
             "Sapphire_O": [1.43134930, 0.650547130, 5.34140210],
             "CoverGlass": [4.21885399e-03, 3.80246387e-01, 1.27271765e+00],
             "GGG": [1.7727, 0.9767, 4.9668]}
    Cdict = {"SiO2": [0.00467914826, 0.0135120631, 97.9340025],
             "F2": [0.00997743871, 0.0470450767, 111.886764],
             "NF2": [0.00995906143, 0.0546931752, 119.248346],
             "SF11": [0.0113188707, 0.0623068142, 155.236290],
             "BK7": [0.00600069867, 0.0200179144, 103.560653],
             "SF10": [0.0127534559, 0.0581983954, 116.607680],
             "Sapphire_E": [0.00548041129, 0.0147994281, 402.895140],
             "Sapphire_O": [0.00527992610, 0.0142382647, 325.017834],
             "CoverGlass": [1.14731882e-01, 9.92082291e-04, 6.53964380e+02],
             "GGG": [0.1567, 0.01375, 22.715]}
    try:
        args = Bdict[type] + Cdict[type]
    except KeyError:
        # BUGFIX: the old bare `except` only printed a message and then
        # crashed with a NameError on the undefined `args` below.
        raise ValueError("unknown glass type: %s" % type)
    return n_sellmeier(wl, *args)
# #################################################################################################################
# refractive index of common materials used for optical coatings
# data taken from refractiveindex.info
def n_coatings(wl, type="MgF2_o"):
    """Refractive index of common materials used for optical coatings. Data taken from refractiveindex.info.

    .. versionadded:: 10-28-2015
    Added indices for rutile TiO2.

    :param array wl: Wavelength axis in um.
    :param str type: Type of coating ('MgF2_o', 'MgF2_e', 'ZnSe', 'TiO2_o', 'TiO2_e').
    :returns: Refractive index (same shape as wl).
    :raises ValueError: if the coating type is not recognized.
    """
    # rutile TiO2 uses a single-term dispersion formula instead of Sellmeier
    if type == "TiO2_o":
        return np.sqrt(5.913 + 0.2441 / (wl**2 - 0.0803))
    elif type == "TiO2_e":
        return np.sqrt(7.197 + 0.3322 / (wl**2 - 0.0843))
    else:
        Bdict = {"MgF2_o": [0.27620, 0.60967, 0.0080, 2.14973],
                 "MgF2_e": [0.25385, 0.66405, 1.0899, 0.1816, 2.1227],
                 "ZnSe": [4.45813734, 0.467216334, 2.89566290]}
        Cdict = {"MgF2_o": [0.0, 0.08636**2, 18.0**2, 25.0**2],
                 "MgF2_e": [0.0, 0.08504**2, 22.2**2, 24.4**2, 40.6**2],
                 "ZnSe": [0.200859853**2, 0.391371166**2, 47.1362108**2]}
        try:
            args = Bdict[type] + Cdict[type]
        except KeyError:
            # BUGFIX: narrowed from a bare `except`, which would have masked
            # unrelated errors as "unknown coating type".
            raise ValueError("unknown coating type!")
        return n_sellmeier(wl, *args)
# #################################################################################################################
# refractive index of common liquids
# data taken from refractiveindex.info
# ----------------------------------------------------------------------------------------------------------------------------
def n_liquid(wl, type='water'):
    """Refractive index of common solvents. Data taken from refractiveindex.info.

    :param array wl: Wavelength axis in um.
    :param str type: Type of liquid / solvent ('water', 'cyclohexane', 'ethanol', 'methanol').
    :returns: Refractive index (same shape as wl).
    """
    # analytic Cauchy-type dispersion formulas for the organic solvents
    if type == "cyclohexane":
        return 1.41545 + 0.00369 / wl**2 + 0.00004 / wl**4
    if type == "ethanol":
        return 1.35265 + 0.00306 / wl**2 + 0.00002 / wl**4
    if type == "methanol":
        return 1.294611 + 12706.403e-6 / wl**2
    # default: water, cubic interpolation of tabulated (complex) data
    data = load_data("LIQUIDS_Water_Hale.txt", skiprows=1)
    interp = interp1d(data[0], data[1] + 1j * data[2], kind='cubic')
    return interp(wl)
# #################################################################################################################
# refractive index of polymers
# data taken partly from refractiveindex.info
def n_polymer(wl, type="PVA"):
    """Refractive index of some polymers. Data for PVA taken from *J. Phys. D* **44**, 205105 (2011).

    :param array wl: Wavelength axis in um.
    :param str type: Type of polymer ('PVA', 'PS' or 'polystyrene').
    :returns: Refractive index (same shape as wl); an empty list for unknown types.
    """
    if type == "PVA":
        # taken from J. Phys. D 44, 205105 (2011)
        return np.lib.scimath.sqrt(2.34 - 3.06e-2 * wl**2)
    if type in ("PS", "polystyrene"):
        # taken from refractiveindex.info
        return np.lib.scimath.sqrt(1.0 + 1.4435 * wl**2 / (wl**2 - 0.020216))
    # unknown polymer: keep returning an empty list for backward compatibility
    return []
# #################################################################################################################
# Lorentz Oscillator Model - see http://de.wikipedia.org/wiki/Lorentzoszillator#
# wl is wavelength (in um)
# ebg is background dielectric constant
# p contains parameters for each oscillator (A, lambda0, dlambda)
def n_LorOsc(wl, ebg, *p):
    """Refractive index from Lorentz oscillator model. See http://de.wikipedia.org/wiki/Lorentzoszillator for more details.

    :param array wl: Wavelength axis in um.
    :param float ebg: Background / high frequency dielectric constant.
    :param mixed p: Parameters for each oscillator / pole. For each pole provide (amplitude, resonance wavelength, width).
    :returns: Complex refractive index (same shape as wl).
    """
    eps = complex(ebg)
    # three parameters per oscillator: amplitude A, resonance l0, width dl
    for A, l0, dl in zip(p[0::3], p[1::3], p[2::3]):
        eps = eps + A / ((1 / l0)**2 - (1 / wl)**2 - 1j * dl / (l0**2 + dl * l0) / wl)
    return np.lib.scimath.sqrt(eps)
# shortcut to absorption coefficient
# result in 1/um
def alpha_LorOsc(wl, ebg, *p):
    """Absorption coefficient from Lorentz oscillator model. See http://de.wikipedia.org/wiki/Lorentzoszillator for more details.

    :param array wl: Wavelength axis in um.
    :param float ebg: Background / high frequency dielectric constant.
    :param mixed p: Parameters for each oscillator / pole. For each pole provide (amplitude, resonance wavelength, width).
    :returns: Absorption coefficient in 1/um (same shape as wl).
    """
    # alpha = 4 pi k / lambda, where k is the imaginary part of the index
    kappa = np.imag(n_LorOsc(wl, ebg, *p))
    return 4.0 * np.pi / wl * kappa
|
ddietze/FSRStools
|
refind/__init__.py
|
Python
|
gpl-3.0
| 21,337
|
[
"Gaussian"
] |
c69430270301b3afd002164f27ee8ab34e0a23e6f3f9f2b8b0fe19bac34c1830
|
"""
sewpy: Source Extractor Wrapper for Python
Recent improvements (latest on top):
- better verbosity about masked output of ASSOC procedure
- ASSOC helper implemented
- run() now returns a dict containing several objects, such as the output astropy table, catfilepath, workdir, and logfilepath.
- now also works with vector parameters such as MAG_APER(4)
- possibility to "nice" SExtractor
- a log file is written for every run() if not told otherwise
- filenames change according to FITS image file name where required
- but you can also pass an "imgname" argument to run, and this will be used instead.
- params and config files are written only once, as discussed
- appropriate warnings and behaviour when a workdir already exists, or when you rerun on the same file
- possibility to use existing param / config / conv / nnw files
- run() returns either the catalog, or the filepath to the catalog
To do:
- move "config" to run ?
- check that all masked columns of ASSOC do indeed share the same mask.
- implement _check_config()
- better detection of SExtractor failures
- implement raising Exceptions when SExtractor fails
- implement CHECK IMAGE "helper" ?
- give access to several conv and nnw settings (if needed)
Slightly modified by Song Huang 2014-10-29
"""
import os
import astropy
import astropy.table
import subprocess
import tempfile
import re
import copy
from datetime import datetime
import numpy as np
import logging
logger = logging.getLogger(__name__)
# Enriched default SExtractor output parameters (by Song Huang), written to the
# .param file whenever the caller does not supply an explicit list.
# Reference: http://terapix.iap.fr/article.php?id_article=628
defaultparams = [ "X_IMAGE", "Y_IMAGE", "A_IMAGE", "B_IMAGE", "THETA_IMAGE",
"ERRA_IMAGE", "ERRB_IMAGE", "ALPHA_J2000", "DELTA_J2000",
"XWIN_IMAGE", "YWIN_IMAGE", "AWIN_IMAGE", "BWIN_IMAGE",
"THETAWIN_IMAGE", "ALPHAWIN_J2000", "DELTAWIN_J2000",
"FLUX_AUTO", "FLUXERR_AUTO", "FLUX_PETRO", "FLUXERR_PETRO",
"BACKGROUND", "THRESHOLD", "PETRO_RADIUS", "KRON_RADIUS",
"FLAGS", "CLASS_STAR"]
# Default config overrides: empty, i.e. rely on the "sex -d" defaults.
defaultconfig = {}
class SEW():
"""
Holds together all the settings to run SExtractor executable on one or several images.
"""
def __init__(self, workdir=None, sexpath="sex", params=None,
             config=None, configfilepath=None, nice=None, suffix=None):
    """
    All arguments have default values and are optional.

    :param workdir: where I'll write my files. Specify this (e.g., "test") if you care about the
        output files. If None, I create a unique temporary directory myself, usually in /tmp.
    :param sexpath: path to the sextractor executable (e.g., "sex" or "sextractor", if in your PATH)
    :param params: the parameters you want SExtractor to measure (i.e., what you would write in the
        "default.param" file)
    :type params: list of strings
    :param config: config settings that will supersede the default config (e.g., what you would
        change in the "default.sex" file)
    :type config: dict
    :param configfilepath: specify this if you want me to use an existing SExtractor config file as
        "default" (instead of the sextractor -d one)
    :param nice: niceness with which I should run SExtractor
    :param suffix: optional suffix for output file names; a single leading "_" is enforced and a
        trailing "_" is removed.

    To use an existing SExtractor param-, conv-, or nnw-file, simply specify these in the config
    dict, using the appropriate SExtractor keys (PARAMETERS_NAME, FILTER_NAME, ...)

    .. warning:: When using *vector*-type params resulting in multiple columns (such as "FLUX_RADIUS(3)"
        in the example above), do not put these in the last position of the params list, otherwise astropy
        fails reading the catalog! This is probably due to the fact that the SExtractor header doesn't give
        a hint that multiple columns are expected when a vector-type param comes last. A workaround would be
        way too complicated.
    """
    # We set up the trivial things:
    self.sexpath = sexpath
    self.configfilepath = configfilepath
    self.nice = nice
    # Normalize the output-file suffix: exactly one leading "_", no trailing "_".
    # BUGFIX: the original used string *identity* tests ("is"/"is not"), stripped
    # TWO characters (suffix[:-2]) instead of one for a trailing underscore, and
    # raised IndexError for an all-whitespace suffix.
    if suffix is not None:
        suffix = suffix.strip()
        if suffix and suffix[0] != '_':
            suffix = '_' + suffix
        if suffix.endswith('_'):
            suffix = suffix[:-1]
    else:
        suffix = ""
    self.suffix = suffix
    logger.info("SExtractor version is %s" % (self.get_version()))
    # ... and the workdir
    if workdir is not None:
        self.workdir = workdir
        self.tmp = False
        if os.path.isdir(workdir):
            logger.warning("SExtractor workdir '%s' exists, be careful! I will (maybe silently) delete or overwrite stuff." % (workdir))
        else:
            logger.info("Making new SExtractor workdir '%s'..." % (workdir))
            os.makedirs(workdir)
    else:
        self.workdir = tempfile.mkdtemp(prefix='sewpy_workdir_')
        self.tmp = True
    # NOTE: we deliberately do NOT call self._clean_workdir() here -- several
    # processes sharing a workdir would race each other.
    # ... and the params:
    if params is None:
        self.params = defaultparams
    else:
        self.params = params
    self._check_params()
    # ... and the config:
    if config is None:
        self.config = defaultconfig
    else:
        self.config = config
    self._set_instance_config()  # Adds some fixed stuff to self.config
    self._check_config()
def get_version(self):
    """
    To find the SExtractor version, we call it without arguments and parse the stderr output.

    :returns: a string (e.g. '2.4.4')
    :raises RuntimeError: if SExtractor cannot be launched or its version string cannot be parsed.
    """
    try:
        p = subprocess.Popen([self.sexpath], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except OSError:
        # narrowed from a bare `except`: Popen raises OSError for a missing executable
        raise RuntimeError("Could not run SExtractor. Is the path '%s' correct ? If not, specify sexpath='/path/to/sextractor'" % self.sexpath)
    out, err = p.communicate()
    if not isinstance(err, str):
        # Python 3: communicate() returns bytes; decode before regex matching
        err = err.decode('utf-8', 'replace')
    version_match = re.search(r"[Vv]ersion ([0-9\.])+", err)
    # BUGFIX: re.search returns None (never False) on failure; the original
    # "is False" test could not trigger, causing an AttributeError below
    # instead of this informative error.
    if version_match is None:
        raise RuntimeError("Could not determine SExctractor version, check the output of running '%s'" % (self.sexpath))
    # the match starts with "[Vv]ersion " (8 chars), the rest is the number
    version = str(version_match.group()[8:])
    assert len(version) != 0
    return version
def __str__(self):
    """
    Short human-readable summary of this instance.
    """
    return "'SEW object with workdir {0}'".format(self.workdir)
def _check_params(self):
    """
    Compares the params to a list of known params, and spits out a useful warning if
    something seems fishy.
    """
    # A param may wrap several values (e.g. "FLUX_RADIUS(10)"); unwrap the bare name.
    vector_pattern = re.compile(r"(\w*)\(\d*\)")
    found_strange = False
    for param in self.params:
        match = vector_pattern.match(param)
        bare_name = match.group(1) if match else param
        if bare_name not in self.fullparamlist:
            logger.warning("Parameter '%s' seems strange and might be unknown to SExtractor" \
                           % (param))
            found_strange = True
    if found_strange:
        logger.warning("Known parameters are: %s" % (self.fullparamtxt))
def _check_config(self):
    """
    Placeholder for future config validation -- not yet implemented.
    """
    pass
def _set_instance_config(self):
    """
    Sets config parameters that remain fixed for this instance.
    Called by __init__(). If needed, you could still mess with this config after __init__() has run.
    """
    if "PARAMETERS_NAME" in self.config:
        logger.info("You specified your own PARAMETERS_NAME, I will use it.")
    else:
        self.config["PARAMETERS_NAME"] = self._get_params_filepath()
    if "FILTER_NAME" in self.config:
        logger.info("You specified your own FILTER_NAME, I will use it.")
    else:
        self.config["FILTER_NAME"] = self._get_conv_filepath()
    if "CATALOG_NAME" in self.config:
        # the catalog path is always derived per-image; a user value would be ignored anyway
        logger.critical("You specified your own CATALOG_NAME, but I will *NOT* use it !")
        del self.config["CATALOG_NAME"]
def _get_params_filepath(self):
    """
    Path of the SExtractor params file -- fixed for a given instance.
    """
    return os.path.join(self.workdir, "sex%s.param" % self.suffix)
def _get_config_filepath(self):
    """
    Path of the config file -- fixed for a given instance.
    Returns the user-supplied configfilepath when one was set.
    """
    if self.configfilepath is not None:
        return self.configfilepath
    return os.path.join(self.workdir, "sex%s.config" % self.suffix)
def _get_conv_filepath(self):
    """
    Path of the convolution-mask file -- fixed for a given instance.
    """
    return os.path.join(self.workdir, "conv.txt")
def _get_cat_filepath(self, imgname):
    """
    Path of the output catalog -- changes from image to image.
    """
    return os.path.join(self.workdir, "%s%s.cat" % (imgname, self.suffix))
def _get_assoc_filepath(self, imgname):
    """
    Path of the ASSOC input file -- changes from image to image.
    """
    return os.path.join(self.workdir, "%s%s.assoc" % (imgname, self.suffix))
def _get_log_filepath(self, imgname):
    """
    Path of the run log file -- changes from image to image.
    """
    return os.path.join(self.workdir, "%s%s.log" % (imgname, self.suffix))
def _write_params(self, force=False):
    """
    Writes the parameters to the file, if needed.

    :param force: if True, I overwrite any existing file.
    """
    path = self._get_params_filepath()
    if not force and os.path.exists(path):
        logger.debug("The params file already exists, I don't overwrite it.")
        return
    with open(path, 'w') as handle:
        handle.write("\n".join(self.params))
        handle.write("\n")
    logger.debug("Wrote %s" % (path))
def _write_default_config(self, force=False):
    """
    Writes the *default* config file, if needed.
    I don't write this file if a specific config file is set.

    :param force: if True, I overwrite any existing file.
    """
    if self.configfilepath is not None:
        logger.debug("You use the existing config file %s, I don't have to write one." % \
                     (self._get_config_filepath()))
        return
    path = self._get_config_filepath()
    if not force and os.path.exists(path):
        logger.debug("Default config file already exists, I don't overwrite it.")
        return
    # "sex -d" dumps the built-in default configuration on stdout
    proc = subprocess.Popen([self.sexpath, "-d"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = proc.communicate()
    if err != "":
        logger.warning("Ouch, SExtractor complains :")
        logger.warning(err)
    with open(path, 'w') as handle:
        handle.write(out)
    logger.debug("Wrote %s" % (path))
def _write_default_conv(self):
    """
    Writes the default convolution matrix, if needed.
    """
    path = self._get_conv_filepath()
    if os.path.exists(path):
        logger.debug("Default conv file already exists, I don't overwrite it.")
        return
    with open(path, 'w') as handle:
        handle.write("""CONV NORM
# 3x3 ``all-ground'' convolution mask with FWHM = 2 pixels.
1 2 1
2 4 2
1 2 1""")
    logger.debug("Wrote %s" % (path))
def _clean_workdir(self):
"""
Removes the config/param files related to this instance, to allow for a fresh restart.
Files related to specific images are not removed.
"""
toremove = [self._get_config_filepath(), self._get_params_filepath(), self._get_conv_filepath()]
for filepath in toremove:
if os.path.exists(filepath):
logger.debug("Removing existing file %s..." % (filepath))
os.remove(filepath)
def _write_assoc(self, cat, xname, yname, imgname):
"""
Writes a plain text file which can be used as sextractor input for the ASSOC identification.
And "index" for each source is generated, it gets used to identify galaxies.
"""
#if assoc_xname not in assoc_cat.colnames or assoc_yname not in assoc_cat.colnames:
# raise RuntimeError("I don't have columns %s or %s" % (assoc_xname, assoc_yname))
if os.path.exists(self._get_assoc_filepath(imgname)):
logger.warning("ASSOC file already exists, I will overwrite it")
lines = []
for (number, row) in enumerate(cat):
# Seems safe(r) to not use row.index but our own number.
lines.append("%.3f\t%.3f\t%i\n" % (row[xname], row[yname], number))
lines = "".join(lines)
f = open(self._get_assoc_filepath(imgname), "w")
f.writelines(lines)
f.close()
logger.debug("Wrote ASSOC file %s..." % (self._get_assoc_filepath(imgname)))
def _add_prefix(self, table, prefix):
"""
Modifies the column names of a table by prepending the prefix *in place*.
Skips the VECTOR_ASSOC stuff !
"""
if prefix == "":
return
for colname in table.colnames:
if colname not in ["VECTOR_ASSOC", "VECTOR_ASSOC_1", "VECTOR_ASSOC_2"]:
table.rename_column(colname, prefix + colname)
def __call__(self, imgfilepath, imgname=None, assoc_cat=None, assoc_xname="x", assoc_yname="y",
returncat=True, prefix="", writelog=True, writeparam=False,
writeconfig=False, writeconv=False):
"""
Runs SExtractor on a given image.
:param imgfilepath: Path to the input FITS image I should run on
:param assoc_cat: optional input catalog (astropy table), if you want to use the ASSOC helper
:param assoc_xname: x coordinate name I should use in the ASSOC helper
:param assoc_yname: idem
:param returncat: by default I read the SExtractor output catalog and return it as an astropy
table.
If set to False, I do not attempt to read it.
:param prefix: will be prepended to the column names of the astropy table that I return
:type prefix: string
:param writelog: if True I save the sextractor command line input and output into a dedicated
log file in the workdir.
:returns: a dict containing the keys:
* **catfilepath**: the path to the sextractor output catalog file
* **table**: the astropy table of the output catalog (if returncat was not set to False)
* **workdir**: the path to the workdir (all my internal files are there)
* **logfilepath**: the path to the SExtractor log file (in the workdir)
Everything related to this particular image stays within this method, the SExtractor instance
(in particular config) is not modified !
"""
starttime = datetime.now()
logger.info("Preparing to run SExtractor on %s..." % imgfilepath)
if imgname == None:
imgname = os.path.splitext(os.path.basename(imgfilepath))[0]
logger.debug("Using imgname %s..." % (imgname))
# We make a deep copy of the config, that we can modify with settings related to this particular
# image.
imgconfig = copy.deepcopy(self.config)
# We set the catalog name :
imgconfig["CATALOG_NAME"] = self._get_cat_filepath(imgname)
if os.path.exists(self._get_cat_filepath(imgname)):
logger.warning("Output catalog %s already exists, I will overwrite it" % (self._get_cat_filepath(imgname)))
# We prepare the ASSOC catalog file, if needed
if assoc_cat is not None:
logger.info("I will run in ASSOC mode, trying to find %i sources..." % (len(assoc_cat)))
if "VECTOR_ASSOC(3)" not in self.params:
raise RuntimeError("To use the ASSOC helper, you have to add 'VECTOR_ASSOC(3)' to the params")
if assoc_xname not in assoc_cat.colnames or assoc_yname not in assoc_cat.colnames:
raise RuntimeError("I don't have columns %s or %s" % (assoc_xname, assoc_yname))
if "VECTOR_ASSOC_2" in assoc_cat.colnames:
raise RuntimeError("Do not give me an assoc_cat that already contains a column VECTOR_ASSOC_2")
for param in self.params + [prefix + "assoc_flag"]:
# This is not 100% correct, as some params might be vectors.
if prefix + param in assoc_cat.colnames:
raise RuntimeError("Your assoc_cat already has a column named %s, fix this" % (prefix + param))
self._write_assoc(cat=assoc_cat, xname=assoc_xname, yname=assoc_yname, imgname=imgname)
imgconfig["ASSOC_DATA"] = "1, 2, 3"
imgconfig["ASSOC_NAME"] = self._get_assoc_filepath(imgname)
imgconfig["ASSOC_PARAMS"] = "1, 2"
if "ASSOC_RADIUS" not in imgconfig:
logger.warning("ASSOC_RADIUS not specified, using a default of 10.0")
imgconfig["ASSOC_RADIUS"] = 10.0
if "ASSOC_TYPE" not in imgconfig:
logger.warning("ASSOC_TYPE not specified, using a default NEAREST")
imgconfig["ASSOC_TYPE"] = "NEAREST"
if "ASSOCSELEC_TYPE" in imgconfig:
raise RuntimeError("Sorry, you cannot mess with ASSOCSELEC_TYPE yourself when using the helper. I'm using MATCHED.")
imgconfig["ASSOCSELEC_TYPE"] = "MATCHED"
# We write the input files (if needed)
self._write_default_config()
self._write_params()
self._write_default_conv()
# We build the command line arguments
popencmd = [self.sexpath, imgfilepath, "-c", self._get_config_filepath()]
if self.nice != None: # We prepend the nice command
popencmd[:0] = ["nice", "-n", str(self.nice)]
# We add the current state of config
for (key, value) in imgconfig.items():
popencmd.append("-"+str(key))
popencmd.append(str(value).replace(' ',''))
# And we run
logger.info("Starting SExtractor now, with niceness %s..." % (self.nice))
logger.debug("Running with command %s..." % (popencmd))
p = subprocess.Popen(popencmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
if writelog:
logfile = open(self._get_log_filepath(imgname), "w")
logfile.write("SExtractor was called with :\n")
logfile.write(" ".join(popencmd))
logfile.write("\n\nA nicer view of the config:\n")
logfile.write("\n".join(["%30s : %30s" % (str(key), str(value)) for (key, value) in imgconfig.items()]))
logfile.write("\n\n####### stdout #######\n")
logfile.write(out)
logfile.write("\n####### stderr #######\n")
logfile.write(err)
logfile.write("\n")
logfile.close()
logger.info("SExtractor stderr:")
logger.info(err)
if not "All done" in err:
logger.critical("Ouch, something seems wrong, check SExtractor log")
endtime = datetime.now()
logger.info("Running SExtractor done, it took %.2f seconds." % \
((endtime - starttime).total_seconds()))
# Let's check if this worked.
if not os.path.isfile(self._get_cat_filepath(imgname)):
raise RuntimeError("It seems that SExtractor did not write the file '%s', check log." % (self._get_cat_filepath(imgname)))
# We return a dict. It always contains at least the path to the sextractor catalog:
output = {"catfilepath":self._get_cat_filepath(imgname), "workdir":self.workdir}
if writelog:
output["logfilepath"] = self._get_log_filepath(imgname)
# And we read the output, if asked for:
if returncat:
if assoc_cat is None:
sextable = astropy.table.Table.read(self._get_cat_filepath(imgname),
format="ascii.sextractor")
print self._get_cat_filepath(imgname)
logger.info("Read %i objects from the SExtractor output catalog" % (len(sextable)))
self._add_prefix(sextable, prefix)
output["table"] = sextable
else: # We have to process the output catalog, merging it.
# We add the "number" column to the assoc_cat, calling it VECTOR_ASSOC_2:
intable = copy.deepcopy(assoc_cat)
intable["VECTOR_ASSOC_2"] = range(len(assoc_cat))
# We read in the SExtractor output:
sextable = astropy.table.Table.read(self._get_cat_filepath(imgname),
format="ascii.sextractor")
logger.info("Read %i objects from the SExtractor output catalog" % (len(sextable)))
self._add_prefix(sextable, prefix)
sextable.remove_columns(["VECTOR_ASSOC", "VECTOR_ASSOC_1"])
# Due to what seems to be a bug in SExtractor (version 2.19.5 and earlier),
# we need to kick out "duplicated" (same VECTOR_ASSOC_2) rows.
# That's weird, as in principle we asked to keep the NEAREST !
sortedassoc = np.sort(sextable["VECTOR_ASSOC_2"].data)
duplassoc = list(np.unique(sortedassoc[sortedassoc[1:] == sortedassoc[:-1]]))
# The unique is here as there might be more than 2 identical numbers...
if len(duplassoc) > 0:
logger.critical("%i sources from the SExtractor catalog are strange duplicates (bug ?), I discard them." % (len(duplassoc)))
rowindices_to_remove = []
for row in sextable:
if row["VECTOR_ASSOC_2"] in duplassoc:
rowindices_to_remove.append(row.index)
sextable.remove_rows(rowindices_to_remove)
# We merge the tables, keeping all entries of the "intable"
joined = astropy.table.join(intable, sextable,
join_type='left', keys='VECTOR_ASSOC_2',
# raises an error in case of metadata conflict.
metadata_conflicts = "error",
# Will only be used in case of column name conflicts.
table_names = ['ASSOC', 'SEx'],
uniq_col_name = "{table_name}_{col_name}"
)
# This join does not mix the order, as the output is sorted according to our own
# VECTOR_ASSOC_2
# We remove the last ASSOC column:
joined.remove_columns(["VECTOR_ASSOC_2"])
#assert len(intable) == len(joined)
# More explicit:
if not len(intable) == len(joined):
raise RuntimeError("Problem with joined tables: intable has %i rows, joined has %i. %s %s" % (len(intable), len(joined), intable.colnames, joined.colnames))
# The join might return a **masked** table.
# In any case, we add one simply-named column with a flag telling if the
# identification has worked.
if joined.masked:
logger.info("ASSOC join done, my output is a masked table.")
joined[prefix + "assoc_flag"] = joined[joined.colnames[-1]].mask == False
nfound = sum(joined[prefix + "assoc_flag"])
logger.info("I could find %i out of %i sources (%i are missing)" % \
(nfound, len(assoc_cat), len(assoc_cat)-nfound))
else:
logger.info("ASSOC join done, I could find all your sources, my output is not masked.")
joined[prefix + "assoc_flag"] = [True] * len(joined)
output["table"] = joined
return output
# def destroy(self):
# """
# Removes the complete working dir, careful with this.
# """
	# # No, this is way too dangerous, workdir could be "."
# #shutil.rmtree(self.workdir)
# Some class attributes:
# We give this fullparamtxt here as some earlier versions of sextractor are not able to spit it out.
# It's only used to check your params for typos, anyway.
fullparamtxt = """
#NUMBER Running object number
#EXT_NUMBER FITS extension number
#FLUX_ISO Isophotal flux [count]
#FLUXERR_ISO RMS error for isophotal flux [count]
#MAG_ISO Isophotal magnitude [mag]
#MAGERR_ISO RMS error for isophotal magnitude [mag]
#FLUX_ISOCOR Corrected isophotal flux [count]
#FLUXERR_ISOCOR RMS error for corrected isophotal flux [count]
#MAG_ISOCOR Corrected isophotal magnitude [mag]
#MAGERR_ISOCOR RMS error for corrected isophotal magnitude [mag]
#FLUX_APER Flux vector within fixed circular aperture(s) [count]
#FLUXERR_APER RMS error vector for aperture flux(es) [count]
#MAG_APER Fixed aperture magnitude vector [mag]
#MAGERR_APER RMS error vector for fixed aperture mag. [mag]
#FLUX_AUTO Flux within a Kron-like elliptical aperture [count]
#FLUXERR_AUTO RMS error for AUTO flux [count]
#MAG_AUTO Kron-like elliptical aperture magnitude [mag]
#MAGERR_AUTO RMS error for AUTO magnitude [mag]
#FLUX_PETRO Flux within a Petrosian-like elliptical aperture [count]
#FLUXERR_PETRO RMS error for PETROsian flux [count]
#MAG_PETRO Petrosian-like elliptical aperture magnitude [mag]
#MAGERR_PETRO RMS error for PETROsian magnitude [mag]
#FLUX_BEST Best of FLUX_AUTO and FLUX_ISOCOR [count]
#FLUXERR_BEST RMS error for BEST flux [count]
#MAG_BEST Best of MAG_AUTO and MAG_ISOCOR [mag]
#MAGERR_BEST RMS error for MAG_BEST [mag]
#FLUX_WIN Gaussian-weighted flux [count]
#FLUXERR_WIN RMS error for WIN flux [count]
#MAG_WIN Gaussian-weighted magnitude [mag]
#MAGERR_WIN RMS error for MAG_WIN [mag]
#FLUX_SOMFIT Flux derived from SOM fit [count]
#FLUXERR_SOMFIT RMS error for SOMFIT flux [count]
#MAG_SOMFIT Magnitude derived from SOM fit [mag]
#MAGERR_SOMFIT Magnitude error derived from SOM fit [mag]
#ERROR_SOMFIT Reduced Chi-square error of the SOM fit
#VECTOR_SOMFIT Position vector of the winning SOM node
#KRON_RADIUS Kron apertures in units of A or B
#PETRO_RADIUS Petrosian apertures in units of A or B
#BACKGROUND Background at centroid position [count]
#THRESHOLD Detection threshold above background [count]
#FLUX_MAX Peak flux above background [count]
#ISOAREA_IMAGE Isophotal area above Analysis threshold [pixel**2]
#ISOAREAF_IMAGE Isophotal area (filtered) above Detection threshold [pixel**2]
#XMIN_IMAGE Minimum x-coordinate among detected pixels [pixel]
#YMIN_IMAGE Minimum y-coordinate among detected pixels [pixel]
#XMAX_IMAGE Maximum x-coordinate among detected pixels [pixel]
#YMAX_IMAGE Maximum y-coordinate among detected pixels [pixel]
#XPEAK_IMAGE x-coordinate of the brightest pixel [pixel]
#YPEAK_IMAGE y-coordinate of the brightest pixel [pixel]
#XPEAK_WORLD World-x coordinate of the brightest pixel [deg]
#YPEAK_WORLD World-y coordinate of the brightest pixel [deg]
#ALPHAPEAK_SKY Right ascension of brightest pix (native) [deg]
#DELTAPEAK_SKY Declination of brightest pix (native) [deg]
#ALPHAPEAK_J2000 Right ascension of brightest pix (J2000) [deg]
#DELTAPEAK_J2000 Declination of brightest pix (J2000) [deg]
#ALPHAPEAK_B1950 Right ascension of brightest pix (B1950) [deg]
#DELTAPEAK_B1950 Declination of brightest pix (B1950) [deg]
#X_IMAGE Object position along x [pixel]
#Y_IMAGE Object position along y [pixel]
#X_IMAGE_DBL Object position along x (double precision) [pixel]
#Y_IMAGE_DBL Object position along y (double precision) [pixel]
#X_WORLD Barycenter position along world x axis [deg]
#Y_WORLD Barycenter position along world y axis [deg]
#X_MAMA Barycenter position along MAMA x axis [m**(-6)]
#Y_MAMA Barycenter position along MAMA y axis [m**(-6)]
#ALPHA_SKY Right ascension of barycenter (native) [deg]
#DELTA_SKY Declination of barycenter (native) [deg]
#ALPHA_J2000 Right ascension of barycenter (J2000) [deg]
#DELTA_J2000 Declination of barycenter (J2000) [deg]
#ALPHA_B1950 Right ascension of barycenter (B1950) [deg]
#DELTA_B1950 Declination of barycenter (B1950) [deg]
#X2_IMAGE Variance along x [pixel**2]
#Y2_IMAGE Variance along y [pixel**2]
#XY_IMAGE Covariance between x and y [pixel**2]
#X2_WORLD Variance along X-WORLD (alpha) [deg**2]
#Y2_WORLD Variance along Y-WORLD (delta) [deg**2]
#XY_WORLD Covariance between X-WORLD and Y-WORLD [deg**2]
#CXX_IMAGE Cxx object ellipse parameter [pixel**(-2)]
#CYY_IMAGE Cyy object ellipse parameter [pixel**(-2)]
#CXY_IMAGE Cxy object ellipse parameter [pixel**(-2)]
#CXX_WORLD Cxx object ellipse parameter (WORLD units) [deg**(-2)]
#CYY_WORLD Cyy object ellipse parameter (WORLD units) [deg**(-2)]
#CXY_WORLD Cxy object ellipse parameter (WORLD units) [deg**(-2)]
#A_IMAGE Profile RMS along major axis [pixel]
#B_IMAGE Profile RMS along minor axis [pixel]
#THETA_IMAGE Position angle (CCW/x) [deg]
#A_WORLD Profile RMS along major axis (world units) [deg]
#B_WORLD Profile RMS along minor axis (world units) [deg]
#THETA_WORLD Position angle (CCW/world-x) [deg]
#THETA_SKY Position angle (east of north) (native) [deg]
#THETA_J2000 Position angle (east of north) (J2000) [deg]
#THETA_B1950 Position angle (east of north) (B1950) [deg]
#ERRX2_IMAGE Variance of position along x [pixel**2]
#ERRY2_IMAGE Variance of position along y [pixel**2]
#ERRXY_IMAGE Covariance of position between x and y [pixel**2]
#ERRX2_WORLD Variance of position along X-WORLD (alpha) [deg**2]
#ERRY2_WORLD Variance of position along Y-WORLD (delta) [deg**2]
#ERRXY_WORLD Covariance of position X-WORLD/Y-WORLD [deg**2]
#ERRCXX_IMAGE Cxx error ellipse parameter [pixel**(-2)]
#ERRCYY_IMAGE Cyy error ellipse parameter [pixel**(-2)]
#ERRCXY_IMAGE Cxy error ellipse parameter [pixel**(-2)]
#ERRCXX_WORLD Cxx error ellipse parameter (WORLD units) [deg**(-2)]
#ERRCYY_WORLD Cyy error ellipse parameter (WORLD units) [deg**(-2)]
#ERRCXY_WORLD Cxy error ellipse parameter (WORLD units) [deg**(-2)]
#ERRA_IMAGE RMS position error along major axis [pixel]
#ERRB_IMAGE RMS position error along minor axis [pixel]
#ERRTHETA_IMAGE Error ellipse position angle (CCW/x) [deg]
#ERRA_WORLD World RMS position error along major axis [deg]
#ERRB_WORLD World RMS position error along minor axis [deg]
#ERRTHETA_WORLD Error ellipse pos. angle (CCW/world-x) [deg]
#ERRTHETA_SKY Native error ellipse pos. angle (east of north) [deg]
#ERRTHETA_J2000 J2000 error ellipse pos. angle (east of north) [deg]
#ERRTHETA_B1950 B1950 error ellipse pos. angle (east of north) [deg]
#XWIN_IMAGE Windowed position estimate along x [pixel]
#YWIN_IMAGE Windowed position estimate along y [pixel]
#XWIN_WORLD Windowed position along world x axis [deg]
#YWIN_WORLD Windowed position along world y axis [deg]
#ALPHAWIN_SKY Windowed right ascension (native) [deg]
#DELTAWIN_SKY Windowed declination (native) [deg]
#ALPHAWIN_J2000 Windowed right ascension (J2000) [deg]
#DELTAWIN_J2000 windowed declination (J2000) [deg]
#ALPHAWIN_B1950 Windowed right ascension (B1950) [deg]
#DELTAWIN_B1950 Windowed declination (B1950) [deg]
#X2WIN_IMAGE Windowed variance along x [pixel**2]
#Y2WIN_IMAGE Windowed variance along y [pixel**2]
#XYWIN_IMAGE Windowed covariance between x and y [pixel**2]
#X2WIN_WORLD Windowed variance along X-WORLD (alpha) [deg**2]
#Y2WIN_WORLD Windowed variance along Y-WORLD (delta) [deg**2]
#XYWIN_WORLD Windowed covariance between X-WORLD and Y-WORLD [deg**2]
#CXXWIN_IMAGE Windowed Cxx object ellipse parameter [pixel**(-2)]
#CYYWIN_IMAGE Windowed Cyy object ellipse parameter [pixel**(-2)]
#CXYWIN_IMAGE Windowed Cxy object ellipse parameter [pixel**(-2)]
#CXXWIN_WORLD Windowed Cxx object ellipse parameter (WORLD units) [deg**(-2)]
#CYYWIN_WORLD Windowed Cyy object ellipse parameter (WORLD units) [deg**(-2)]
#CXYWIN_WORLD Windowed Cxy object ellipse parameter (WORLD units) [deg**(-2)]
#AWIN_IMAGE Windowed profile RMS along major axis [pixel]
#BWIN_IMAGE Windowed profile RMS along minor axis [pixel]
#THETAWIN_IMAGE Windowed position angle (CCW/x) [deg]
#AWIN_WORLD Windowed profile RMS along major axis (world units) [deg]
#BWIN_WORLD Windowed profile RMS along minor axis (world units) [deg]
#THETAWIN_WORLD Windowed position angle (CCW/world-x) [deg]
#THETAWIN_SKY Windowed position angle (east of north) (native) [deg]
#THETAWIN_J2000 Windowed position angle (east of north) (J2000) [deg]
#THETAWIN_B1950 Windowed position angle (east of north) (B1950) [deg]
#ERRX2WIN_IMAGE Variance of windowed pos along x [pixel**2]
#ERRY2WIN_IMAGE Variance of windowed pos along y [pixel**2]
#ERRXYWIN_IMAGE Covariance of windowed pos between x and y [pixel**2]
#ERRX2WIN_WORLD Variance of windowed pos along X-WORLD (alpha) [deg**2]
#ERRY2WIN_WORLD Variance of windowed pos along Y-WORLD (delta) [deg**2]
#ERRXYWIN_WORLD Covariance of windowed pos X-WORLD/Y-WORLD [deg**2]
#ERRCXXWIN_IMAGE Cxx windowed error ellipse parameter [pixel**(-2)]
#ERRCYYWIN_IMAGE Cyy windowed error ellipse parameter [pixel**(-2)]
#ERRCXYWIN_IMAGE Cxy windowed error ellipse parameter [pixel**(-2)]
#ERRCXXWIN_WORLD Cxx windowed error ellipse parameter (WORLD units) [deg**(-2)]
#ERRCYYWIN_WORLD Cyy windowed error ellipse parameter (WORLD units) [deg**(-2)]
#ERRCXYWIN_WORLD Cxy windowed error ellipse parameter (WORLD units) [deg**(-2)]
#ERRAWIN_IMAGE RMS windowed pos error along major axis [pixel]
#ERRBWIN_IMAGE RMS windowed pos error along minor axis [pixel]
#ERRTHETAWIN_IMAGE Windowed error ellipse pos angle (CCW/x) [deg]
#ERRAWIN_WORLD World RMS windowed pos error along major axis [deg]
#ERRBWIN_WORLD World RMS windowed pos error along minor axis [deg]
#ERRTHETAWIN_WORLD Windowed error ellipse pos. angle (CCW/world-x) [deg]
#ERRTHETAWIN_SKY Native windowed error ellipse pos. angle (east of north) [deg]
#ERRTHETAWIN_J2000 J2000 windowed error ellipse pos. angle (east of north) [deg]
#ERRTHETAWIN_B1950 B1950 windowed error ellipse pos. angle (east of north) [deg]
#NITER_WIN Number of iterations for WIN centering
#MU_THRESHOLD Detection threshold above background [mag * arcsec**(-2)]
#MU_MAX Peak surface brightness above background [mag * arcsec**(-2)]
#ISOAREA_WORLD Isophotal area above Analysis threshold [deg**2]
#ISOAREAF_WORLD Isophotal area (filtered) above Detection threshold [deg**2]
#ISO0 Isophotal area at level 0 [pixel**2]
#ISO1 Isophotal area at level 1 [pixel**2]
#ISO2 Isophotal area at level 2 [pixel**2]
#ISO3 Isophotal area at level 3 [pixel**2]
#ISO4 Isophotal area at level 4 [pixel**2]
#ISO5 Isophotal area at level 5 [pixel**2]
#ISO6 Isophotal area at level 6 [pixel**2]
#ISO7 Isophotal area at level 7 [pixel**2]
#FLAGS Extraction flags
#FLAGS_WEIGHT Weighted extraction flags
#FLAGS_WIN Flags for WINdowed parameters
#IMAFLAGS_ISO FLAG-image flags OR'ed over the iso. profile
#NIMAFLAGS_ISO Number of flagged pixels entering IMAFLAGS_ISO
#FWHM_IMAGE FWHM assuming a gaussian core [pixel]
#FWHM_WORLD FWHM assuming a gaussian core [deg]
#ELONGATION A_IMAGE/B_IMAGE
#ELLIPTICITY 1 - B_IMAGE/A_IMAGE
#POLAR_IMAGE (A_IMAGE^2 - B_IMAGE^2)/(A_IMAGE^2 + B_IMAGE^2)
#POLAR_WORLD (A_WORLD^2 - B_WORLD^2)/(A_WORLD^2 + B_WORLD^2)
#POLARWIN_IMAGE (AWIN^2 - BWIN^2)/(AWIN^2 + BWIN^2)
#POLARWIN_WORLD (AWIN^2 - BWIN^2)/(AWIN^2 + BWIN^2)
#CLASS_STAR S/G classifier output
#VIGNET Pixel data around detection [count]
#VIGNET_SHIFT Pixel data around detection, corrected for shift [count]
#VECTOR_ASSOC ASSOCiated parameter vector
#NUMBER_ASSOC Number of ASSOCiated IDs
#THRESHOLDMAX Maximum threshold possible for detection [count]
#FLUX_GROWTH Cumulated growth-curve [count]
#FLUX_GROWTHSTEP Step for growth-curves [pixel]
#MAG_GROWTH Cumulated magnitude growth-curve [mag]
#MAG_GROWTHSTEP Step for growth-curves [pixel]
#FLUX_RADIUS Fraction-of-light radii [pixel]
#XPSF_IMAGE X coordinate from PSF-fitting [pixel]
#YPSF_IMAGE Y coordinate from PSF-fitting [pixel]
#XPSF_WORLD PSF position along world x axis [deg]
#YPSF_WORLD PSF position along world y axis [deg]
#ALPHAPSF_SKY Right ascension of the fitted PSF (native) [deg]
#DELTAPSF_SKY Declination of the fitted PSF (native) [deg]
#ALPHAPSF_J2000 Right ascension of the fitted PSF (J2000) [deg]
#DELTAPSF_J2000 Declination of the fitted PSF (J2000) [deg]
#ALPHAPSF_B1950 Right ascension of the fitted PSF (B1950) [deg]
#DELTAPSF_B1950 Declination of the fitted PSF (B1950) [deg]
#FLUX_PSF Flux from PSF-fitting [count]
#FLUXERR_PSF RMS flux error for PSF-fitting [count]
#MAG_PSF Magnitude from PSF-fitting [mag]
#MAGERR_PSF RMS magnitude error from PSF-fitting [mag]
#NITER_PSF Number of iterations for PSF-fitting
#CHI2_PSF Reduced chi2 from PSF-fitting
#ERRX2PSF_IMAGE Variance of PSF position along x [pixel**2]
#ERRY2PSF_IMAGE Variance of PSF position along y [pixel**2]
#ERRXYPSF_IMAGE Covariance of PSF position between x and y [pixel**2]
#ERRX2PSF_WORLD Variance of PSF position along X-WORLD (alpha) [deg**2]
#ERRY2PSF_WORLD Variance of PSF position along Y-WORLD (delta) [deg**2]
#ERRXYPSF_WORLD Covariance of PSF position X-WORLD/Y-WORLD [deg**2]
#ERRCXXPSF_IMAGE Cxx PSF error ellipse parameter [pixel**(-2)]
#ERRCYYPSF_IMAGE Cyy PSF error ellipse parameter [pixel**(-2)]
#ERRCXYPSF_IMAGE Cxy PSF error ellipse parameter [pixel**(-2)]
#ERRCXXPSF_WORLD Cxx PSF error ellipse parameter (WORLD units) [deg**(-2)]
#ERRCYYPSF_WORLD Cyy PSF error ellipse parameter (WORLD units) [deg**(-2)]
#ERRCXYPSF_WORLD Cxy PSF error ellipse parameter (WORLD units) [deg**(-2)]
#ERRAPSF_IMAGE PSF RMS position error along major axis [pixel]
#ERRBPSF_IMAGE PSF RMS position error along minor axis [pixel]
#ERRTHTPSF_IMAGE PSF error ellipse position angle (CCW/x) [deg]
#ERRAPSF_WORLD World PSF RMS position error along major axis [pixel]
#ERRBPSF_WORLD World PSF RMS position error along minor axis [pixel]
#ERRTHTPSF_WORLD PSF error ellipse pos. angle (CCW/world-x) [deg]
#ERRTHTPSF_SKY Native PSF error ellipse pos. angle (east of north) [deg]
#ERRTHTPSF_J2000 J2000 PSF error ellipse pos. angle (east of north) [deg]
#ERRTHTPSF_B1950 B1950 PSF error ellipse pos. angle (east of north) [deg]
#VECTOR_MODEL Model-fitting coefficients
#VECTOR_MODELERR Model-fitting coefficient uncertainties
#CHI2_MODEL Reduced Chi2 of the fit
#FLAGS_MODEL Model-fitting flags
#NITER_MODEL Number of iterations for model-fitting
#FLUX_MODEL Flux from model-fitting [count]
#FLUXERR_MODEL RMS error on model-fitting flux [count]
#MAG_MODEL Magnitude from model-fitting [mag]
#MAGERR_MODEL RMS error on model-fitting magnitude [mag]
#XMODEL_IMAGE X coordinate from model-fitting [pixel]
#YMODEL_IMAGE Y coordinate from model-fitting [pixel]
#XMODEL_WORLD Fitted position along world x axis [deg]
#YMODEL_WORLD Fitted position along world y axis [deg]
#ALPHAMODEL_SKY Fitted position along right ascension (native) [deg]
#DELTAMODEL_SKY Fitted position along declination (native) [deg]
#ALPHAMODEL_J2000 Fitted position along right ascension (J2000) [deg]
#DELTAMODEL_J2000 Fitted position along declination (J2000) [deg]
#ALPHAMODEL_B1950 Fitted position along right ascension (B1950) [deg]
#DELTAMODEL_B1950 Fitted position along declination (B1950) [deg]
#ERRX2MODEL_IMAGE Variance of fitted position along x [pixel**2]
#ERRY2MODEL_IMAGE Variance of fitted position along y [pixel**2]
#ERRXYMODEL_IMAGE Covariance of fitted position between x and y [pixel**2]
#ERRX2MODEL_WORLD Variance of fitted position along X-WORLD (alpha) [deg**2]
#ERRY2MODEL_WORLD Variance of fitted position along Y-WORLD (delta) [deg**2]
#ERRXYMODEL_WORLD Covariance of fitted position X-WORLD/Y-WORLD [deg**2]
#ERRCXXMODEL_IMAGE Cxx error ellipse parameter of fitted position [pixel**(-2)]
#ERRCYYMODEL_IMAGE Cyy error ellipse parameter of fitted position [pixel**(-2)]
#ERRCXYMODEL_IMAGE Cxy error ellipse parameter of fitted position [pixel**(-2)]
#ERRCXXMODEL_WORLD Cxx fitted error ellipse parameter (WORLD units) [deg**(-2)]
#ERRCYYMODEL_WORLD Cyy fitted error ellipse parameter (WORLD units) [deg**(-2)]
#ERRCXYMODEL_WORLD Cxy fitted error ellipse parameter (WORLD units) [deg**(-2)]
#ERRAMODEL_IMAGE RMS error of fitted position along major axis [pixel]
#ERRBMODEL_IMAGE RMS error of fitted position along minor axis [pixel]
#ERRTHETAMODEL_IMAGE Error ellipse pos.angle of fitted position (CCW/x) [deg]
#ERRAMODEL_WORLD World RMS error of fitted position along major axis [deg]
#ERRBMODEL_WORLD World RMS error of fitted position along minor axis [deg]
#ERRTHETAMODEL_WORLD Error ellipse pos.angle of fitted position (CCW/world-x) [deg]
#ERRTHETAMODEL_SKY Native fitted error ellipse pos. angle (east of north) [deg]
#ERRTHETAMODEL_J2000 J2000 fitted error ellipse pos. angle (east of north) [deg]
#ERRTHETAMODEL_B1950 B1950 fitted error ellipse pos. angle (east of north) [deg]
#X2MODEL_IMAGE Variance along x from model-fitting [pixel**2]
#Y2MODEL_IMAGE Variance along y from model-fitting [pixel**2]
#XYMODEL_IMAGE Covariance between x and y from model-fitting [pixel**2]
#E1MODEL_IMAGE Ellipticity component from model-fitting
#E2MODEL_IMAGE Ellipticity component from model-fitting
#EPS1MODEL_IMAGE Ellipticity component (quadratic) from model-fitting
#EPS2MODEL_IMAGE Ellipticity component (quadratic) from model-fitting
#CONCENTRATION_MODEL Concentration parameter from model-fitting
#CLASS_STAR_MODEL S/G classifier from model-fitting
#FLUX_BACKOFFSET Background offset from fitting [count]
#FLUXERR_BACKOFFSET RMS error on fitted background offset [count]
#FLUX_SPHEROID Spheroid total flux from fitting [count]
#FLUXERR_SPHEROID RMS error on fitted spheroid total flux [count]
#MAG_SPHEROID Spheroid total magnitude from fitting [mag]
#MAGERR_SPHEROID RMS error on fitted spheroid total magnitude [mag]
#SPHEROID_REFF_IMAGE Spheroid effective radius from fitting [pixel]
#SPHEROID_REFFERR_IMAGE RMS error on fitted spheroid effective radius [pixel]
#SPHEROID_REFF_WORLD Spheroid effective radius from fitting [deg]
#SPHEROID_REFFERR_WORLD RMS error on fitted spheroid effective radius [deg]
#SPHEROID_ASPECT_IMAGE Spheroid aspect ratio from fitting
#SPHEROID_ASPECTERR_IMA RMS error on fitted spheroid aspect ratio
#SPHEROID_ASPECT_WORLD Spheroid aspect ratio from fitting
#SPHEROID_ASPECTERR_WOR RMS error on fitted spheroid aspect ratio
#SPHEROID_THETA_IMAGE Spheroid position angle (CCW/x) from fitting [deg]
#SPHEROID_THETAERR_IMAG RMS error on spheroid position angle [deg]
#SPHEROID_THETA_WORLD Spheroid position angle (CCW/world-x) [deg]
#SPHEROID_THETAERR_WORL RMS error on spheroid position angle [deg]
#SPHEROID_THETA_SKY Spheroid position angle (east of north, native) [deg]
#SPHEROID_THETA_J2000 Spheroid position angle (east of north, J2000) [deg]
#SPHEROID_THETA_B1950 Spheroid position angle (east of north, B1950) [deg]
#SPHEROID_SERSICN Spheroid Sersic index from fitting
#SPHEROID_SERSICNERR RMS error on fitted spheroid Sersic index
#FLUX_DISK Disk total flux from fitting [count]
#FLUXERR_DISK RMS error on fitted disk total flux [count]
#MAG_DISK Disk total magnitude from fitting [mag]
#MAGERR_DISK RMS error on fitted disk total magnitude [mag]
#DISK_SCALE_IMAGE Disk scalelength from fitting [pixel]
#DISK_SCALEERR_IMAGE RMS error on fitted disk scalelength [pixel]
#DISK_SCALE_WORLD Disk scalelength from fitting (world coords) [deg]
#DISK_SCALEERR_WORLD RMS error on fitted disk scalelength (world coords) [deg]
#DISK_ASPECT_IMAGE Disk aspect ratio from fitting
#DISK_ASPECTERR_IMAGE RMS error on fitted disk aspect ratio
#DISK_ASPECT_WORLD Disk aspect ratio from fitting
#DISK_ASPECTERR_WORLD RMS error on disk aspect ratio
#DISK_INCLINATION Disk inclination from fitting [deg]
#DISK_INCLINATIONERR RMS error on disk inclination from fitting [deg]
#DISK_THETA_IMAGE Disk position angle (CCW/x) from fitting [deg]
#DISK_THETAERR_IMAGE RMS error on fitted disk position angle [deg]
#DISK_THETA_WORLD Disk position angle (CCW/world-x) [deg]
#DISK_THETAERR_WORLD RMS error on disk position angle [deg]
#DISK_THETA_SKY Disk position angle (east of north, native) [deg]
#DISK_THETA_J2000 Disk position angle (east of north, J2000) [deg]
#DISK_THETA_B1950 Disk position angle (east of north, B1950) [deg]
#DISK_PATTERN_VECTOR Disk pattern fitted coefficients
#DISK_PATTERNMOD_VECTOR Disk pattern fitted moduli
#DISK_PATTERNARG_VECTOR Disk pattern fitted arguments [deg]
#DISK_PATTERN_SPIRAL Disk pattern spiral index
"""
# We turn this text block into a list of the parameter names:
fullparamlist = map(lambda s: s[1:-1], re.compile("#\w*\s").findall(fullparamtxt))
|
dr-guangtou/hs_galphot
|
mask/hsRunSewpy.py
|
Python
|
bsd-3-clause
| 51,622
|
[
"Gaussian"
] |
3d18228bfdd88f0ca2537dcc47bc7265c75237170756c874dfeac5412e076bb5
|
# Copyright 2015 Emille Ishida
# This program is distributed under the terms of the GNU General Purpose License (GPL).
# Refer to http://www.gnu.org/licenses/gpl.txt
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Fit and plot a light curve using Gaussian Process.
usage:
In order to fit a GP and plot the result, do
$ fit_plot_lc.py -i <user.input> -c 1
in case you are only interested in plotting a previously calculated
result,
$ fit_plot_lc.py -i <user.input> -c 0
"""
#!/usr/bin/env python
from __future__ import division
import argparse
import matplotlib.pyplot as plt
import numpy as np
from snclass.util import read_user_input, read_snana_lc
from snclass.fit_lc_gptools import fit_lc
from snclass.functions import screen
def main(args):
    """Read user input, fit and plot a GP and the raw data.

    Expects ``args.input`` (path to the user-input file) and
    ``args.calculate`` ('1' to run the GP fit now, '0' to read a
    previously-saved fit from ``samples_dir``).  Writes the plot to
    gp-SN<id>_<measurement>.png in the current directory; returns None.
    """
    # read_user_input
    user_input = read_user_input(args.input)
    # read lc data
    lc_data = read_snana_lc(user_input)
    # add extra keys
    lc_data.update(user_input)
    # set screen output
    out = bool(int(user_input['screen'][0]))
    # sign flips magnitudes so brighter points plot upward; flux is plotted as-is
    if user_input['measurement'][0] == 'flux':
        ylabel = 'flux'
        sign = 1.0
    else:
        ylabel = 'magnitude'
        sign = -1.0
    if bool(int(args.calculate)):
        screen('Fitting SN' + lc_data['SNID:'][0], user_input)
        # flux fits are extrapolated over the requested epoch window; mag fits are not
        if user_input['measurement'][0] == 'flux':
            p1 = [int(user_input['epoch_predict'][0]),
                  int(user_input['epoch_predict'][1])]
            sign2 = 1.0
        else:
            p1 = None
            sign2 = -1.0
        # fit lc
        lc_data = fit_lc(lc_data, samples=bool(int(lc_data['n_samples'][0])),
                         save_samples=bool(int(user_input['save_samples'][0])),
                         screen=out,
                         do_mcmc=bool(int(user_input['do_mcmc'][0])),
                         predict=p1)
    else:
        sign2 = 1.0
        # read back GP sample realizations written by a previous run
        if bool(int(lc_data['n_samples'][0])):
            op1 = open(lc_data['samples_dir'][0] + lc_data['file_root'][0] + \
                       lc_data['SNID:'][0] + '_' + user_input['measurement'][0] + '_samples.dat', 'r')
            lin1 = op1.readlines()
            op1.close()
            d1 = [elem.split() for elem in lin1]
        for fil in lc_data['filters']:
            lc_data['xarr'][fil] = []
            if bool(int(lc_data['n_samples'][0])):
                # columns 2..n_samples+1 of each row hold the sampled curves;
                # column 0 is the filter tag, column 1 the epoch
                lc_data['realizations'][fil] = [[float(d1[kk][jj])
                                                 for kk in xrange(len(d1))
                                                 if d1[kk][0]==fil]
                                                for jj in xrange(2,
                                                int(lc_data['n_samples'][0]) + 2)]
                for i1 in xrange(len(d1)):
                    if d1[i1][0] == fil:
                        lc_data['xarr'][fil].append(float(d1[i1][1]))
        # read the GP mean and standard deviation per filter
        op2 = open(lc_data['samples_dir'][0] + lc_data['file_root'][0] + \
                   lc_data['SNID:'][0] + '_' + user_input['measurement'][0] + '_mean.dat', 'r')
        lin2 = op2.readlines()
        op2.close()
        d2 = [elem.split() for elem in lin2]
        lc_data['GP_std'] = {}
        for fil in lc_data['filters']:
            lc_data['xarr'][fil] = []
            lc_data['GP_fit'][fil] = np.array([float(d2[j][2])
                                     for j in xrange(1,len(d2)) if d2[j][0] == fil])
            lc_data['GP_std'][fil] = np.array([float(d2[j][3])
                                     for j in xrange(1,len(d2)) if d2[j][0] == fil])
            lc_data['xarr'][fil] = np.array([float(d2[j][1])
                                   for j in xrange(1,len(d2)) if d2[j][0] == fil])
    #initiate figure
    f = plt.figure()
    for fil in user_input['filters']:
        # Plot the samples in data space.
        # NOTE(review): with __future__ division, len(...)/2 is a float here;
        # older matplotlib accepted float subplot geometry — confirm on newer versions.
        plt.subplot(2, len(lc_data['filters'])/2 +
                    len(lc_data['filters'])%2,
                    lc_data['filters'].index(fil) + 1)
        if bool(int(lc_data['n_samples'][0])):
            for s in lc_data['realizations'][fil]:
                plt.plot(lc_data['xarr'][fil], sign2 * np.array(s), color="gray", alpha=0.3)
        plt.errorbar(lc_data[fil][:,0], sign * lc_data[fil][:,1],
                     yerr=lc_data[fil][:,2], fmt="o", color='blue', label=fil)
        plt.plot(lc_data['xarr'][fil], sign2 * lc_data['GP_fit'][fil],
                 color='red', linewidth=2)
        plt.ylabel(ylabel)
        plt.xlabel("MJD")
        plt.legend()
        plt.xlim(min(lc_data['xarr'][fil]) - 1.0, max(lc_data['xarr'][fil]) + 1.0)
        if user_input['measurement'][0] == 'mag':
            # pad y-limits by 1.5 * largest error bar, then flip the axis
            # (magnitudes decrease upward)
            plt.ylim(min(sign * lc_data[fil][:,1]) - 1.5*max(lc_data[fil][:,2]),max(sign * lc_data[fil][:,1]) + 1.5*max(lc_data[fil][:,2]))
            ax = plt.gca()
            ax.invert_yaxis()
    f.tight_layout()
    plt.savefig("gp-SN" + lc_data['SNID:'][0] + "_" + user_input['measurement'][0] + ".png", dpi=350)
    plt.close()
if __name__=='__main__':
    #get user input file name
    # -i/--input : path to the user-input file (required)
    # -c/--calculate : '1' to run the GP fit, '0' to plot a stored fit (required)
    parser = argparse.ArgumentParser(description='Supernova photometric ' + \
                                     'classification using KPCA.')
    parser.add_argument('-i','--input', help='Input file name',
                        required = True)
    parser.add_argument('-c', '--calculate', help='Read or calculate GP fit',
                        required=True)
    args = parser.parse_args()
    main(args)
|
emilleishida/snclass
|
snclass/bin/fit_plot_lc.py
|
Python
|
gpl-3.0
| 6,196
|
[
"Gaussian"
] |
dc3cde17d1383cac8b7f3cf2c5a131417cefe1ecdc5a8e0a6974e912f2ecf17f
|
# -*- coding: utf-8 -*-
#GSASIImath - major mathematics routines
########### SVN repository information ###################
# $Date: 2018-06-14 01:48:33 +0300 (Thu, 14 Jun 2018) $
# $Author: vondreele $
# $Revision: 3434 $
# $URL: https://subversion.xray.aps.anl.gov/pyGSAS/trunk/GSASIImath.py $
# $Id: GSASIImath.py 3434 2018-06-13 22:48:33Z vondreele $
########### SVN repository information ###################
'''
*GSASIImath: computation module*
================================
Routines for least-squares minimization and other stuff
'''
from __future__ import division, print_function
import random as rn
import numpy as np
import numpy.linalg as nl
import numpy.ma as ma
import time
import math
import copy
import GSASIIpath
GSASIIpath.SetVersionNumber("$Revision: 3434 $")
import GSASIIElem as G2el
import GSASIIlattice as G2lat
import GSASIIspc as G2spc
import GSASIIpwd as G2pwd
import numpy.fft as fft
import scipy.optimize as so
try:
import pypowder as pwd
except ImportError:
print ('pypowder is not available - profile calcs. not allowed')
# Degree-argument trigonometric wrappers — crystallographic code in this
# module works in degrees throughout, numpy in radians.
sind = lambda x: np.sin(x*np.pi/180.)       # sine of angle in degrees
cosd = lambda x: np.cos(x*np.pi/180.)       # cosine of angle in degrees
tand = lambda x: np.tan(x*np.pi/180.)       # tangent of angle in degrees
asind = lambda x: 180.*np.arcsin(x)/np.pi   # arcsine, result in degrees
acosd = lambda x: 180.*np.arccos(x)/np.pi   # arccosine, result in degrees
atand = lambda x: 180.*np.arctan(x)/np.pi   # arctangent, result in degrees
atan2d = lambda y,x: 180.*np.arctan2(y,x)/np.pi  # quadrant-aware arctangent, degrees
twopi = 2.0*np.pi           # 2*pi
twopisq = 2.0*np.pi**2      # 2*pi**2
nxs = np.newaxis            # shorthand used for broadcasting below
################################################################################
##### Hessian least-squares Levenberg-Marquardt routine
################################################################################
def pinv(a, rcond=1e-15 ):
    """
    Moore-Penrose pseudo-inverse of a (here: least-squares Hessian) matrix.

    Adapted from numpy.linalg.pinv: the matrix is decomposed by SVD, singular
    values below a relative cutoff are dropped, and the count of dropped
    values is returned alongside the inverse.

    :param array a: (M, M) array_like - here assumed to be LS Hessian
      Matrix to be pseudo-inverted.
    :param float rcond: relative cutoff; singular values smaller (in modulus)
      than ``rcond`` times the largest singular value are treated as zero.
    :returns: tuple (B, nzero) where B is the (M, M) pseudo-inverse of `a`
      and nzero is the number of singular values that were zeroed.

    Raises: LinAlgError if the SVD computation does not converge.

    Notes:
      With the SVD :math:`Q_1 \\Sigma Q_2^T = A`, the pseudo-inverse is
      :math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`\\Sigma^+` holds the
      reciprocals of the retained singular values and zeros elsewhere.

    References:
    .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL, Academic Press, Inc., 1980, pp. 139-142.
    """
    u, sig, vt = nl.svd(a, 0)
    # reciprocal of each singular value above the relative cutoff, else zero
    thresh = rcond * np.maximum.reduce(sig)
    invSig = np.where(sig > thresh, 1./sig, 0.)
    nzero = invSig.shape[0] - np.count_nonzero(invSig)
    # reassemble: V * diag(invSig) * U^T, without forming the diagonal matrix
    pseudo = np.dot(vt.T, invSig[:, None]*u.T)
    return pseudo, nzero
def HessianLSQ(func,x0,Hess,args=(),ftol=1.49012e-8,xtol=1.e-6, maxcyc=0,lamda=-3,Print=False):
    """
    Levenberg-Marquardt minimization of the sum of squares of a function
    (:math:`f`) evaluated on a series of values (y):
    :math:`\sum_{y=0}^{N_{obs}} f(y,{args})`
    where :math:`x = arg min(\sum_{y=0}^{N_{obs}} (func(y)^2,axis=0))`

    :param function func: callable method or function
        should take at least one (possibly length N vector) argument and
        returns M floating point numbers.
    :param np.ndarray x0: The starting estimate for the minimization of length N
    :param function Hess: callable method or function
        A required function or method to compute the weighted vector and Hessian for func.
        It must be a symmetric NxN array
    :param tuple args: Any extra arguments to func are placed in this tuple.
    :param float ftol: Relative error desired in the sum of squares.
    :param float xtol: Relative tolerance of zeros in the SVD solution in nl.pinv.
    :param int maxcyc: The maximum number of cycles of refinement to execute, if -1 refine
        until other limits are met (ftol, xtol)
    :param int lamda: initial Marquardt lambda=10**lamda
    :param bool Print: True for printing results (residuals & times) by cycle

    :returns: (x,cov_x,infodict) where

      * x : ndarray
        The solution (or the result of the last iteration for an unsuccessful
        call).
      * cov_x : ndarray
        Uses the fjac and ipvt optional outputs to construct an
        estimate of the jacobian around the solution.  ``None`` if a
        singular matrix encountered (indicates very flat curvature in
        some direction).  This matrix must be multiplied by the
        residual standard deviation to get the covariance of the
        parameter estimates -- see curve_fit.
      * infodict : dict
        a dictionary of optional outputs with the keys:

         * 'fvec' : the function evaluated at the output
         * 'num cyc':
         * 'nfev':
         * 'lamMax':
         * 'psing':
         * 'SVD0':
    """
    ifConverged = False
    deltaChi2 = -10.
    x0 = np.array(x0, ndmin=1)      #might be redundant?
    n = len(x0)
    if type(args) != type(()):
        args = (args,)
    icycle = 0
    One = np.ones((n,n))
    lam = 10.**lamda        # initial Marquardt damping factor
    lamMax = lam
    nfev = 0
    if Print:
        print (' Hessian Levenburg-Marquardt SVD refinement on %d variables:'%(n))
    Lam = np.zeros((n,n))
    while icycle < maxcyc:
        time0 = time.time()
        M = func(x0,*args)
        Nobs = len(M)
        nfev += 1
        chisq0 = np.sum(M**2)
        Yvec,Amat = Hess(x0,*args)
        Adiag = np.sqrt(np.diag(Amat))
        psing = np.where(np.abs(Adiag) < 1.e-14,True,False)
        if np.any(psing): #hard singularity in matrix
            return [x0,None,{'num cyc':icycle,'fvec':M,'nfev':nfev,'lamMax':lamMax,'psing':psing,'SVD0':-1}]
        # normalize the Hessian so its diagonal is 1 (improves conditioning)
        Anorm = np.outer(Adiag,Adiag)
        Yvec /= Adiag
        Amat /= Anorm
        if Print:
            print ('initial chi^2 %.5g on %d obs.'%(chisq0,Nobs))
        chitol = ftol
        # inner loop: raise lambda until a step actually reduces chi^2
        while True:
            Lam = np.eye(Amat.shape[0])*lam
            Amatlam = Amat*(One+Lam)    # Marquardt: scale diagonal by (1+lam)
            try:
                Ainv,Nzeros = pinv(Amatlam,xtol)    #do Moore-Penrose inversion (via SVD)
            except nl.LinAlgError:
                print ('ouch #1 bad SVD inversion; change parameterization')
                psing = list(np.where(np.diag(nl.qr(Amatlam)[1]) < 1.e-14)[0])
                return [x0,None,{'num cyc':icycle,'fvec':M,'nfev':nfev,'lamMax':lamMax,'psing':psing,'SVD0':-1}]
            Xvec = np.inner(Ainv,Yvec)  #solve
            Xvec /= Adiag               # undo the diagonal normalization
            M2 = func(x0+Xvec,*args)
            nfev += 1
            chisq1 = np.sum(M2**2)
            if chisq1 > chisq0*(1.+chitol):     #TODO put Alan Coehlo's criteria for lambda here?
                lam *= 10.      # step rejected: increase damping and retry
                if Print:
                    print ('new chi^2 %.5g on %d obs., %d SVD zeros ; matrix modification needed; lambda now %.1e' \
                        %(chisq1,Nobs,Nzeros,lam))
            else:
                x0 += Xvec      # step accepted: relax damping for next cycle
                lam /= 10.
                break
            if lam > 10.:
                print ('ouch #3 chisq1 %g.4 stuck > chisq0 %g.4'%(chisq1,chisq0))
                break
            chitol *= 2     # loosen acceptance tolerance as lambda grows
        lamMax = max(lamMax,lam)
        deltaChi2 = (chisq0-chisq1)/chisq0
        if Print:
            print (' Cycle: %d, Time: %.2fs, Chi**2: %.5g for %d obs., Lambda: %.3g,  Delta: %.3g'%(
                icycle,time.time()-time0,chisq1,Nobs,lamMax,deltaChi2))
        if deltaChi2 < ftol:
            ifConverged = True
            if Print: print ("converged")
            break
        icycle += 1
    # final evaluation at the refined point; build the covariance matrix
    M = func(x0,*args)
    nfev += 1
    Yvec,Amat = Hess(x0,*args)
    Adiag = np.sqrt(np.diag(Amat))
    Anorm = np.outer(Adiag,Adiag)
    Lam = np.eye(Amat.shape[0])*lam
    Amatlam = Amat/Anorm
    try:
        Bmat,Nzero = pinv(Amatlam,xtol)    #Moore-Penrose inversion (via SVD) & count of zeros
        if Print: print ('Found %d SVD zeros'%(Nzero))
        Bmat = Bmat/Anorm
        return [x0,Bmat,{'num cyc':icycle,'fvec':M,'nfev':nfev,'lamMax':lamMax,'psing':[],'SVD0':Nzero,'Converged': ifConverged, 'DelChi2':deltaChi2}]
    except nl.LinAlgError:
        print ('ouch #2 linear algebra error in making v-cov matrix')
        psing = []
        if maxcyc:
            psing = list(np.where(np.diag(nl.qr(Amat)[1]) < 1.e-14)[0])
        return [x0,None,{'num cyc':icycle,'fvec':M,'nfev':nfev,'lamMax':lamMax,'psing':psing,'SVD0':-1}]
def HessianSVD(func,x0,Hess,args=(),ftol=1.49012e-8,xtol=1.e-6, maxcyc=0,lamda=-3,Print=False):
    """
    SVD-based minimization of the sum of squares of a function (:math:`f`)
    evaluated on a series of values (y):
    :math:`\sum_{y=0}^{N_{obs}} f(y,{args})`
    where :math:`x = arg min(\sum_{y=0}^{N_{obs}} (func(y)^2,axis=0))`

    :param function func: callable method or function
        should take at least one (possibly length N vector) argument and
        returns M floating point numbers.
    :param np.ndarray x0: The starting estimate for the minimization of length N
    :param function Hess: callable method or function
        A required function or method to compute the weighted vector and Hessian for func.
        It must be a symmetric NxN array
    :param tuple args: Any extra arguments to func are placed in this tuple.
    :param float ftol: Relative error desired in the sum of squares.
    :param float xtol: Relative tolerance of zeros in the SVD solution in nl.pinv.
    :param int maxcyc: The maximum number of cycles of refinement to execute, if -1 refine
        until other limits are met (ftol, xtol)
    :param bool Print: True for printing results (residuals & times) by cycle

    :returns: (x,cov_x,infodict) where

      * x : ndarray
        The solution (or the result of the last iteration for an unsuccessful
        call).
      * cov_x : ndarray
        Uses the fjac and ipvt optional outputs to construct an
        estimate of the jacobian around the solution.  ``None`` if a
        singular matrix encountered (indicates very flat curvature in
        some direction).  This matrix must be multiplied by the
        residual standard deviation to get the covariance of the
        parameter estimates -- see curve_fit.
      * infodict : dict
        a dictionary of optional outputs with the keys:

         * 'fvec' : the function evaluated at the output
         * 'num cyc':
         * 'nfev':
         * 'lamMax':0.
         * 'psing':
         * 'SVD0':
    """
    ifConverged = False
    deltaChi2 = -10.
    x0 = np.array(x0, ndmin=1)      #might be redundant?
    n = len(x0)
    if type(args) != type(()):
        args = (args,)
    icycle = 0
    nfev = 0
    if Print:
        print (' Hessian SVD refinement on %d variables:'%(n))
    while icycle < maxcyc:
        time0 = time.time()
        M = func(x0,*args)
        nfev += 1
        chisq0 = np.sum(M**2)
        Yvec,Amat = Hess(x0,*args)
        Adiag = np.sqrt(np.diag(Amat))
        psing = np.where(np.abs(Adiag) < 1.e-14,True,False)
        if np.any(psing): #hard singularity in matrix
            return [x0,None,{'num cyc':icycle,'fvec':M,'nfev':nfev,'lamMax':0.,'psing':psing,'SVD0':-1}]
        # normalize the Hessian so its diagonal is 1 (improves conditioning)
        Anorm = np.outer(Adiag,Adiag)
        Yvec /= Adiag
        Amat /= Anorm
        if Print:
            print ('initial chi^2 %.5g'%(chisq0))
        try:
            Ainv,Nzeros = pinv(Amat,xtol)    #do Moore-Penrose inversion (via SVD)
        except nl.LinAlgError:
            print ('ouch #1 bad SVD inversion; change parameterization')
            psing = list(np.where(np.diag(nl.qr(Amat)[1]) < 1.e-14)[0])
            return [x0,None,{'num cyc':icycle,'fvec':M,'nfev':nfev,'lamMax':0.,'psing':psing,'SVD0':-1}]
        Xvec = np.inner(Ainv,Yvec)  #solve
        Xvec /= Adiag               # undo the diagonal normalization
        # NOTE(review): x0 is never updated with Xvec here (cf. HessianLSQ,
        # which does x0 += Xvec) — successive cycles re-evaluate the same
        # point; confirm against the upstream GSAS-II source.
        M2 = func(x0+Xvec,*args)
        nfev += 1
        chisq1 = np.sum(M2**2)
        deltaChi2 = (chisq0-chisq1)/chisq0
        if Print:
            print (' Cycle: %d, Time: %.2fs, Chi**2: %.5g, Delta: %.3g'%(
                icycle,time.time()-time0,chisq1,deltaChi2))
        if deltaChi2 < ftol:
            ifConverged = True
            if Print: print ("converged")
            break
        icycle += 1
    # final evaluation; build the covariance matrix
    M = func(x0,*args)
    nfev += 1
    Yvec,Amat = Hess(x0,*args)
    Adiag = np.sqrt(np.diag(Amat))
    Anorm = np.outer(Adiag,Adiag)
    Amat = Amat/Anorm
    try:
        Bmat,Nzero = pinv(Amat,xtol)    #Moore-Penrose inversion (via SVD) & count of zeros
        print ('Found %d SVD zeros'%(Nzero))
#        Bmat = nl.inv(Amatlam); Nzeros = 0
        Bmat = Bmat/Anorm
        return [x0,Bmat,{'num cyc':icycle,'fvec':M,'nfev':nfev,'lamMax':0.,'psing':[],
            'SVD0':Nzero,'Converged': ifConverged, 'DelChi2':deltaChi2}]
    except nl.LinAlgError:
        print ('ouch #2 linear algebra error in making v-cov matrix')
        psing = []
        if maxcyc:
            psing = list(np.where(np.diag(nl.qr(Amat)[1]) < 1.e-14)[0])
        return [x0,None,{'num cyc':icycle,'fvec':M,'nfev':nfev,'lamMax':0.,'psing':psing,'SVD0':-1}]
def getVCov(varyNames,varyList,covMatrix):
    '''obtain variance-covariance terms for a set of variables. NB: the varyList
    and covMatrix were saved by the last least squares refinement so they must match.

    :param list varyNames: variable names to find v-cov matric for
    :param list varyList: full list of all variables in v-cov matrix
    :param nparray covMatrix: full variance-covariance matrix from the last
       least squares refinement
    :returns: nparray vcov: variance-covariance matrix for the variables given
       in varyNames; entries for names absent from varyList are left at zero
    '''
    def _pos(name):
        # position of name in the full variable list, or None if absent
        try:
            return varyList.index(name)
        except ValueError:
            return None
    locs = [_pos(nam) for nam in varyNames]
    nvar = len(varyNames)
    vcov = np.zeros((nvar, nvar))
    for irow, ir in enumerate(locs):
        if ir is None:
            continue        # unknown variable: row stays zero
        for icol, ic in enumerate(locs):
            if ic is not None:
                vcov[irow][icol] = covMatrix[ir][ic]
    return vcov
################################################################################
##### Atom manipulations
################################################################################
def FindMolecule(ind,generalData,atomData):                    #uses numpy & masks - very fast even for proteins!
    '''Assemble the connected molecule containing atom ``ind`` by repeatedly
    collecting bonded neighbors (bonding test: interatomic distance <
    factor * sum of bond radii).  Symmetry operators, centering, inversion and
    the 27 neighboring cells are applied so bonds across cell boundaries are
    found.  Returns the list of atoms in the molecule (starting atom first),
    or an error string for extended (polymeric) structures.
    NB: entries of ``atomData`` are set to None as atoms are consumed.
    '''
    def getNeighbors(atom,radius):
        # indices of atoms bonded to `atom`: distance below factor*(r1+r2),
        # with contacts < 0.5A masked out as disorder artifacts
        Dx = UAtoms-np.array(atom[cx:cx+3])
        dist = ma.masked_less(np.sqrt(np.sum(np.inner(Amat,Dx)**2,axis=0)),0.5) #gets rid of disorder "bonds" < 0.5A
        sumR = Radii+radius
        return set(ma.nonzero(ma.masked_greater(dist-factor*sumR,0.))[0])                #get indices of bonded atoms
    import numpy.ma as ma
    indices = (-1,0,1)
    Units = np.array([[h,k,l] for h in indices for k in indices for l in indices],dtype='f')
    cx,ct,cs,ci = generalData['AtomPtrs']
    DisAglCtls = generalData['DisAglCtls']
    SGData = generalData['SGData']
    Amat,Bmat = G2lat.cell2AB(generalData['Cell'][1:7])
    radii = DisAglCtls['BondRadii']
    atomTypes = DisAglCtls['AtomTypes']
    factor = DisAglCtls['Factors'][0]
    unit = np.zeros(3)
    try:
        indH = atomTypes.index('H')
        radii[indH] = 0.5       # force a short bond radius for hydrogen
    except:
        pass
    nAtom = len(atomData)
    # NOTE(review): `range(nAtom)` is concatenated with += below — this
    # requires a Python-2 list; under Python 3 range objects do not support +=.
    Indx = range(nAtom)
    UAtoms = []
    Radii = []
    for atom in atomData:
        UAtoms.append(np.array(atom[cx:cx+3]))
        Radii.append(radii[atomTypes.index(atom[ct])])
    UAtoms = np.array(UAtoms)
    Radii = np.array(Radii)
    # expand atom list by symmetry operators, centering, inversion, and
    # the 26 surrounding unit cells; Indx maps each copy back to its source atom
    for nOp,Op in enumerate(SGData['SGOps'][1:]):
        UAtoms = np.concatenate((UAtoms,(np.inner(Op[0],UAtoms[:nAtom]).T+Op[1])))
        Radii = np.concatenate((Radii,Radii[:nAtom]))
        Indx += Indx[:nAtom]
    for icen,cen in enumerate(SGData['SGCen'][1:]):
        UAtoms = np.concatenate((UAtoms,(UAtoms+cen)))
        Radii = np.concatenate((Radii,Radii))
        Indx += Indx[:nAtom]
    if SGData['SGInv']:
        UAtoms = np.concatenate((UAtoms,-UAtoms))
        Radii = np.concatenate((Radii,Radii))
        Indx += Indx
    UAtoms %= 1.        # wrap all positions back into the unit cell
    mAtoms = len(UAtoms)
    for unit in Units:
        if np.any(unit):    #skip origin cell
            UAtoms = np.concatenate((UAtoms,UAtoms[:mAtoms]+unit))
            Radii = np.concatenate((Radii,Radii[:mAtoms]))
            Indx += Indx[:mAtoms]
    UAtoms = np.array(UAtoms)
    Radii = np.array(Radii)
    newAtoms = [atomData[ind],]
    atomData[ind] = None        # mark starting atom as consumed
    radius = Radii[ind]
    IndB = getNeighbors(newAtoms[-1],radius)
    # breadth-first growth of the molecule from the starting atom
    while True:
        if not len(IndB):
            break
        indb = IndB.pop()
        if atomData[Indx[indb]] == None:
            continue        # source atom already consumed
        while True:
            try:
                # NOTE(review): IndB is a set — sets have no .index(), so this
                # raises immediately and the bare except breaks out; the loop
                # looks like dead/defensive code — confirm against upstream.
                jndb = IndB.index(indb)
                IndB.remove(jndb)
            except:
                break
        newAtom = copy.copy(atomData[Indx[indb]])
        newAtom[cx:cx+3] = UAtoms[indb]      #NB: thermal Uij, etc. not transformed!
        newAtoms.append(newAtom)
        atomData[Indx[indb]] = None
        IndB = set(list(IndB)+list(getNeighbors(newAtoms[-1],radius)))
        if len(IndB) > nAtom:
            return 'Assemble molecule cannot be used on extended structures'
    # append the atoms that were never reached (not part of the molecule)
    for atom in atomData:
        if atom != None:
            newAtoms.append(atom)
    return newAtoms
def FindAtomIndexByIDs(atomData,loc,IDs,Draw=True):
    '''finds the set of atom array indices for a list of atom IDs. Will search
    either the Atom table or the drawAtom table.

    :param list atomData: Atom or drawAtom table containting coordinates, etc.
    :param int loc: location of atom id in atomData record
    :param list IDs: atom IDs to be found
    :param bool Draw: True if drawAtom table to be searched; False if Atom table
      is searched
    :returns: list indx: atom (or drawAtom) indices
    '''
    # Both tables are searched identically; Draw is retained for interface
    # compatibility but the membership test does not depend on it.
    return [serial for serial, rec in enumerate(atomData) if rec[loc] in IDs]
def FillAtomLookUp(atomData,indx):
    '''create a dictionary of atom indexes with atom IDs as keys

    :param list atomData: Atom table to be used
    :param int indx: position of the atom ID within each record
    :returns: dict atomLookUp: dictionary of atom indexes with atom IDs as keys
    '''
    # later records with a duplicate ID overwrite earlier ones, as before
    return {rec[indx]: serial for serial, rec in enumerate(atomData)}
def GetAtomsById(atomData,atomLookUp,IdList):
    '''gets a list of atoms from Atom table that match a set of atom IDs

    :param list atomData: Atom table to be used
    :param dict atomLookUp: dictionary of atom indexes with atom IDs as keys
    :param list IdList: atom IDs to be found
    :returns: list atoms: list of atoms found, in IdList order
    '''
    return [atomData[atomLookUp[aid]] for aid in IdList]
def GetAtomItemsById(atomData,atomLookUp,IdList,itemLoc,numItems=1):
    '''gets atom parameters for atoms using atom IDs

    :param list atomData: Atom table to be used
    :param dict atomLookUp: dictionary of atom indexes with atom IDs as keys
    :param list IdList: atom IDs to be found (a bare ID is wrapped in a list)
    :param int itemLoc: pointer to desired 1st item in an atom table entry
    :param int numItems: number of items to be retrieved
    :returns: list of the requested item per atom (a scalar per atom when
      numItems == 1, otherwise a slice of numItems entries)
    '''
    if not isinstance(IdList, list):
        IdList = [IdList]
    picked = []
    for aid in IdList:
        row = atomData[atomLookUp[aid]]
        # single-item requests return the bare value, not a 1-element slice
        picked.append(row[itemLoc] if numItems == 1 else row[itemLoc:itemLoc + numItems])
    return picked
def GetAtomCoordsByID(pId,parmDict,AtLookup,indx):
    '''Sum base coordinates (<pId>::Ax:<n> etc.) and their refined shifts
    (<pId>::dAx:<n> etc.) from a parameter dictionary for a set of atoms.

    :param pId: phase id used as the parameter-name prefix
    :param dict parmDict: parameter dictionary keyed as "<pId>::A<axis>:<n>"
    :param dict AtLookup: maps each entry of indx to the atom serial number n
    :param list indx: atom keys to process
    :returns: list of [x,y,z] sums, one per entry of indx
    '''
    coords = []
    for ind in indx:
        serial = str(AtLookup[ind])
        row = []
        for axis in ('x', 'y', 'z'):
            base = parmDict[str(pId) + '::A' + axis + ':' + serial]
            shift = parmDict[str(pId) + '::dA' + axis + ':' + serial]
            row.append(base + shift)
        coords.append(row)
    return coords
def GetAtomFracByID(pId,parmDict,AtLookup,indx):
    '''Look up site fractions (<pId>::Afrac:<n>) from a parameter dictionary
    for a set of atoms.

    :param pId: phase id used as the parameter-name prefix
    :param dict parmDict: parameter dictionary keyed as "<pId>::Afrac:<n>"
    :param dict AtLookup: maps each entry of indx to the atom serial number n
    :param list indx: atom keys to process
    :returns: list of site fractions, one per entry of indx
    '''
    stem = str(pId) + '::Afrac:'
    return [parmDict[stem + str(AtLookup[ind])] for ind in indx]
# for Atom in Atoms:
# XYZ = Atom[cx:cx+3]
# if 'A' in Atom[cia]:
# U6 = Atom[cia+2:cia+8]
def ApplySeqData(data,seqData):
    '''Applies result from seq. refinement to drawing atom positions & Uijs

    :param dict data: phase data block ('General', 'Atoms', 'Drawing', 'pId')
    :param dict seqData: sequential-refinement results with a 'parmDict'
    :returns: the drawing Atoms list (mutated in place)
    '''
    generalData = data['General']
    SGData = generalData['SGData']
    cx,ct,cs,cia = generalData['AtomPtrs']
    drawingData = data['Drawing']
    dcx,dct,dcs,dci = drawingData['atomPtrs']
    atoms = data['Atoms']
    drawAtoms = drawingData['Atoms']
    pId = data['pId']
    pfx = '%d::'%(pId)      # parameter-name prefix for this phase
    parmDict = seqData['parmDict']
    for ia,atom in enumerate(atoms):
        # refined coordinate shifts for this atom
        dxyz = np.array([parmDict[pfx+'dAx:'+str(ia)],parmDict[pfx+'dAy:'+str(ia)],parmDict[pfx+'dAz:'+str(ia)]])
        if atom[cia] == 'A':        # anisotropic: collect the 6 Uij terms
            atuij = np.array([parmDict[pfx+'AU11:'+str(ia)],parmDict[pfx+'AU22:'+str(ia)],parmDict[pfx+'AU33:'+str(ia)],
                parmDict[pfx+'AU12:'+str(ia)],parmDict[pfx+'AU13:'+str(ia)],parmDict[pfx+'AU23:'+str(ia)]])
        else:                       # isotropic: single Uiso
            atuiso = parmDict[pfx+'AUiso:'+str(ia)]
        atxyz = G2spc.MoveToUnitCell(np.array(atom[cx:cx+3])+dxyz)[0]
        # update every drawing copy generated from this atom
        indx = FindAtomIndexByIDs(drawAtoms,dci,[atom[cia+8],],True)
        for ind in indx:
            drawatom = drawAtoms[ind]
            opr = drawatom[dcs-1]       # symmetry operator string for this copy
            #how do I handle Sfrac? - fade the atoms?
            if atom[cia] == 'A':
                X,U = G2spc.ApplyStringOps(opr,SGData,atxyz,atuij)
                drawatom[dcx:dcx+3] = X
                drawatom[dci-6:dci] = U
            else:
                X = G2spc.ApplyStringOps(opr,SGData,atxyz)
                drawatom[dcx:dcx+3] = X
                drawatom[dci-7] = atuiso
    return drawAtoms
def FindNeighbors(phase,FrstName,AtNames,notName=''):
    '''Find atoms bonded to a given atom within the asymmetric unit only
    (no symmetry or cell translations; cf. FindAllNeighbors).
    Bond test: Cartesian distance < radiusFactor * sum of bond radii.

    :param dict phase: phase data block ('General', 'Atoms')
    :param str FrstName: name of the origin atom
    :param list AtNames: atom names indexed like phase['Atoms']
    :param str notName: atom name to exclude from the neighbor list
    :returns: (Neigh, [OId, Ids]) — neighbor [name, distance, True] records,
      the origin atom id and the neighbor atom ids
    '''
    General = phase['General']
    cx,ct,cs,cia = General['AtomPtrs']
    Atoms = phase['Atoms']
    atNames = [atom[ct-1] for atom in Atoms]
    Cell = General['Cell'][1:7]
    Amat,Bmat = G2lat.cell2AB(Cell)     # fractional <-> Cartesian transforms
    atTypes = General['AtomTypes']
    Radii = np.array(General['BondRadii'])
    DisAglCtls = General['DisAglCtls']
    radiusFactor = DisAglCtls['Factors'][0]
    AtInfo = dict(zip(atTypes,Radii)) #or General['BondRadii']
    Orig = atNames.index(FrstName)
    OId = Atoms[Orig][cia+8]
    OType = Atoms[Orig][ct]
    XYZ = getAtomXYZ(Atoms,cx)
    Neigh = []
    Ids = []
    # Cartesian displacement of every atom from the origin atom
    Dx = np.inner(Amat,XYZ-XYZ[Orig]).T
    dist = np.sqrt(np.sum(Dx**2,axis=1))
    sumR = np.array([AtInfo[OType]+AtInfo[atom[ct]] for atom in Atoms])
    # atoms whose distance falls below radiusFactor * (rO + rX) are bonded
    IndB = ma.nonzero(ma.masked_greater(dist-radiusFactor*sumR,0.))
    for j in IndB[0]:
        if j != Orig:
            if AtNames[j] != notName:
                Neigh.append([AtNames[j],dist[j],True])
                Ids.append(Atoms[j][cia+8])
    return Neigh,[OId,Ids]
def FindAllNeighbors(phase,FrstName,AtNames,notName=''):
    '''Find atoms bonded to a given atom, including symmetry-generated copies
    and the 27 surrounding unit cells (cf. FindNeighbors, which searches the
    asymmetric unit only).  Bond test: Cartesian distance <
    radiusFactor * sum of bond radii.

    :param dict phase: phase data block ('General', 'Atoms')
    :param str FrstName: name of the origin atom
    :param list AtNames: atom names indexed like phase['Atoms']
    :param str notName: atom name to exclude from the neighbor list
    :returns: (Neigh, [OId, Ids]) — neighbor [name+operator, distance]
      records, the origin atom id and the neighbor atom ids
    '''
    General = phase['General']
    cx,ct,cs,cia = General['AtomPtrs']
    Atoms = phase['Atoms']
    atNames = [atom[ct-1] for atom in Atoms]
    Cell = General['Cell'][1:7]
    Amat,Bmat = G2lat.cell2AB(Cell)     # fractional <-> Cartesian transforms
    SGData = General['SGData']
    indices = (-1,0,1)
    Units = np.array([[h,k,l] for h in indices for k in indices for l in indices])
    atTypes = General['AtomTypes']
    Radii = np.array(General['BondRadii'])
    DisAglCtls = General['DisAglCtls']
    radiusFactor = DisAglCtls['Factors'][0]
    AtInfo = dict(zip(atTypes,Radii)) #or General['BondRadii']
    Orig = atNames.index(FrstName)
    OId = Atoms[Orig][cia+8]
    OType = Atoms[Orig][ct]
    XYZ = getAtomXYZ(Atoms,cx)
    Oxyz = XYZ[Orig]
    Neigh = []
    Ids = []
    sumR = np.array([AtInfo[OType]+AtInfo[atom[ct]] for atom in Atoms])
    sumR = np.reshape(np.tile(sumR,27),(27,-1))     # one row per neighboring cell
    results = []
    # all symmetry-equivalent positions for every atom
    for xyz in XYZ:
        results.append(G2spc.GenAtom(xyz,SGData,False,Move=False))
    for iA,result in enumerate(results):
        if iA != Orig:
            for [Txyz,Top,Tunit,Spn] in result:
                # displacements to this equivalent position in all 27 cells
                Dx = np.array([Txyz-Oxyz+unit for unit in Units])
                dx = np.inner(Dx,Amat)
                dist = np.sqrt(np.sum(dx**2,axis=1))
                IndB = ma.nonzero(ma.masked_greater(dist-radiusFactor*sumR[:,iA],0.))
#                GSASIIpath.IPyBreak()
                for iU in IndB[0]:
                    if AtNames[iA] != notName:
                        unit = Units[iU]
                        # label carries the symmetry operator and, if any, the cell offset
                        if np.any(unit):
                            Topstr = ' +(%4d)[%2d,%2d,%2d]'%(Top,unit[0],unit[1],unit[2])
                        else:
                            Topstr = ' +(%4d)'%(Top)
                        Neigh.append([AtNames[iA]+Topstr,dist[iU]])
                        Ids.append(Atoms[iA][cia+8])
    return Neigh,[OId,Ids]
def calcBond(A,Ax,Bx,MTCU):
    '''Distance between atom Ax and the symmetry-transformed copy of atom Bx.

    :param A: reciprocal metric tensor terms (G2lat.A2cell input)
    :param Ax: fractional coordinates of atom A
    :param Bx: fractional coordinates of atom B
    :param MTCU: (M, T, C, U) symmetry matrix, translation, centering and
      cell-offset applied to Bx
    :returns: the computed distance
    '''
    cell = G2lat.A2cell(A)
    Amat,Bmat = G2lat.cell2AB(cell)
    M,T,C,U = MTCU
    Btx = np.inner(M,Bx)+T+C+U      # transformed position of atom B
    Dx = Btx-Ax
    # NOTE(review): sqrt(inner(Amat,Dx)) lacks the sum-of-squares used by the
    # other distance computations in this module (cf. GetXYZDist) and yields a
    # vector, not a scalar — confirm intent against upstream GSAS-II.
    dist = np.sqrt(np.inner(Amat,Dx))
    return dist
def AddHydrogens(AtLookUp,General,Atoms,AddHydId):
    '''Compute riding-hydrogen positions & Uiso values for an atom, from the
    geometry of its bonded neighbors.  Branches on the total coordination
    (nBonds) and the number of H atoms to add (AddHydId[-1]).

    :param dict AtLookUp: atom-serial lookup (see FillAtomLookUp)
    :param dict General: phase General block ('AtomPtrs', 'Cell', 'Map')
    :param list Atoms: Atom table
    :param list AddHydId: [O atom id, [bonded atom ids], R atom id?, ..., nH]
    :returns: (Hpos, HU) — list/array of H fractional positions and their
      Uiso values; ([], []) if the geometry is not handled
    '''
    def getTransMat(RXYZ,OXYZ,TXYZ,Amat):
        # orthonormal frame from the O->T and O->R directions (Cartesian)
        Vec = np.inner(Amat,np.array([OXYZ-TXYZ[0],RXYZ-TXYZ[0]])).T
        Vec /= np.sqrt(np.sum(Vec**2,axis=1))[:,nxs]
        Mat2 = np.cross(Vec[0],Vec[1])      #UxV
        Mat2 /= np.sqrt(np.sum(Mat2**2))
        Mat3 = np.cross(Mat2,Vec[0])        #(UxV)xU
        return nl.inv(np.array([Vec[0],Mat2,Mat3]))
    cx,ct,cs,cia = General['AtomPtrs']
    Cell = General['Cell'][1:7]
    Amat,Bmat = G2lat.cell2AB(Cell)     # fractional <-> Cartesian transforms
    nBonds = AddHydId[-1]+len(AddHydId[1])
    Oatom = GetAtomsById(Atoms,AtLookUp,[AddHydId[0],])[0]
    OXYZ = np.array(Oatom[cx:cx+3])
    if 'I' in Oatom[cia]:
        Uiso = Oatom[cia+1]
    else:
        Uiso = (Oatom[cia+2]+Oatom[cia+3]+Oatom[cia+4])/3.0       #simple average
    Uiso = max(Uiso,0.005)          #set floor!
    Tatoms = GetAtomsById(Atoms,AtLookUp,AddHydId[1])
    TXYZ = np.array([tatom[cx:cx+3] for tatom in Tatoms]) #3 x xyz
    if nBonds == 4:
        if AddHydId[-1] == 1:       # one H on an sp3 center (e.g. CH)
            Vec = TXYZ-OXYZ
            Len = np.sqrt(np.sum(np.inner(Amat,Vec).T**2,axis=0))
            Vec = np.sum(Vec/Len,axis=0)
            Len = np.sqrt(np.sum(Vec**2))
            # H placed opposite the neighbor average, 0.98A from O
            Hpos = OXYZ-0.98*np.inner(Bmat,Vec).T/Len
            HU = 1.1*Uiso
            return [Hpos,],[HU,]
        elif AddHydId[-1] == 2:     # two H (e.g. CH2)
            Vec = np.inner(Amat,TXYZ-OXYZ).T
            Vec[0] += Vec[1]            #U - along bisector
            Vec /= np.sqrt(np.sum(Vec**2,axis=1))[:,nxs]
            Mat2 = np.cross(Vec[0],Vec[1])      #UxV
            Mat2 /= np.sqrt(np.sum(Mat2**2))
            Mat3 = np.cross(Mat2,Vec[0])        #(UxV)xU
            iMat = nl.inv(np.array([Vec[0],Mat2,Mat3]))
            # ideal tetrahedral H pair in the local frame (0.97A bonds)
            Hpos = np.array([[-0.97*cosd(54.75),0.97*sind(54.75),0.],
                [-0.97*cosd(54.75),-0.97*sind(54.75),0.]])
            HU = 1.2*Uiso*np.ones(2)
            Hpos = np.inner(Bmat,np.inner(iMat,Hpos).T).T+OXYZ
            return Hpos,HU
        else:                       # three H (e.g. CH3), staggered w.r.t. R atom
            Ratom = GetAtomsById(Atoms,AtLookUp,[AddHydId[2],])[0]
            RXYZ = np.array(Ratom[cx:cx+3])
            iMat = getTransMat(RXYZ,OXYZ,TXYZ,Amat)
            a = 0.96*cosd(70.5)
            b = 0.96*sind(70.5)
            Hpos = np.array([[a,0.,-b],[a,-b*cosd(30.),0.5*b],[a,b*cosd(30.),0.5*b]])
            Hpos = np.inner(Bmat,np.inner(iMat,Hpos).T).T+OXYZ
            HU = 1.5*Uiso*np.ones(3)
            return Hpos,HU
    elif nBonds == 3:
        if AddHydId[-1] == 1:       # one H on a trigonal center
            Vec = np.sum(TXYZ-OXYZ,axis=0)
            Len = np.sqrt(np.sum(np.inner(Amat,Vec).T**2))
            Vec = -0.93*Vec/Len
            Hpos = OXYZ+Vec
            HU = 1.1*Uiso
            return [Hpos,],[HU,]
        elif AddHydId[-1] == 2:     # two H (e.g. NH2)
            Ratom = GetAtomsById(Atoms,AtLookUp,[AddHydId[2],])[0]
            RXYZ = np.array(Ratom[cx:cx+3])
            iMat = getTransMat(RXYZ,OXYZ,TXYZ,Amat)
            a = 0.93*cosd(60.)
            b = 0.93*sind(60.)
            Hpos = [[a,b,0],[a,-b,0]]
            Hpos = np.inner(Bmat,np.inner(iMat,Hpos).T).T+OXYZ
            HU = 1.2*Uiso*np.ones(2)
            return Hpos,HU
    else:   #2 bonds
        if 'C' in Oatom[ct]:        # sp carbon: single H along the bond axis
            Vec = TXYZ[0]-OXYZ
            Len = np.sqrt(np.sum(np.inner(Amat,Vec).T**2))
            Vec = -0.93*Vec/Len
            Hpos = OXYZ+Vec
            HU = 1.1*Uiso
            return [Hpos,],[HU,]
        elif 'O' in Oatom[ct]:      # hydroxyl: scan torsion, pick max map density
            mapData = General['Map']
            Ratom = GetAtomsById(Atoms,AtLookUp,[AddHydId[2],])[0]
            RXYZ = np.array(Ratom[cx:cx+3])
            iMat = getTransMat(RXYZ,OXYZ,TXYZ,Amat)
            a = 0.82*cosd(70.5)
            b = 0.82*sind(70.5)
            azm = np.arange(0.,360.,5.)
            Hpos = np.array([[a,b*cosd(x),b*sind(x)] for x in azm])
            Hpos = np.inner(Bmat,np.inner(iMat,Hpos).T).T+OXYZ
            Rhos = np.array([getRho(pos,mapData) for pos in Hpos])
            imax = np.argmax(Rhos)
            HU = 1.5*Uiso
            return [Hpos[imax],],[HU,]
    return [],[]
#def AtomUij2TLS(atomData,atPtrs,Amat,Bmat,rbObj): #unfinished & not used
# '''default doc string
#
# :param type name: description
#
# :returns: type name: description
#
# '''
# for atom in atomData:
# XYZ = np.inner(Amat,atom[cx:cx+3])
# if atom[cia] == 'A':
# UIJ = atom[cia+2:cia+8]
def TLS2Uij(xyz,g,Amat,rbObj):      #not used anywhere, but could be?
    '''Compute atomic displacement parameters (U6) at a position from a rigid
    body's TLS (translation/libration/screw) thermal-motion description.

    :param xyz: fractional coordinates of the atom
    :param g: metric tensor
    :param Amat: fractional-to-Cartesian transformation matrix
    :param dict rbObj: rigid-body object; rbObj['ThermalMotion'] holds the
      TLS type string and coefficient list
    :returns: U6 displacement terms (via G2lat.UijtoU6) scaled by gvec
    '''
    TLStype,TLS = rbObj['ThermalMotion'][:2]
    Tmat = np.zeros((3,3))
    Lmat = np.zeros((3,3))
    Smat = np.zeros((3,3))
    gvec = np.sqrt(np.array([g[0][0]**2,g[1][1]**2,g[2][2]**2,
        g[0][0]*g[1][1],g[0][0]*g[2][2],g[1][1]*g[2][2]]))
    # pick up whichever of T, L, S the rigid body defines
    if 'T' in TLStype:
        Tmat = G2lat.U6toUij(TLS[:6])
    if 'L' in TLStype:
        Lmat = G2lat.U6toUij(TLS[6:12])
    if 'S' in TLStype:
        Smat = np.array([[TLS[18],TLS[12],TLS[13]],[TLS[14],TLS[19],TLS[15]],[TLS[16],TLS[17],0] ])
    XYZ = np.inner(Amat,xyz)
    # antisymmetric position matrix used by the TLS expansion
    Axyz = np.array([[ 0,XYZ[2],-XYZ[1]], [-XYZ[2],0,XYZ[0]], [XYZ[1],-XYZ[0],0]] )
    Umat = Tmat+np.inner(Axyz,Smat)+np.inner(Smat.T,Axyz.T)+np.inner(np.inner(Axyz,Lmat),Axyz.T)
    beta = np.inner(np.inner(g,Umat),g)
    return G2lat.UijtoU6(beta)*gvec
def AtomTLS2UIJ(atomData,atPtrs,Amat,rbObj):    #not used anywhere, but could be?
    '''Set displacement parameters for a list of atoms from a rigid body's
    TLS (or overall U) thermal-motion description; each atom record is
    updated in place (flag -> 'A' or 'I' plus the Uij/Uiso values).

    :param list atomData: Atom table, modified in place
    :param atPtrs: (cx, ct, cs, cia) column pointers into the atom records
    :param Amat: fractional-to-Cartesian transformation matrix
    :param dict rbObj: rigid-body object; rbObj['ThermalMotion'] holds the
      TLS type string and coefficient list
    '''
    cx,ct,cs,cia = atPtrs
    TLStype,TLS = rbObj['ThermalMotion'][:2]
    Tmat = np.zeros((3,3))
    Lmat = np.zeros((3,3))
    Smat = np.zeros((3,3))
    G,g = G2lat.A2Gmat(Amat)
    gvec = 1./np.sqrt(np.array([g[0][0],g[1][1],g[2][2],g[0][1],g[0][2],g[1][2]]))
    # pick up whichever of T, L, S the rigid body defines
    if 'T' in TLStype:
        Tmat = G2lat.U6toUij(TLS[:6])
    if 'L' in TLStype:
        Lmat = G2lat.U6toUij(TLS[6:12])
    if 'S' in TLStype:
        Smat = np.array([ [TLS[18],TLS[12],TLS[13]], [TLS[14],TLS[19],TLS[15]], [TLS[16],TLS[17],0] ])
    for atom in atomData:
        XYZ = np.inner(Amat,atom[cx:cx+3])
        Axyz = np.array([ 0,XYZ[2],-XYZ[1], -XYZ[2],0,XYZ[0], XYZ[1],-XYZ[0],0],ndmin=2 )
        if 'U' in TLStype:      # overall isotropic U: set Uiso directly
            atom[cia+1] = TLS[0]
            atom[cia] = 'I'
        else:                   # full TLS: compute anisotropic Uij terms
            atom[cia] = 'A'
            Umat = Tmat+np.inner(Axyz,Smat)+np.inner(Smat.T,Axyz.T)+np.inner(np.inner(Axyz,Lmat),Axyz.T)
            beta = np.inner(np.inner(g,Umat),g)
            atom[cia+2:cia+8] = G2spc.U2Uij(beta/gvec)
def GetXYZDist(xyz,XYZ,Amat):
    '''gets distance from position xyz to all XYZ; xyz & XYZ are np.array
    and are in crystal coordinates; Amat is crystal to Cart matrix

    :param xyz: single fractional position
    :param XYZ: array of fractional positions
    :param Amat: fractional-to-Cartesian transformation matrix
    :returns: array of Cartesian distances, one per row of XYZ
    '''
    cart = np.inner(Amat, XYZ - xyz)
    return np.sqrt(np.sum(cart**2, axis=0))
def getAtomXYZ(atoms,cx):
    '''Extract the three coordinate columns from an Atom table.

    :param list atoms: atom records
    :param int cx: index of the x coordinate within each record
    :returns: np.ndarray of shape (natoms, 3)
    '''
    return np.array([rec[cx:cx+3] for rec in atoms])
def getRBTransMat(X,Y):
    '''Get transformation for Cartesian axes given 2 vectors
    X will be parallel to new X-axis; X cross Y will be new Z-axis &
    (X cross Y) cross Y will be new Y-axis
    Useful for rigid body axes definintion

    :param array X: normalized vector
    :param array Y: normalized vector
    :returns: array M: transformation matrix

    use as XYZ' = np.inner(M,XYZ) where XYZ are Cartesian
    '''
    zAxis = np.cross(X, Y)          #UxV-->Z
    zAxis /= np.sqrt(np.sum(zAxis**2))
    yAxis = np.cross(zAxis, X)      #(UxV)xU-->Y
    yAxis /= np.sqrt(np.sum(yAxis**2))
    # rows are the new basis vectors expressed in the old frame
    return np.array([X, yAxis, zAxis])
def RotateRBXYZ(Bmat,Cart,oriQ):
    '''rotate & transform cartesian coordinates to crystallographic ones
    no translation applied. To be used for numerical derivatives

    :param Bmat: Cartesian-to-fractional transformation matrix
    :param Cart: array of Cartesian coordinates
    :param oriQ: orientation quaternion (applied via prodQVQ)
    :returns: array of rotated fractional coordinates, same shape as Cart
    '''
    ''' returns crystal coordinates for atoms described by RBObj
    '''
    XYZ = np.zeros_like(Cart)
    for i,xyz in enumerate(Cart):
        # rotate by the quaternion, then convert Cartesian -> fractional
        XYZ[i] = np.inner(Bmat,prodQVQ(oriQ,xyz))
    return XYZ
def UpdateRBXYZ(Bmat,RBObj,RBData,RBType):
    '''Compute crystal & Cartesian coordinates for the atoms of a rigid body.

    :param np.array Bmat: Cartesian-to-crystal transformation matrix
    :param dict RBObj: rigid body object; uses 'RBId', 'Torsions',
      'Orient' (orientation quaternion in [0]) & 'Orig' (origin in [0])
    :param dict RBData: rigid body data, indexed by type then RBId
    :param str RBType: 'Vector' or 'Residue'
    :returns: XYZ,Cart: crystal & Cartesian coordinate arrays
    '''
    ''' returns crystal coordinates for atoms described by RBObj
    '''
    RBRes = RBData[RBType][RBObj['RBId']]
    if RBType == 'Vector':
        # vector body: Cartesian positions are a magnitude-weighted vector sum
        vecs = RBRes['rbVect']
        mags = RBRes['VectMag']
        Cart = np.zeros_like(vecs[0])
        for vec,mag in zip(vecs,mags):
            Cart += vec*mag
    elif RBType == 'Residue':
        # residue body: start from stored coords, then apply each torsion in
        # sequence; order matters since later torsions see rotated positions
        Cart = np.array(RBRes['rbXYZ'])
        for tor,seq in zip(RBObj['Torsions'],RBRes['rbSeq']):
            QuatA = AVdeg2Q(tor[0],Cart[seq[0]]-Cart[seq[1]])
            Cart[seq[3]] = prodQVQ(QuatA,(Cart[seq[3]]-Cart[seq[1]]))+Cart[seq[1]]
    # orient the body & translate to its origin, in crystal coordinates
    XYZ = np.zeros_like(Cart)
    for i,xyz in enumerate(Cart):
        XYZ[i] = np.inner(Bmat,prodQVQ(RBObj['Orient'][0],xyz))+RBObj['Orig'][0]
    return XYZ,Cart
def UpdateMCSAxyz(Bmat,MCSA):
    '''Compute crystal coordinates & atom types for all atoms of the MC/SA
    models (single atoms plus Vector/Residue rigid bodies).

    :param np.array Bmat: Cartesian-to-crystal transformation matrix
    :param dict MCSA: MC/SA data; uses 'Models' (first entry is the MD model,
      skipped here) & 'rbData'
    :returns: np.array xyz (natoms x 3), list atTypes
    '''
    xyz = []
    atTypes = []
    iatm = 0
    for model in MCSA['Models'][1:]:        #skip the MD model
        if model['Type'] == 'Atom':
            xyz.append(model['Pos'][0])
            atTypes.append(model['atType'])
            iatm += 1
        else:
            RBRes = MCSA['rbData'][model['Type']][model['RBId']]
            Pos = np.array(model['Pos'][0])
            Ori = np.array(model['Ori'][0])
            Qori = AVdeg2Q(Ori[0],Ori[1:])      #angle(deg) + axis -> quaternion
            if model['Type'] == 'Vector':
                # vector body: magnitude-weighted sum of the body vectors
                vecs = RBRes['rbVect']
                mags = RBRes['VectMag']
                Cart = np.zeros_like(vecs[0])
                for vec,mag in zip(vecs,mags):
                    Cart += vec*mag
            elif model['Type'] == 'Residue':
                # residue body: apply torsions in sequence to stored coords
                Cart = np.array(RBRes['rbXYZ'])
                for itor,seq in enumerate(RBRes['rbSeq']):
                    QuatA = AVdeg2Q(model['Tor'][0][itor],Cart[seq[0]]-Cart[seq[1]])
                    Cart[seq[3]] = prodQVQ(QuatA,(Cart[seq[3]]-Cart[seq[1]]))+Cart[seq[1]]
            if model['MolCent'][1]:     #center the molecule if flagged
                Cart -= model['MolCent'][0]
            for i,x in enumerate(Cart):
                xyz.append(np.inner(Bmat,prodQVQ(Qori,x))+Pos)
                atType = RBRes['rbTypes'][i]
                atTypes.append(atType)
                iatm += 1
    return np.array(xyz),atTypes
def SetMolCent(model,RBData):
    '''Compute & store the molecular centroid of a rigid body for an MC/SA
    model; only atoms not riding on a torsion are included in the average.

    :param dict model: MC/SA model; uses 'Type', 'RBId' & 'MolCent';
      model['MolCent'][0] is overwritten with the centroid
    :param dict RBData: rigid body data, indexed by type then RBId
    :returns: None; result is stored in model['MolCent'][0]
    '''
    rideList = []
    RBRes = RBData[model['Type']][model['RBId']]
    if model['Type'] == 'Vector':
        vecs = RBRes['rbVect']
        mags = RBRes['VectMag']
        Cart = np.zeros_like(vecs[0])
        for vec,mag in zip(vecs,mags):
            Cart += vec*mag
    elif model['Type'] == 'Residue':
        Cart = np.array(RBRes['rbXYZ'])
        for seq in RBRes['rbSeq']:
            rideList += seq[3]      #seq[3] presumably lists atoms riding on this torsion - TODO confirm
    centList = set(range(len(Cart)))-set(rideList)      #non-riding atoms only
    cent = np.zeros(3)
    for i in centList:
        cent += Cart[i]
    model['MolCent'][0] = cent/len(centList)
def UpdateRBUIJ(Bmat,Cart,RBObj):
    '''Compute atom I/A flag & Uiso or Uij values for rigid-body atoms from
    the body's TLS (or Uiso) thermal motion model.

    :param np.array Bmat: Cartesian-to-crystal transformation matrix
    :param np.array Cart: Cartesian coordinates of the rigid body atoms
    :param dict RBObj: rigid body object; RBObj['ThermalMotion'][:2] gives
      the model type string (contains 'T','L','S','U' or 'N') & the
      parameter list; RBObj['Orient'][0] is the orientation quaternion
    :returns: list of per-atom entries: ['I',Uiso,0,...], ['A',0.0,U11..U12]
      or ['N',]
    '''
    ''' returns atom I/A, Uiso or UIJ for atoms at XYZ as described by RBObj
    '''
    TLStype,TLS = RBObj['ThermalMotion'][:2]
    T = np.zeros(6)
    L = np.zeros(6)
    S = np.zeros(8)
    if 'T' in TLStype:
        T = TLS[:6]
    if 'L' in TLStype:
        L = np.array(TLS[6:12])*(np.pi/180.)**2     #deg^2 -> radian^2
    if 'S' in TLStype:
        S = np.array(TLS[12:])*(np.pi/180.)         #deg -> radian
    g = nl.inv(np.inner(Bmat,Bmat))
    gvec = np.sqrt(np.array([g[0][0]**2,g[1][1]**2,g[2][2]**2,
        g[0][0]*g[1][1],g[0][0]*g[2][2],g[1][1]*g[2][2]]))
    Uout = []
    Q = RBObj['Orient'][0]
    for X in Cart:
        X = prodQVQ(Q,X)        #rotate atom position into body orientation
        if 'U' in TLStype:      #simple isotropic U for the whole body
            Uout.append(['I',TLS[0],0,0,0,0,0,0])
        elif not 'N' in TLStype:
            # anisotropic Uij from the TLS model evaluated at position X
            U = [0,0,0,0,0,0]
            U[0] = T[0]+L[1]*X[2]**2+L[2]*X[1]**2-2.0*L[5]*X[1]*X[2]+2.0*(S[2]*X[2]-S[4]*X[1])
            U[1] = T[1]+L[0]*X[2]**2+L[2]*X[0]**2-2.0*L[4]*X[0]*X[2]+2.0*(S[5]*X[0]-S[0]*X[2])
            U[2] = T[2]+L[1]*X[0]**2+L[0]*X[1]**2-2.0*L[3]*X[1]*X[0]+2.0*(S[1]*X[1]-S[3]*X[0])
            U[3] = T[3]+L[4]*X[1]*X[2]+L[5]*X[0]*X[2]-L[3]*X[2]**2-L[2]*X[0]*X[1]+ \
                S[4]*X[0]-S[5]*X[1]-(S[6]+S[7])*X[2]
            U[4] = T[4]+L[3]*X[1]*X[2]+L[5]*X[0]*X[1]-L[4]*X[1]**2-L[1]*X[0]*X[2]+ \
                S[3]*X[2]-S[2]*X[0]+S[6]*X[1]
            U[5] = T[5]+L[3]*X[0]*X[2]+L[4]*X[0]*X[1]-L[5]*X[0]**2-L[0]*X[2]*X[1]+ \
                S[0]*X[1]-S[1]*X[2]+S[7]*X[0]
            Umat = G2lat.U6toUij(U)
            beta = np.inner(np.inner(Bmat.T,Umat),Bmat)     #to crystal frame
            Uout.append(['A',0.0,]+list(G2lat.UijtoU6(beta)*gvec))
        else:
            Uout.append(['N',])
    return Uout
def GetSHCoeff(pId,parmDict,SHkeys):
    '''Extract spherical harmonics coefficients from the parameter dictionary.

    :param pId: phase id used to build parameter names as str(pId)+'::'+key
    :param dict parmDict: parameter dictionary
    :param list SHkeys: coefficient names to pull
    :returns: dict mapping each key in SHkeys to its parameter value
    '''
    return {shkey:parmDict[str(pId)+'::'+shkey] for shkey in SHkeys}
def getMass(generalData):
    '''Computes mass of unit cell contents

    :param dict generalData: The General dictionary in Phase; uses
      'AtomTypes', 'NoAtoms' & 'AtomMass'
    :returns: float mass: Crystal unit cell mass in AMU (at least 1.0)
    '''
    cellMass = sum(generalData['NoAtoms'][elem]*generalData['AtomMass'][i]
        for i,elem in enumerate(generalData['AtomTypes']))
    return max(cellMass,1.0)    #floor of 1 avoids divide-by-zero downstream
def getDensity(generalData):
    '''calculate crystal structure density

    :param dict generalData: The General dictionary in Phase; cell volume is
      taken from generalData['Cell'][7]
    :returns: tuple: (density in gm/cm^3, specific volume in A^3/AMU)
    '''
    cellMass = getMass(generalData)
    cellVol = generalData['Cell'][7]
    return cellMass/(0.6022137*cellVol),cellVol/cellMass
def getWave(Parms):
    '''returns wavelength from Instrument parameters dictionary

    :param dict Parms: Instrument parameters;
        must contain:
        Lam: single wavelength
        or
        Lam1: Ka1 radiation wavelength
    :returns: float wave: wavelength
    '''
    if 'Lam' in Parms:
        return Parms['Lam'][1]
    return Parms['Lam1'][1]
def getMeanWave(Parms):
    '''returns mean wavelength from Instrument parameters dictionary

    :param dict Parms: Instrument parameters;
        must contain:
        Lam: single wavelength
        or
        Lam1,Lam2: Ka1,Ka2 radiation wavelength
        I(L2)/I(L1): Ka2/Ka1 ratio
    :returns: float wave: mean wavelength
    '''
    if 'Lam' in Parms:
        return Parms['Lam'][1]
    ratio = Parms['I(L2)/I(L1)'][1]     #Ka2/Ka1 intensity ratio
    return (Parms['Lam1'][1]+ratio*Parms['Lam2'][1])/(1.+ratio)
def El2Mass(Elements):
    '''compute molecular weight from Elements

    :param dict Elements: elements in molecular formula;
        each must contain
        Num: number of atoms in formula
        Mass: at. wt.
    :returns: float mass: molecular weight.
    '''
    return sum(rec['Num']*rec['Mass'] for rec in Elements.values())
def Den2Vol(Elements,density):
    '''converts density to molecular volume

    :param dict Elements: elements in molecular formula;
        each must contain
        Num: number of atoms in formula
        Mass: at. wt.
    :param float density: material density in gm/cm^3
    :returns: float volume: molecular volume in A^3
    '''
    mass = El2Mass(Elements)
    return mass/(density*0.6022137)
def Vol2Den(Elements,volume):
    '''converts volume to density

    :param dict Elements: elements in molecular formula;
        each must contain
        Num: number of atoms in formula
        Mass: at. wt.
    :param float volume: molecular volume in A^3
    :returns: float density: material density in gm/cm^3
    '''
    mass = El2Mass(Elements)
    return mass/(volume*0.6022137)
def El2EstVol(Elements):
    '''Estimate volume from molecular formula; assumes atom volume = 10A^3

    :param dict Elements: elements in molecular formula;
        each must contain
        Num: number of atoms in formula
    :returns: float volume: estimate of molecular volume in A^3
    '''
    return sum(10.*rec['Num'] for rec in Elements.values())
def XScattDen(Elements,vol,wave=0.):
    '''Estimate X-ray scattering density from molecular formula & volume;
    ignores valence, but includes anomalous effects.

    :param dict Elements: elements in molecular formula;
        each element must contain
        Num: number of atoms in formula
        Z: atomic number
    :param float vol: molecular volume in A^3
    :param float wave: optional wavelength in A
    :returns: float rho: scattering density in 10^10cm^-2;
        if wave > 0 then includes f' contribution
    :returns: float mu: if wave>0 absorption coeff in cm^-1 ; otherwise 0
    :returns: float fpp: if wave>0 f" in 10^10cm^-2; otherwise 0
    '''
    rho = 0
    mu = 0
    fpp = 0
    if wave:
        anom = XAnomAbs(Elements,wave)      #f',f",mu per element
    for elem,rec in Elements.items():
        scatLen = rec['Z']
        if wave:
            scatLen += anom[elem][0]                #add f'
            fpp += anom[elem][1]*rec['Num']
            mu += anom[elem][2]*rec['Num']
        rho += rec['Num']*scatLen
    return 28.179*rho/vol,0.1*mu/vol,28.179*fpp/vol
def NCScattDen(Elements,vol,wave=0.):
    '''Estimate neutron scattering density from molecular formula & volume;
    ignores valence, but includes anomalous (Breit-Wigner/Lynn-Seeman) effects.

    :param dict Elements: elements in molecular formula;
        each element must contain
        Num: number of atoms in formula
        Isotope/Isotopes: isotope name & scattering-length data
    :param float vol: molecular volume in A^3
    :param float wave: optional wavelength in A
    :returns: float rho: scattering density in 10^10cm^-2;
        if wave > 0 then includes b' contribution
    :returns: float mu: if wave>0 absorption coeff in cm^-1 ; otherwise 0
    :returns: float bpp: if wave>0 b" in 10^10cm^-2; otherwise 0
    '''
    rho = 0
    mu = 0
    bpp = 0
    for El in Elements:
        isoName = Elements[El]['Isotope']
        isoData = Elements[El]['Isotopes'][isoName]
        b0 = isoData['SL'][0]
        mu += isoData.get('SA',0.)*Elements[El]['Num']
        if wave and 'BW-LS' in isoData:
            # resonant scattering correction from Breit-Wigner parameters
            Re,Im,E0,gam,A,E1,B,E2 = isoData['BW-LS'][1:]
            Emev = 81.80703/wave**2     #neutron energy (meV) from wavelength
            T0 = Emev-E0
            T1 = Emev-E1
            T2 = Emev-E2
            D0 = T0**2+gam**2
            D1 = T1**2+gam**2
            D2 = T2**2+gam**2
            b0 += Re*(T0/D0+A*T1/D1+B*T2/D2)
            bpp += Im*(1/D0+A/D1+B/D2)
        else:
            bpp += isoData['SL'][1]     #NOTE(review): not scaled by Num, unlike rho - confirm intended
        rho += Elements[El]['Num']*b0
    if wave:
        mu *= wave
    return 100.*rho/vol,mu/vol,100.*bpp/vol
def wavekE(wavekE):
    '''Convert wavelength to energy & vice versa; the relation is its own
    inverse, so the same function does both directions.

    :param float wavekE: wavelength in A or energy in keV
    :returns: float: the other one
    '''
    hcFactor = 12.397639        #conversion constant used by this module
    return hcFactor/wavekE
def XAnomAbs(Elements,wave):
    '''Compute X-ray anomalous scattering terms for each element at the
    given wavelength.

    :param dict Elements: elements in molecular formula (keys are symbols)
    :param float wave: wavelength in A
    :returns: dict mapping element symbol to (f',f",mu) from G2el.FPcalc
    '''
    kE = wavekE(wave)       #convert wavelength to energy in keV
    return {El:G2el.FPcalc(G2el.GetXsectionCoeff(El),kE) for El in Elements}    #f',f", mu
################################################################################
#### Modulation math
################################################################################
def makeWaves(waveTypes,FSSdata,XSSdata,USSdata,MSSdata,Mast):
    '''Evaluate occupancy, position & Uij modulation waves on a 32-point
    Gauss-Legendre grid in tau.

    :param waveTypes: array nAtoms: 'Fourier','ZigZag' or 'Block'
    :param FSSdata: array 2 x atoms x waves (sin,cos terms)
    :param XSSdata: array 2x3 x atoms X waves (sin,cos terms)
    :param USSdata: array 2x6 x atoms X waves (sin,cos terms)
    :param MSSdata: array 2x3 x atoms X waves (sin,cos terms); not used here
    :param Mast: array orthogonalization matrix for Uij
    :returns: ngl,nWaves,Fmod,Xmod,Umod,Mmod,glTau,glWt; Mmod is always 1.0
    '''
    ngl = 32
    glTau,glWt = pwd.pygauleg(0.,1.,ngl)         #get Gauss-Legendre intervals & weights
    Ax = np.array(XSSdata[:3]).T   #atoms x waves x sin pos mods
    Bx = np.array(XSSdata[3:]).T   #...cos pos mods
    Af = np.array(FSSdata[0]).T    #sin frac mods x waves x atoms
    Bf = np.array(FSSdata[1]).T    #cos frac mods...
    Au = Mast*np.array(G2lat.U6toUij(USSdata[:6])).T   #atoms x waves x sin Uij mods as betaij
    Bu = Mast*np.array(G2lat.U6toUij(USSdata[6:])).T   #...cos Uij mods as betaij
    nWaves = [Af.shape[1],Ax.shape[1],Au.shape[1]]
    if nWaves[0]:
        tauF = np.arange(1.,nWaves[0]+1)[:,nxs]*glTau  #Fwaves x ngl
        FmodA = Af[:,:,nxs]*np.sin(twopi*tauF[nxs,:,:])   #atoms X Fwaves X ngl
        FmodB = Bf[:,:,nxs]*np.cos(twopi*tauF[nxs,:,:])
        Fmod = np.sum(1.0+FmodA+FmodB,axis=1)             #atoms X ngl; sum waves
    else:
        Fmod = 1.0
    XmodZ = np.zeros((Ax.shape[0],Ax.shape[1],3,ngl))
    XmodA = np.zeros((Ax.shape[0],Ax.shape[1],3,ngl))
    XmodB = np.zeros((Ax.shape[0],Ax.shape[1],3,ngl))
    for iatm in range(Ax.shape[0]):
        nx = 0
        if 'ZigZag' in waveTypes[iatm]:
            nx = 1      #first "wave" slot holds the ZigZag Tminmax/XYZmax values
            Tmm = Ax[iatm][0][:2]
            XYZmax = np.array([Ax[iatm][0][2],Bx[iatm][0][0],Bx[iatm][0][1]])
            XmodZ[iatm][0] += posZigZag(glTau,Tmm,XYZmax).T
        elif 'Block' in waveTypes[iatm]:
            nx = 1      #ditto for Block waves
            Tmm = Ax[iatm][0][:2]
            XYZmax = np.array([Ax[iatm][0][2],Bx[iatm][0][0],Bx[iatm][0][1]])
            XmodZ[iatm][0] += posBlock(glTau,Tmm,XYZmax).T
        tauX = np.arange(1.,nWaves[1]+1-nx)[:,nxs]*glTau  #Xwaves x ngl
        if nx:
            XmodA[iatm][:-nx] = Ax[iatm,nx:,:,nxs]*np.sin(twopi*tauX[nxs,:,nxs,:]) #atoms X waves X 3 X ngl
            XmodB[iatm][:-nx] = Bx[iatm,nx:,:,nxs]*np.cos(twopi*tauX[nxs,:,nxs,:]) #ditto
        else:
            XmodA[iatm] = Ax[iatm,:,:,nxs]*np.sin(twopi*tauX[nxs,:,nxs,:]) #atoms X waves X 3 X ngl
            XmodB[iatm] = Bx[iatm,:,:,nxs]*np.cos(twopi*tauX[nxs,:,nxs,:]) #ditto
    Xmod = np.sum(XmodA+XmodB+XmodZ,axis=1)                #atoms X 3 X ngl; sum waves
    Xmod = np.swapaxes(Xmod,1,2)
    if nWaves[2]:
        tauU = np.arange(1.,nWaves[2]+1)[:,nxs]*glTau     #Uwaves x ngl
        UmodA = Au[:,:,:,:,nxs]*np.sin(twopi*tauU[nxs,:,nxs,nxs,:]) #atoms x waves x 3x3 x ngl
        UmodB = Bu[:,:,:,:,nxs]*np.cos(twopi*tauU[nxs,:,nxs,nxs,:]) #ditto
        Umod = np.swapaxes(np.sum(UmodA+UmodB,axis=1),1,3)      #atoms x 3x3 x ngl; sum waves
    else:
        Umod = 1.0
    Mmod = 1.0
    return ngl,nWaves,Fmod,Xmod,Umod,Mmod,glTau,glWt
def Modulation(H,HP,nWaves,Fmod,Xmod,Umod,glTau,glWt):
    '''Compute real & imaginary modulation contributions to structure
    factors by Gauss-Legendre integration over tau.

    :param H: array nRefBlk x ops X hklt
    :param HP: array nRefBlk x ops X hklt proj to hkl
    :param nWaves: list of numbers of frac, pos & Uij waves
    :param Fmod: array 2 x atoms x waves (sin,cos terms)
    :param Xmod: array atoms X 3 X ngl
    :param Umod: array atoms x 3x3 x ngl
    :param glTau,glWt: arrays Gauss-Lorentzian pos & wts
    :returns: np.array [cosHA,sinHA]: 2 x refBlk x SGops x atoms
    '''
    if nWaves[2]:       #Uij modulation -> Debye-Waller-like damping factor
        if len(HP.shape) > 2:
            HbH = np.exp(-np.sum(HP[:,:,nxs,nxs,:]*np.inner(HP,Umod),axis=-1)) # refBlk x ops x atoms x ngl add Overhauser corr.?
        else:
            HbH = np.exp(-np.sum(HP[:,nxs,nxs,:]*np.inner(HP,Umod),axis=-1)) # refBlk x ops x atoms x ngl add Overhauser corr.?
    else:
        HbH = 1.0
    HdotX = np.inner(HP,Xmod)                    #refBlk x ops x atoms X ngl
    if len(H.shape) > 2:        #blocks of reflections
        D = H[:,:,3:]*glTau[nxs,nxs,:]              #m*e*tau; refBlk x ops X ngl
        HdotXD = twopi*(HdotX+D[:,:,nxs,:])
    else:                       #single reflections
        D = H[:,3:]*glTau[nxs,:]              #m*e*tau; refBlk x ops X ngl
        HdotXD = twopi*(HdotX+D[:,nxs,:])
    cosHA = np.sum(Fmod*HbH*np.cos(HdotXD)*glWt,axis=-1)       #real part; refBlk X ops x atoms; sum for G-L integration
    sinHA = np.sum(Fmod*HbH*np.sin(HdotXD)*glWt,axis=-1)       #imag part; ditto
    return np.array([cosHA,sinHA])             # 2 x refBlk x SGops x atoms
def ModulationTw(H,HP,nWaves,Fmod,Xmod,Umod,glTau,glWt):
    '''Compute real & imaginary modulation contributions to structure
    factors for twinned data by Gauss-Legendre integration over tau.

    :param H: array nRefBlk x tw x ops X hklt
    :param HP: array nRefBlk x tw x ops X hklt proj to hkl
    :param nWaves: list of numbers of frac, pos & Uij waves
    :param Fmod: array 2 x atoms x waves (sin,cos terms)
    :param Xmod: array atoms X ngl X 3
    :param Umod: array atoms x ngl x 3x3
    :param glTau,glWt: arrays Gauss-Lorentzian pos & wts
    :returns: np.array [cosHA,sinHA]: 2 x refBlk x SGops x atoms
    '''
    if nWaves[2]:       #Uij modulation -> Debye-Waller-like damping factor
        if len(HP.shape) > 3:   #Blocks of reflections
            HbH = np.exp(-np.sum(HP[:,:,nxs,nxs,:]*np.inner(HP,Umod),axis=-1)) # refBlk x ops x atoms x ngl add Overhauser corr.?
        else:   #single reflections
            HbH = np.exp(-np.sum(HP[:,nxs,nxs,:]*np.inner(HP,Umod),axis=-1)) # refBlk x ops x atoms x ngl add Overhauser corr.?
    else:
        HbH = 1.0
    HdotX = np.inner(HP,Xmod)                    #refBlk x tw x ops x atoms X ngl
    if len(H.shape) > 3:
        D = glTau*H[:,:,:,3:,nxs]              #m*e*tau; refBlk x tw x ops X ngl
        HdotXD = twopi*(HdotX+D[:,:,:,nxs,:])
    else:
        D = H*glTau[nxs,:]              #m*e*tau; refBlk x ops X ngl
        #NOTE(review): uses all of H here, not H[:,3:] as in Modulation - confirm intended
        HdotXD = twopi*(HdotX+D[:,nxs,:])
    cosHA = np.sum(Fmod*HbH*np.cos(HdotXD)*glWt,axis=-1)       #real part; refBlk X ops x atoms; sum for G-L integration
    sinHA = np.sum(Fmod*HbH*np.sin(HdotXD)*glWt,axis=-1)       #imag part; ditto
    return np.array([cosHA,sinHA])             # 2 x refBlk x SGops x atoms
def makeWavesDerv(ngl,waveTypes,FSSdata,XSSdata,USSdata,MSSdata,Mast):
    '''Build the tau-grid sin/cos basis arrays needed for modulation
    derivatives (companion to makeWaves).

    :param int ngl: number of Gauss-Legendre points in tau
    :param waveTypes: array nAtoms: 'Fourier','ZigZag' or 'Block'
    :param FSSdata: array 2 x atoms x waves (sin,cos terms)
    :param XSSdata: array 2x3 x atoms X waves (sin,cos terms)
    :param USSdata: array 2x6 x atoms X waves (sin,cos terms)
    :param MSSdata: array 2x3 x atoms X waves (sin,cos terms); not used here
    :param Mast: array orthogonalization matrix for Uij
    :returns: waveShapes,[StauF,CtauF],[StauX,CtauX,ZtauXt,ZtauXx],
      [StauU,CtauU],UmodA+UmodB
    '''
    glTau,glWt = pwd.pygauleg(0.,1.,ngl)         #get Gauss-Legendre intervals & weights
    waveShapes = [FSSdata.T.shape,XSSdata.T.shape,USSdata.T.shape]
    Af = np.array(FSSdata[0]).T    #sin frac mods x waves x atoms
    Bf = np.array(FSSdata[1]).T    #cos frac mods...
    Ax = np.array(XSSdata[:3]).T   #...cos pos mods x waves x atoms
    Bx = np.array(XSSdata[3:]).T   #...cos pos mods
    Au = Mast*np.array(G2lat.U6toUij(USSdata[:6])).T   #atoms x waves x sin Uij mods
    Bu = Mast*np.array(G2lat.U6toUij(USSdata[6:])).T   #...cos Uij mods
    nWaves = [Af.shape[1],Ax.shape[1],Au.shape[1]]
    StauX = np.zeros((Ax.shape[0],Ax.shape[1],3,ngl))    #atoms x waves x 3 x ngl
    CtauX = np.zeros((Ax.shape[0],Ax.shape[1],3,ngl))
    ZtauXt = np.zeros((Ax.shape[0],2,3,ngl))               #atoms x Tminmax x 3 x ngl
    ZtauXx = np.zeros((Ax.shape[0],3,ngl))                 #atoms x XYZmax x ngl
    for iatm in range(Ax.shape[0]):
        nx = 0
        if 'ZigZag' in waveTypes[iatm]:
            nx = 1      #first "wave" slot holds ZigZag Tminmax/XYZmax values
            Tmm = Ax[iatm][0][:2]
            XYZmax = np.array([Ax[iatm][0][2],Bx[iatm][0][0],Bx[iatm][0][1]])
            ZtauXt[iatm],ZtauXx[iatm] = posZigZagDerv(glTau,Tmm,XYZmax)
        elif 'Block' in waveTypes[iatm]:
            nx = 1      #ditto for Block waves
            Tmm = Ax[iatm][0][:2]
            XYZmax = np.array([Ax[iatm][0][2],Bx[iatm][0][0],Bx[iatm][0][1]])
            ZtauXt[iatm],ZtauXx[iatm] = posBlockDerv(glTau,Tmm,XYZmax)
        tauX = np.arange(1.,nWaves[1]+1-nx)[:,nxs]*glTau  #Xwaves x ngl
        if nx:
            StauX[iatm][:-nx] = np.ones_like(Ax)[iatm,nx:,:,nxs]*np.sin(twopi*tauX)[nxs,:,nxs,:]   #atoms X waves X 3(xyz) X ngl
            CtauX[iatm][:-nx] = np.ones_like(Bx)[iatm,nx:,:,nxs]*np.cos(twopi*tauX)[nxs,:,nxs,:]   #ditto
        else:
            StauX[iatm] = np.ones_like(Ax)[iatm,:,:,nxs]*np.sin(twopi*tauX)[nxs,:,nxs,:]   #atoms X waves X 3(xyz) X ngl
            CtauX[iatm] = np.ones_like(Bx)[iatm,:,:,nxs]*np.cos(twopi*tauX)[nxs,:,nxs,:]   #ditto
    if nWaves[0]:
        tauF = np.arange(1.,nWaves[0]+1)[:,nxs]*glTau  #Fwaves x ngl
        StauF = np.ones_like(Af)[:,:,nxs]*np.sin(twopi*tauF)[nxs,:,:]   #also dFmod/dAf
        CtauF = np.ones_like(Bf)[:,:,nxs]*np.cos(twopi*tauF)[nxs,:,:]   #also dFmod/dBf
    else:
        StauF = 1.0
        CtauF = 1.0
    if nWaves[2]:
        tauU = np.arange(1.,nWaves[2]+1)[:,nxs]*glTau     #Uwaves x ngl
        StauU = np.ones_like(Au)[:,:,:,:,nxs]*np.sin(twopi*tauU)[nxs,:,nxs,nxs,:]   #also dUmodA/dAu
        CtauU = np.ones_like(Bu)[:,:,:,:,nxs]*np.cos(twopi*tauU)[nxs,:,nxs,nxs,:]   #also dUmodB/dBu
        UmodA = Au[:,:,:,:,nxs]*StauU #atoms x waves x 3x3 x ngl
        UmodB = Bu[:,:,:,:,nxs]*CtauU #ditto
        #derivs need to be ops x atoms x waves x 6uij; ops x atoms x waves x ngl x 6uij before sum
        StauU = np.rollaxis(np.rollaxis(np.swapaxes(StauU,2,4),-1),-1)
        CtauU = np.rollaxis(np.rollaxis(np.swapaxes(CtauU,2,4),-1),-1)
    else:
        StauU = 1.0
        CtauU = 1.0
        UmodA = 0.
        UmodB = 0.
    return waveShapes,[StauF,CtauF],[StauX,CtauX,ZtauXt,ZtauXx],[StauU,CtauU],UmodA+UmodB
def ModulationDerv(H,HP,Hij,nWaves,waveShapes,Fmod,Xmod,UmodAB,SCtauF,SCtauX,SCtauU,glTau,glWt):
    '''Derivatives of the modulation structure-factor terms w/r to the wave
    coefficients (single block of reflections form).

    :param array H: ops X hklt
    :param array HP: ops X hklt proj to hkl
    :param array Hij: 2pi^2[a*^2h^2 b*^2k^2 c*^2l^2 a*b*hk a*c*hl b*c*kl] of projected hklm to hkl space
    :param list nWaves: numbers of frac, pos & Uij waves
    :param list waveShapes: coefficient-array shapes from makeWavesDerv
    :returns: [dGdMfC,dGdMfS],[dGdMxC,dGdMxS],[dGdMuC,dGdMuS],[dGdMzC,dGdMzS]
      real/imag derivative pairs for frac, pos, Uij & ZigZag/Block parameters
    '''
    Mf = [H.shape[0],]+list(waveShapes[0])    #=[ops,atoms,waves,2] (sin+cos frac mods)
    dGdMfC = np.zeros(Mf)
    dGdMfS = np.zeros(Mf)
    Mx = [H.shape[0],]+list(waveShapes[1])   #=[ops,atoms,waves,6] (sin+cos pos mods)
    dGdMxC = np.zeros(Mx)
    dGdMxS = np.zeros(Mx)
    Mu = [H.shape[0],]+list(waveShapes[2])    #=[ops,atoms,waves,12] (sin+cos Uij mods)
    dGdMuC = np.zeros(Mu)
    dGdMuS = np.zeros(Mu)
    D = twopi*H[:,3][:,nxs]*glTau[nxs,:]              #m*e*tau; ops X ngl
    HdotX = twopi*np.inner(HP,Xmod)        #ops x atoms X ngl
    HdotXD = HdotX+D[:,nxs,:]
    if nWaves[2]:
        Umod = np.swapaxes((UmodAB),2,4)      #atoms x waves x ngl x 3x3 (symmetric so I can do this!)
        # single evaluation (this sum was computed twice in the original)
        HuH = np.sum(HP[:,nxs,nxs,nxs]*np.inner(HP,Umod),axis=-1)    #ops x atoms x waves x ngl
        HbH = np.exp(-np.sum(HuH,axis=-2)) # ops x atoms x ngl; sum waves - OK vs Modulation version
        part1 = -np.exp(-HuH)*Fmod    #ops x atoms x waves x ngl
        dUdAu = Hij[:,nxs,nxs,nxs,:]*np.rollaxis(G2lat.UijtoU6(SCtauU[0]),0,4)[nxs,:,:,:,:]    #ops x atoms x waves x ngl x 6sinUij
        dUdBu = Hij[:,nxs,nxs,nxs,:]*np.rollaxis(G2lat.UijtoU6(SCtauU[1]),0,4)[nxs,:,:,:,:]    #ops x atoms x waves x ngl x 6cosUij
        dGdMuCa = np.sum(part1[:,:,:,:,nxs]*dUdAu*np.cos(HdotXD)[:,:,nxs,:,nxs]*glWt[nxs,nxs,nxs,:,nxs],axis=-2)    #ops x atoms x waves x 6uij; G-L sum
        dGdMuCb = np.sum(part1[:,:,:,:,nxs]*dUdBu*np.cos(HdotXD)[:,:,nxs,:,nxs]*glWt[nxs,nxs,nxs,:,nxs],axis=-2)
        dGdMuC = np.concatenate((dGdMuCa,dGdMuCb),axis=-1)   #ops x atoms x waves x 12uij
        dGdMuSa = np.sum(part1[:,:,:,:,nxs]*dUdAu*np.sin(HdotXD)[:,:,nxs,:,nxs]*glWt[nxs,nxs,nxs,:,nxs],axis=-2)    #ops x atoms x waves x 6uij; G-L sum
        dGdMuSb = np.sum(part1[:,:,:,:,nxs]*dUdBu*np.sin(HdotXD)[:,:,nxs,:,nxs]*glWt[nxs,nxs,nxs,:,nxs],axis=-2)
        dGdMuS = np.concatenate((dGdMuSa,dGdMuSb),axis=-1)   #ops x atoms x waves x 12uij
    else:
        HbH = np.ones_like(HdotX)
    dHdXA = twopi*HP[:,nxs,nxs,nxs,:]*np.swapaxes(SCtauX[0],-1,-2)[nxs,:,:,:,:]    #ops x atoms x sine waves x ngl x xyz
    dHdXB = twopi*HP[:,nxs,nxs,nxs,:]*np.swapaxes(SCtauX[1],-1,-2)[nxs,:,:,:,:]    #ditto - cos waves
    # ops x atoms x waves x 2xyz - real part - good
    dGdMxCa = -np.sum((Fmod*HbH)[:,:,nxs,:,nxs]*(dHdXA*np.sin(HdotXD)[:,:,nxs,:,nxs])*glWt[nxs,nxs,nxs,:,nxs],axis=-2)
    dGdMxCb = -np.sum((Fmod*HbH)[:,:,nxs,:,nxs]*(dHdXB*np.sin(HdotXD)[:,:,nxs,:,nxs])*glWt[nxs,nxs,nxs,:,nxs],axis=-2)
    dGdMxC = np.concatenate((dGdMxCa,dGdMxCb),axis=-1)
    # ops x atoms x waves x 2xyz - imag part - good
    dGdMxSa = np.sum((Fmod*HbH)[:,:,nxs,:,nxs]*(dHdXA*np.cos(HdotXD)[:,:,nxs,:,nxs])*glWt[nxs,nxs,nxs,:,nxs],axis=-2)
    dGdMxSb = np.sum((Fmod*HbH)[:,:,nxs,:,nxs]*(dHdXB*np.cos(HdotXD)[:,:,nxs,:,nxs])*glWt[nxs,nxs,nxs,:,nxs],axis=-2)
    dGdMxS = np.concatenate((dGdMxSa,dGdMxSb),axis=-1)
    # ZigZag/Block waves - problems here?
    dHdXZt = -twopi*HP[:,nxs,nxs,nxs,:]*np.swapaxes(SCtauX[2],-1,-2)[nxs,:,:,:,:]   #ops x atoms x ngl x 2(ZigZag/Block Tminmax)
    dHdXZx = twopi*HP[:,nxs,nxs,:]*np.swapaxes(SCtauX[3],-1,-2)[nxs,:,:,:]          #ops x atoms x ngl x 3(ZigZag/Block XYZmax)
    dGdMzCt = -np.sum((Fmod*HbH)[:,:,nxs,:,nxs]*(dHdXZt*np.sin(HdotXD)[:,:,nxs,:,nxs])*glWt[nxs,nxs,nxs,:,nxs],axis=-2)
    dGdMzCx = -np.sum((Fmod*HbH)[:,:,:,nxs]*(dHdXZx*np.sin(HdotXD)[:,:,:,nxs])*glWt[nxs,nxs,:,nxs],axis=-2)
    dGdMzC = np.concatenate((np.sum(dGdMzCt,axis=-1),dGdMzCx),axis=-1)
    dGdMzSt = np.sum((Fmod*HbH)[:,:,nxs,:,nxs]*(dHdXZt*np.cos(HdotXD)[:,:,nxs,:,nxs])*glWt[nxs,nxs,nxs,:,nxs],axis=-2)
    dGdMzSx = np.sum((Fmod*HbH)[:,:,:,nxs]*(dHdXZx*np.cos(HdotXD)[:,:,:,nxs])*glWt[nxs,nxs,:,nxs],axis=-2)
    dGdMzS = np.concatenate((np.sum(dGdMzSt,axis=-1),dGdMzSx),axis=-1)
    return [dGdMfC,dGdMfS],[dGdMxC,dGdMxS],[dGdMuC,dGdMuS],[dGdMzC,dGdMzS]
def ModulationDerv2(H,HP,Hij,nWaves,waveShapes,Fmod,Xmod,UmodAB,SCtauF,SCtauX,SCtauU,glTau,glWt):
    '''Derivatives of the modulation structure-factor terms w/r to the wave
    coefficients (refBlk x ops form of ModulationDerv).

    :param array H: refBlk x ops X hklt
    :param array HP: refBlk x ops X hklt proj to hkl
    :param array Hij: 2pi^2[a*^2h^2 b*^2k^2 c*^2l^2 a*b*hk a*c*hl b*c*kl] of projected hklm to hkl space
    :param list nWaves: numbers of frac, pos & Uij waves
    :param list waveShapes: coefficient-array shapes from makeWavesDerv
    :returns: [dGdMfC,dGdMfS],[dGdMxC,dGdMxS],[dGdMuC,dGdMuS],[dGdMzC,dGdMzS]
      real/imag derivative pairs for frac, pos, Uij & ZigZag/Block parameters
    '''
    Mf = [H.shape[0],]+list(waveShapes[0])    #=[ops,atoms,waves,2] (sin+cos frac mods)
    dGdMfC = np.zeros(Mf)
    dGdMfS = np.zeros(Mf)
    Mx = [H.shape[0],]+list(waveShapes[1])   #=[ops,atoms,waves,6] (sin+cos pos mods)
    dGdMxC = np.zeros(Mx)
    dGdMxS = np.zeros(Mx)
    Mu = [H.shape[0],]+list(waveShapes[2])    #=[ops,atoms,waves,12] (sin+cos Uij mods)
    dGdMuC = np.zeros(Mu)
    dGdMuS = np.zeros(Mu)
    D = twopi*H[:,:,3,nxs]*glTau[nxs,nxs,:]              #m*e*tau; refBlk x ops X ngl
    HdotX = twopi*np.inner(HP,Xmod)        #refBlk x ops x atoms X ngl
    HdotXD = HdotX+D[:,:,nxs,:]
    if nWaves[2]:
        Umod = np.swapaxes((UmodAB),2,4)      #atoms x waves x ngl x 3x3 (symmetric so I can do this!)
        # single evaluation (this sum was computed twice in the original)
        HuH = np.sum(HP[:,:,nxs,nxs,nxs]*np.inner(HP,Umod),axis=-1)    #refBlk x ops x atoms x waves x ngl
        HbH = np.exp(-np.sum(HuH,axis=-2)) #refBlk x ops x atoms x ngl; sum waves - OK vs Modulation version
        part1 = -np.exp(-HuH)*Fmod    #refBlk x ops x atoms x waves x ngl
        dUdAu = Hij[:,:,nxs,nxs,nxs,:]*np.rollaxis(G2lat.UijtoU6(SCtauU[0]),0,4)[nxs,nxs,:,:,:,:]    #ops x atoms x waves x ngl x 6sinUij
        dUdBu = Hij[:,:,nxs,nxs,nxs,:]*np.rollaxis(G2lat.UijtoU6(SCtauU[1]),0,4)[nxs,nxs,:,:,:,:]    #ops x atoms x waves x ngl x 6cosUij
        dGdMuCa = np.sum(part1[:,:,:,:,:,nxs]*dUdAu*np.cos(HdotXD)[:,:,:,nxs,:,nxs]*glWt[nxs,nxs,nxs,nxs,:,nxs],axis=-2)    #ops x atoms x waves x 6uij; G-L sum
        dGdMuCb = np.sum(part1[:,:,:,:,:,nxs]*dUdBu*np.cos(HdotXD)[:,:,:,nxs,:,nxs]*glWt[nxs,nxs,nxs,nxs,:,nxs],axis=-2)
        dGdMuC = np.concatenate((dGdMuCa,dGdMuCb),axis=-1)   #ops x atoms x waves x 12uij
        dGdMuSa = np.sum(part1[:,:,:,:,:,nxs]*dUdAu*np.sin(HdotXD)[:,:,:,nxs,:,nxs]*glWt[nxs,nxs,nxs,nxs,:,nxs],axis=-2)    #ops x atoms x waves x 6uij; G-L sum
        dGdMuSb = np.sum(part1[:,:,:,:,:,nxs]*dUdBu*np.sin(HdotXD)[:,:,:,nxs,:,nxs]*glWt[nxs,nxs,nxs,nxs,:,nxs],axis=-2)
        dGdMuS = np.concatenate((dGdMuSa,dGdMuSb),axis=-1)   #ops x atoms x waves x 12uij
    else:
        HbH = np.ones_like(HdotX)
    dHdXA = twopi*HP[:,:,nxs,nxs,nxs,:]*np.swapaxes(SCtauX[0],-1,-2)[nxs,nxs,:,:,:,:]    #ops x atoms x sine waves x ngl x xyz
    dHdXB = twopi*HP[:,:,nxs,nxs,nxs,:]*np.swapaxes(SCtauX[1],-1,-2)[nxs,nxs,:,:,:,:]    #ditto - cos waves
    # ops x atoms x waves x 2xyz - real part - good
    dGdMxCa = -np.sum((Fmod*HbH)[:,:,:,nxs,:,nxs]*(dHdXA*np.sin(HdotXD)[:,:,:,nxs,:,nxs])*glWt[nxs,nxs,nxs,nxs,:,nxs],axis=-2)
    dGdMxCb = -np.sum((Fmod*HbH)[:,:,:,nxs,:,nxs]*(dHdXB*np.sin(HdotXD)[:,:,:,nxs,:,nxs])*glWt[nxs,nxs,nxs,nxs,:,nxs],axis=-2)
    dGdMxC = np.concatenate((dGdMxCa,dGdMxCb),axis=-1)
    # ops x atoms x waves x 2xyz - imag part - good
    dGdMxSa = np.sum((Fmod*HbH)[:,:,:,nxs,:,nxs]*(dHdXA*np.cos(HdotXD)[:,:,:,nxs,:,nxs])*glWt[nxs,nxs,nxs,nxs,:,nxs],axis=-2)
    dGdMxSb = np.sum((Fmod*HbH)[:,:,:,nxs,:,nxs]*(dHdXB*np.cos(HdotXD)[:,:,:,nxs,:,nxs])*glWt[nxs,nxs,nxs,nxs,:,nxs],axis=-2)
    dGdMxS = np.concatenate((dGdMxSa,dGdMxSb),axis=-1)
    # ZigZag/Block waves - problems here?
    dHdXZt = -twopi*HP[:,:,nxs,nxs,nxs,:]*np.swapaxes(SCtauX[2],-1,-2)[nxs,nxs,:,:,:,:]   #ops x atoms x ngl x 2(ZigZag/Block Tminmax)
    dHdXZx = twopi*HP[:,:,nxs,nxs,:]*np.swapaxes(SCtauX[3],-1,-2)[nxs,nxs,:,:,:]          #ops x atoms x ngl x 3(ZigZag/Block XYZmax)
    dGdMzCt = -np.sum((Fmod*HbH)[:,:,:,nxs,:,nxs]*(dHdXZt*np.sin(HdotXD)[:,:,:,nxs,:,nxs])*glWt[nxs,nxs,nxs,nxs,:,nxs],axis=-2)
    dGdMzCx = -np.sum((Fmod*HbH)[:,:,:,:,nxs]*(dHdXZx*np.sin(HdotXD)[:,:,:,:,nxs])*glWt[nxs,nxs,nxs,:,nxs],axis=-2)
    dGdMzC = np.concatenate((np.sum(dGdMzCt,axis=-1),dGdMzCx),axis=-1)
    dGdMzSt = np.sum((Fmod*HbH)[:,:,:,nxs,:,nxs]*(dHdXZt*np.cos(HdotXD)[:,:,:,nxs,:,nxs])*glWt[nxs,nxs,nxs,nxs,:,nxs],axis=-2)
    dGdMzSx = np.sum((Fmod*HbH)[:,:,:,:,nxs]*(dHdXZx*np.cos(HdotXD)[:,:,:,:,nxs])*glWt[nxs,nxs,nxs,:,nxs],axis=-2)
    dGdMzS = np.concatenate((np.sum(dGdMzSt,axis=-1),dGdMzSx),axis=-1)
    return [dGdMfC,dGdMfS],[dGdMxC,dGdMxS],[dGdMuC,dGdMuS],[dGdMzC,dGdMzS]
def posFourier(tau,psin,pcos):
    '''Sum of positional Fourier displacement waves evaluated at tau;
    wave i has frequency (i+1) in tau.
    '''
    sinSum = np.sum(np.array([s[:,nxs]*np.sin(2*np.pi*(i+1)*tau) for i,s in enumerate(psin)]),axis=0)
    cosSum = np.sum(np.array([c[:,nxs]*np.cos(2*np.pi*(i+1)*tau) for i,c in enumerate(pcos)]),axis=0)
    return sinSum+cosSum
def posZigZag(T,Tmm,Xmax):
    '''Sawtooth (ZigZag) positional modulation: rises from -Xmax to +Xmax
    over (Tmm[0],Tmm[1]] (mod 1), falls back over the rest of the period.
    '''
    riseWidth = Tmm[1]-Tmm[0]
    slopeUp = 2.*Xmax/riseWidth
    slopeDn = 2.*Xmax/(1.-riseWidth)
    vals = []
    for t in T:
        rising = -Xmax+slopeUp*((t-Tmm[0])%1.)
        falling = Xmax-slopeDn*((t-Tmm[1])%1.)
        vals.append(np.where(Tmm[0] < t%1. <= Tmm[1],rising,falling))
    return np.array(vals)
def posZigZagDerv(T,Tmm,Xmax):
    '''Derivatives of the ZigZag positional modulation (posZigZag) w/r to
    the interval ends Tmm & the displacement amplitudes Xmax.

    :param array T: modulation times (tau grid)
    :param array Tmm: [Tmin,Tmax] ends of the rising branch
    :param array Xmax: maximum displacements (3-vector)
    :returns: dAdT (2 x 3 x len(T)) & dAdX (3 x len(T)) derivative arrays
    '''
    DT = Tmm[1]-Tmm[0]
    Su = 2.*Xmax/DT         #up-slope
    Sd = 2.*Xmax/(1.-DT)    #down-slope
    dAdT = np.zeros((2,3,len(T)))
    #NOTE(review): the extra -1 in (t-Tmm[0]-1) differs from posZigZag's rising form - confirm intended
    dAdT[0] = np.array([np.where(Tmm[0] < t <= Tmm[1],Su*(t-Tmm[0]-1)/DT,-Sd*(t-Tmm[1])/(1.-DT)) for t in T]).T
    dAdT[1] = np.array([np.where(Tmm[0] < t <= Tmm[1],-Su*(t-Tmm[0])/DT,Sd*(t-Tmm[1])/(1.-DT)) for t in T]).T
    dAdX = np.ones(3)[:,nxs]*np.array([np.where(Tmm[0] < t%1. <= Tmm[1],-1.+2.*(t-Tmm[0])/DT,1.-2.*(t-Tmm[1])%1./DT) for t in T])
    return dAdT,dAdX
def posBlock(T,Tmm,Xmax):
    '''Block (crenel) positional modulation: -Xmax inside (Tmm[0],Tmm[1]]
    (mod 1), +Xmax outside.
    '''
    return np.array([np.where(Tmm[0] < t%1. <= Tmm[1],-Xmax,Xmax) for t in T])
def posBlockDerv(T,Tmm,Xmax):
    '''Derivatives of the Block positional modulation (posBlock) w/r to the
    step positions Tmm & the displacement amplitudes Xmax.

    :param array T: modulation times; np.searchsorted requires T be sorted
    :param array Tmm: [Tmin,Tmax] step positions
    :param array Xmax: maximum displacements (3-vector)
    :returns: dAdT (2 x 3 x len(T)) & dAdX (3 x len(T)) derivative arrays
    '''
    dAdT = np.zeros((2,3,len(T)))
    ind = np.searchsorted(T,Tmm)    #grid indices where the steps fall
    dAdT[0,:,ind[0]] = -Xmax/len(T)
    dAdT[1,:,ind[1]] = Xmax/len(T)
    dAdX = np.ones(3)[:,nxs]*np.array([np.where(Tmm[0] < t <= Tmm[1],-1.,1.) for t in T]) #OK
    return dAdT,dAdX
def fracCrenel(tau,Toff,Twid):
    '''Crenel (step) occupancy modulation: 1 inside the window of width Twid
    starting at Toff (mod 1), 0 outside.
    '''
    phase = (tau-Toff)%1.
    return np.where(phase<Twid,1.,0.)
def fracFourier(tau,fsin,fcos):
    '''Fourier occupancy (site fraction) modulation at tau; the single-wave
    case is handled without the extra-axis broadcast.
    '''
    if len(fsin) == 1:
        sinPart = np.array([fsin[0]*np.sin(2.*np.pi*tau)])
        cosPart = np.array([fcos[0]*np.cos(2.*np.pi*tau)])
    else:
        sinPart = np.array([fs[:,nxs]*np.sin(2.*np.pi*(i+1)*tau) for i,fs in enumerate(fsin)])
        cosPart = np.array([fc[:,nxs]*np.cos(2.*np.pi*(i+1)*tau) for i,fc in enumerate(fcos)])
    return np.sum(sinPart,axis=0)+np.sum(cosPart,axis=0)
def ApplyModulation(data,tau):
    '''Applies modulation to drawing atom positions & Uijs for given tau

    :param dict data: phase data structure; uses data['General'],
      data['Drawing'] & data['Atoms']
    :param float tau: modulation phase t
    :returns: (drawAtoms,Fade): the updated drawing atoms list & an array of
      site-fraction multipliers, one per drawing atom
    '''
    generalData = data['General']
    cell = generalData['Cell'][1:7]
    G,g = G2lat.cell2Gmat(cell)
    SGData = generalData['SGData']
    SSGData = generalData['SSGData']
    cx,ct,cs,cia = generalData['AtomPtrs']
    drawingData = data['Drawing']
    modul = generalData['SuperVec'][0]
    dcx,dct,dcs,dci = drawingData['atomPtrs']
    atoms = data['Atoms']
    drawAtoms = drawingData['Atoms']
    Fade = np.ones(len(drawAtoms))
    for atom in atoms:
        atxyz = np.array(atom[cx:cx+3])
        atuij = np.array(atom[cia+2:cia+8])
        # modulation wave data for this atom (in atom[-1]['SS1'])
        Sfrac = atom[-1]['SS1']['Sfrac']
        Spos = atom[-1]['SS1']['Spos']
        Sadp = atom[-1]['SS1']['Sadp']
        if generalData['Type'] == 'magnetic':
            Smag = atom[-1]['SS1']['Smag']
            atmom = np.array(atom[cx+4:cx+7])
        indx = FindAtomIndexByIDs(drawAtoms,dci,[atom[cia+8],],True)
        for ind in indx:        #all drawing atoms generated from this atom
            drawatom = drawAtoms[ind]
            opr = drawatom[dcs-1]
            sop,ssop,icent,cent,unit = G2spc.OpsfromStringOps(opr,SGData,SSGData)
            drxyz = (np.inner(sop[0],atxyz)+sop[1]+cent)*icent+np.array(unit)
            tauT = G2spc.getTauT(tau,sop,ssop,drxyz,modul)[-1]
            tauT *= icent       #invert wave on -1
            wave = np.zeros(3)
            uwave = np.zeros(6)
            mom = np.zeros(3)
            if len(Sfrac):      #site fraction (occupancy) modulation
                scof = []
                ccof = []
                waveType = Sfrac[0]
                for i,sfrac in enumerate(Sfrac[1:]):
                    if not i and 'Crenel' in waveType:
                        Fade[ind] += fracCrenel(tauT,sfrac[0][0],sfrac[0][1])
                    else:
                        scof.append(sfrac[0][0])
                        ccof.append(sfrac[0][1])
                    if len(scof):
                        Fade[ind] += np.sum(fracFourier(tauT,scof,ccof))
            if len(Spos):       #positional modulation
                scof = []
                ccof = []
                waveType = Spos[0]
                for i,spos in enumerate(Spos[1:]):
                    if waveType in ['ZigZag','Block'] and not i:
                        # first wave carries the Tminmax/XYZmax parameters
                        Tminmax = spos[0][:2]
                        XYZmax = np.array(spos[0][2:])
                        if waveType == 'Block':
                            wave = np.array(posBlock([tauT,],Tminmax,XYZmax))[0]
                        elif waveType == 'ZigZag':
                            wave = np.array(posZigZag([tauT,],Tminmax,XYZmax))[0]
                    else:
                        scof.append(spos[0][:3])
                        ccof.append(spos[0][3:])
                    if len(scof):
                        wave += np.sum(posFourier(tauT,np.array(scof),np.array(ccof)),axis=1)
            if generalData['Type'] == 'magnetic' and len(Smag):     #moment modulation
                scof = []
                ccof = []
                waveType = Smag[0]
                for i,spos in enumerate(Smag[1:]):
                    scof.append(spos[0][:3])
                    ccof.append(spos[0][3:])
                if len(scof):       #ToDo: something odd here, but it works
                    if icent < 0:
                        mom += np.sum(posFourier(.25-tauT,np.array(scof),np.array(ccof)),axis=1)
                    else:
                        mom += np.sum(posFourier(tauT*icent,np.array(scof),np.array(ccof)),axis=1)
            if len(Sadp):       #Uij (thermal parameter) modulation
                scof = []
                ccof = []
                waveType = Sadp[0]
                for i,sadp in enumerate(Sadp[1:]):
                    scof.append(sadp[0][:6])
                    ccof.append(sadp[0][6:])
                uwave += np.sum(posFourier(tauT,np.array(scof),np.array(ccof)),axis=1)
            if atom[cia] == 'A':        #anisotropic atom: update Uij too
                X,U = G2spc.ApplyStringOps(opr,SGData,atxyz+wave,atuij+uwave)
                drawatom[dcx:dcx+3] = X
                drawatom[dci-6:dci] = U
            else:
                X = G2spc.ApplyStringOps(opr,SGData,atxyz+wave)
                drawatom[dcx:dcx+3] = X
            if generalData['Type'] == 'magnetic':
                M = G2spc.ApplyStringOpsMom(opr,SGData,atmom+mom)
                drawatom[dcx+3:dcx+6] = M
    return drawAtoms,Fade
# gauleg.py Gauss Legendre numerical quadrature, x and w computation
# integrate from a to b using n evaluations of the function f(x)
# usage: from gauleg import gaulegf
# x,w = gaulegf( a, b, n)
# area = 0.0
# for i in range(1,n+1): # yes, 1..n
# area += w[i]*f(x[i])
def gaulegf(a, b, n):
    '''Gauss-Legendre quadrature abscissas & weights on [a,b] via Newton
    refinement of the Legendre-polynomial roots (Numerical Recipes gauleg).

    :param float a: lower integration limit
    :param float b: upper integration limit
    :param int n: number of quadrature points
    :returns: np.array x, np.array w, each of length n+1; entries 1..n hold
      the abscissas & weights (x[0] & w[0] are unused placeholders)
    '''
    x = [0.0]*(n+1)  # x[0] unused; was range(n+1), which breaks item assignment on Py3
    w = [0.0]*(n+1)  # w[0] unused
    eps = 3.0E-14
    m = (n+1)//2     # integer division (was /, a float on Py3); roots come in +/- pairs
    xm = 0.5*(b+a)
    xl = 0.5*(b-a)
    for i in range(1,m+1):
        z = math.cos(3.141592654*(i-0.25)/(n+0.5))  # initial root estimate
        while True:     # Newton iteration on P_n(z)
            p1 = 1.0
            p2 = 0.0
            for j in range(1,n+1):      # recurrence for Legendre polynomials
                p3 = p2
                p2 = p1
                p1 = ((2.0*j-1.0)*z*p2-(j-1.0)*p3)/j
            pp = n*(z*p1-p2)/(z*z-1.0)  # derivative P_n'(z)
            z1 = z
            z = z1 - p1/pp
            if abs(z-z1) <= eps:
                break
        x[i] = xm - xl*z
        x[n+1-i] = xm + xl*z    # symmetric partner root
        w[i] = 2.0*xl/((1.0-z*z)*pp*pp)
        w[n+1-i] = w[i]
    return np.array(x), np.array(w)
# end gaulegf
def BessJn(nmax,x):
    ''' compute Bessel function J(n,x) from scipy routine & recurrance relation
    returns sequence of J(n,x) for n in range [-nmax...0...nmax]

    :param integer nmax: maximul order for Jn(x)
    :param float x: argument for Jn(x)
    :returns numpy array: [J(-nmax,x)...J(0,x)...J(nmax,x)]
    '''
    import scipy.special as sp
    out = np.zeros(2*nmax+1)        #center index nmax holds J0
    out[nmax] = sp.j0(x)
    out[nmax+1] = sp.j1(x)
    out[nmax-1] = -out[nmax+1]      #J(-1) = -J(1)
    for order in range(2,nmax+1):   #upward recurrence for higher orders
        out[nmax+order] = 2*(order-1)*out[nmax+order-1]/x-out[nmax+order-2]
        out[nmax-order] = out[nmax+order]*(-1)**order   #J(-n) = (-1)^n J(n)
    return out
def BessIn(nmax,x):
    ''' compute modified Bessel function I(n,x) from scipy routines & recurrance relation
    returns sequence of I(n,x) for n in range [-nmax...0...nmax]

    :param integer nmax: maximul order for In(x)
    :param float x: argument for In(x)
    :returns numpy array: [I(-nmax,x)...I(0,x)...I(nmax,x)]
    '''
    import scipy.special as sp
    out = np.zeros(2*nmax+1)        #center index nmax holds I0
    out[nmax] = sp.i0(x)
    out[nmax+1] = sp.i1(x)
    out[nmax-1] = out[nmax+1]       #I(-1) = I(1)
    for order in range(2,nmax+1):   #upward recurrence for higher orders
        out[nmax+order] = out[nmax+order-2]-2*(order-1)*out[nmax+order-1]/x
        out[nmax-order] = out[nmax+order]   #I(-n) = I(n)
    return out
################################################################################
##### distance, angle, planes, torsion stuff
################################################################################
def CalcDist(distance_dict, distance_atoms, parmDict):
    '''Compute an interatomic distance from refined parameters.

    :param dict distance_dict: phase id ('pId'), signed symmetry-op code
      ('symNo'), unit-cell offset ('cellNo') & space-group info ('SGData')
    :param list distance_atoms: the two atom sequence numbers (origin, target)
    :param dict parmDict: parameters: cell terms A0-A5 & atom coordinates
    :returns: float distance; 0. if parmDict is empty
    '''
    if not parmDict:
        return 0.
    pId = distance_dict['pId']
    cellA = [parmDict['%s::A%d'%(pId,j)] for j in range(6)]
    Amat = G2lat.cell2AB(G2lat.A2cell(cellA))[0]
    Oxyz = [parmDict['%s::A%s:%d'%(pId,ax,distance_atoms[0])] for ax in ('x','y','z')]
    Txyz = [parmDict['%s::A%s:%d'%(pId,ax,distance_atoms[1])] for ax in ('x','y','z')]
    symNo = distance_dict['symNo']
    inv = -1 if symNo < 0 else 1        # negative code flags an inverted operator
    symNo = abs(symNo)
    cen,op = symNo//100,symNo%100-1     # encoded as 100*centering + (op index + 1)
    M,T = distance_dict['SGData']['SGOps'][op]
    shift = T*inv + distance_dict['SGData']['SGCen'][cen]
    shift += distance_dict['cellNo']
    Txyz = np.inner(M*inv,Txyz) + shift
    return np.sqrt(np.sum(np.inner(Amat,(Txyz-Oxyz))**2))
def CalcDistDeriv(distance_dict, distance_atoms, parmDict):
    '''Derivatives of an interatomic distance w/r the two atoms' coordinates;
    unpacks the parameter dictionary & delegates to getDistDerv.

    :param dict distance_dict: 'pId', 'symNo', 'cellNo' & 'SGData' (see CalcDist)
    :param list distance_atoms: the two atom sequence numbers (origin, target)
    :param dict parmDict: parameters: cell terms A0-A5 & atom coordinates
    :returns: derivative array from getDistDerv, or None if parmDict is empty
    '''
    if not parmDict:
        return None
    pId = distance_dict['pId']
    cellA = [parmDict['%s::A%d'%(pId,j)] for j in range(6)]
    Amat = G2lat.cell2AB(G2lat.A2cell(cellA))[0]
    Oxyz = [parmDict['%s::A%s:%d'%(pId,ax,distance_atoms[0])] for ax in ('x','y','z')]
    Txyz = [parmDict['%s::A%s:%d'%(pId,ax,distance_atoms[1])] for ax in ('x','y','z')]
    return getDistDerv(Oxyz,Txyz,Amat,distance_dict['cellNo'],
        distance_dict['symNo'],distance_dict['SGData'])
def CalcAngle(angle_dict, angle_atoms, parmDict):
    '''Compute the interatomic angle (deg) at atom angle_atoms[0] between the
    two atoms in angle_atoms[1], applying the symmetry ops in angle_dict.

    :param dict angle_dict: 'pId', 'symNo' (pair), 'cellNo' (pair) & 'SGData'
    :param list angle_atoms: [vertex seq no, [atom A seq no, atom B seq no]]
    :param dict parmDict: parameters: cell terms A0-A5 & atom coordinates
    :returns: float angle in degrees; 0. if parmDict is empty or a vector is null
    '''
    if not parmDict:
        return 0.
    pId = angle_dict['pId']
    cellA = [parmDict['%s::A%d'%(pId,j)] for j in range(6)]
    Amat = G2lat.cell2AB(G2lat.A2cell(cellA))[0]
    Oxyz = [parmDict['%s::A%s:%d'%(pId,ax,angle_atoms[0])] for ax in ('x','y','z')]
    Axyz = [parmDict['%s::A%s:%d'%(pId,ax,angle_atoms[1][0])] for ax in ('x','y','z')]
    Bxyz = [parmDict['%s::A%s:%d'%(pId,ax,angle_atoms[1][1])] for ax in ('x','y','z')]
    ABxyz = [Axyz,Bxyz]
    symNo = angle_dict['symNo']
    vec = np.zeros((2,3))
    for k in range(2):
        inv = -1 if symNo[k] < 0 else 1     # negative code flags inversion
        cen = inv*symNo[k]//100             # centering index
        op = inv*symNo[k]%100-1             # symmetry-operator index
        M,T = angle_dict['SGData']['SGOps'][op]
        shift = T*inv + angle_dict['SGData']['SGCen'][cen]
        shift += angle_dict['cellNo'][k]
        ABxyz[k] = np.inner(M*inv,ABxyz[k]) + shift
        vec[k] = np.inner(Amat,(ABxyz[k]-Oxyz))
        norm = np.sqrt(np.sum(vec[k]**2))
        if not norm:                        # coincident atoms: no angle defined
            return 0.
        vec[k] /= norm
    return acosd(np.sum(vec[0]*vec[1]))
def CalcAngleDeriv(angle_dict, angle_atoms, parmDict):
    '''Derivatives of an interatomic angle w/r the three atoms' coordinates;
    unpacks the parameter dictionary & delegates to getAngleDerv.

    :param dict angle_dict: 'pId', 'symNo' (pair), 'cellNo' (pair) & 'SGData'
    :param list angle_atoms: [vertex seq no, [atom A seq no, atom B seq no]]
    :param dict parmDict: parameters: cell terms A0-A5 & atom coordinates
    :returns: derivative array from getAngleDerv, or None if parmDict is empty
    '''
    if not parmDict:
        return None
    pId = angle_dict['pId']
    cellA = [parmDict['%s::A%d'%(pId,j)] for j in range(6)]
    Amat = G2lat.cell2AB(G2lat.A2cell(cellA))[0]
    Oxyz = [parmDict['%s::A%s:%d'%(pId,ax,angle_atoms[0])] for ax in ('x','y','z')]
    Axyz = [parmDict['%s::A%s:%d'%(pId,ax,angle_atoms[1][0])] for ax in ('x','y','z')]
    Bxyz = [parmDict['%s::A%s:%d'%(pId,ax,angle_atoms[1][1])] for ax in ('x','y','z')]
    return getAngleDerv(Oxyz,Axyz,Bxyz,Amat,angle_dict['cellNo'],
        angle_dict['symNo'],angle_dict['SGData'])
def getSyXYZ(XYZ,ops,SGData):
    '''Apply the symmetry operator encoded in each string of ops to the
    matching coordinate triplet of XYZ.

    :param np.array XYZ: sets of fractional coordinates
    :param list ops: text codes, one per point; '1' is the identity, otherwise
      '[-]NNN[+[i,j,k]]': sign = inversion, NNN = 100*centering + op no.,
      optional suffix = unit-cell translation (parsed with eval)
    :param dict SGData: space group information ('SGOps' & 'SGCen')
    :returns: np.array of transformed coordinates
    '''
    XYZout = np.zeros_like(XYZ)
    for i,(xyz,op) in enumerate(zip(XYZ,ops)):
        if op == '1':               # identity: copy straight through
            XYZout[i] = xyz
            continue
        oprs = op.split('+')
        # optional '+[i,j,k]' suffix is a unit-cell translation
        unit = np.array(list(eval(oprs[1]))) if len(oprs) > 1 else [0,0,0]
        syop = int(oprs[0])
        inv = syop//abs(syop)       # leading sign flags inversion
        syop *= inv
        cent,syop = syop//100,syop%100-1
        M,T = SGData['SGOps'][syop]
        XYZout[i] = (np.inner(M,xyz)+T)*inv + SGData['SGCen'][cent] + unit
    return XYZout
def getRestDist(XYZ,Amat):
    '''Distance between the first two points of XYZ after converting
    fractional coordinates to cartesian with Amat.

    :param np.array XYZ: sets of fractional coordinates (first two used)
    :param np.array Amat: crystal-to-cartesian transformation matrix
    :returns: float distance
    '''
    dvec = np.inner(Amat,XYZ[1]-XYZ[0])
    return np.sqrt(np.sum(dvec**2))
def getRestDeriv(Func,XYZ,Amat,ops,SGData):
    '''Central-difference derivatives of the restraint function Func with
    respect to every fractional coordinate in XYZ.

    :param callable Func: restraint function of (XYZ,Amat)
    :param np.array XYZ: fractional coordinates (perturbed in place & restored)
    :param np.array Amat: crystal-to-cartesian transformation matrix
    :param list ops: symmetry-op codes consumed by getSyXYZ
    :param dict SGData: space group information
    :returns: flattened np.array of derivatives, one per coordinate
    '''
    deriv = np.zeros((len(XYZ),3))
    step = 0.00001
    shifts = np.array([[step,0,0],[0,step,0],[0,0,step]])
    for j in range(len(XYZ)):
        for i,shift in enumerate(shifts):
            XYZ[j] -= shift
            minus = Func(getSyXYZ(XYZ,ops,SGData),Amat)
            XYZ[j] += 2*shift
            plus = Func(getSyXYZ(XYZ,ops,SGData),Amat)
            XYZ[j] -= shift                 # restore original coordinate
            # NOTE: sign convention is (f(-h)-f(+h))/2h as in the original
            deriv[j][i] = (minus-plus)/(2*step)
    return deriv.flatten()
def getRestAngle(XYZ,Amat):
    '''Angle (deg) at XYZ[1] defined by the three points XYZ[0],XYZ[1],XYZ[2]
    in cartesian space.

    :param np.array XYZ: three sets of fractional coordinates
    :param np.array Amat: crystal-to-cartesian transformation matrix
    :returns: float angle in degrees
    '''
    def unitVec(orig,dest):
        v = np.inner(Amat,dest-orig)
        return v/np.sqrt(np.sum(v**2))
    uA = unitVec(XYZ[1],XYZ[0])
    uB = unitVec(XYZ[1],XYZ[2])
    chord = np.sum((uB-uA)**2)          # squared chord between unit vectors
    cosang = (2.-chord)/2.              # law of cosines on the unit triangle
    cosang = max(cosang,-1.)            # clip rounding error at -1
    return acosd(cosang)
def getRestPlane(XYZ,Amat):
    '''Best-plane restraint value: smallest principal moment (scaled) of the
    atoms in XYZ about their centroid in cartesian space.

    :param list XYZ: sets of fractional coordinates
    :param np.array Amat: crystal-to-cartesian transformation matrix
    :returns: float smallest scaled eigenvalue of the moment matrix
    '''
    centroid = np.zeros(3)
    for pt in XYZ:
        centroid += pt
    centroid /= len(XYZ)
    cart = np.inner(Amat,np.array(XYZ)-centroid).T
    Zmat = np.zeros((3,3))
    for pt in cart:
        Zmat += np.outer(pt.T,pt)       # moment matrix about the centroid
    Evec,Emat = nl.eig(Zmat)
    Evec = np.sqrt(Evec)/(len(XYZ)-3)
    return Evec[np.argsort(Evec)[0]]    # smallest component
def getRestChiral(XYZ,Amat):
    '''Chiral volume: determinant of the three cartesian vectors from XYZ[0]
    to XYZ[1], XYZ[2] & XYZ[3]; the sign encodes the handedness.

    :param np.array XYZ: four sets of fractional coordinates
    :param np.array Amat: crystal-to-cartesian transformation matrix
    :returns: float signed chiral volume
    '''
    vecs = np.empty((3,3))
    for k in (1,2,3):
        vecs[k-1] = np.inner(XYZ[k]-XYZ[0],Amat)
    return nl.det(vecs)
def getRestTorsion(XYZ,Amat):
    '''Torsion angle (deg, mapped into [0,360)) for the four points in XYZ.

    :param np.array XYZ: four sets of fractional coordinates
    :param np.array Amat: crystal-to-cartesian transformation matrix
    :returns: float torsion angle in degrees
    '''
    bonds = np.empty((3,3))
    for k in (0,1,2):
        bonds[k] = np.inner(XYZ[k+1]-XYZ[k],Amat)
    D = nl.det(bonds)                   # sign sets the torsion handedness
    Mag = np.sqrt(np.sum(bonds*bonds,axis=1))
    P12 = np.sum(bonds[0]*bonds[1])/(Mag[0]*Mag[1])
    P13 = np.sum(bonds[0]*bonds[2])/(Mag[0]*Mag[2])
    P23 = np.sum(bonds[1]*bonds[2])/(Mag[1]*Mag[2])
    Ang = 1.0
    if abs(P12) < 1.0 and abs(P23) < 1.0:   # guard collinear bonds
        Ang = (P12*P23-P13)/(np.sqrt(1.-P12**2)*np.sqrt(1.-P23**2))
    return (acosd(Ang)*D/abs(D)+720.)%360.
def calcTorsionEnergy(TOR,Coeff=[]):
    '''Pseudo-potential energy for a torsion angle from a 3-term Gaussian
    description of the potential surface; used as a LS restraint penalty.

    :param float TOR: torsion angle in degrees
    :param list Coeff: 9 coefficients, three (magnitude,position,width)
      triplets; an empty list gives zero energy
    :returns: tuple (penalty,Eval): potential above the minimum-term
      magnitude (used for the penalty function) & total potential value
    '''
    eSum = 0.   # renamed from 'sum' to avoid shadowing the builtin
    Eval = 0.   # was unbound when Coeff is empty -> NameError on return
    if len(Coeff):
        cof = np.reshape(Coeff,(3,3)).T     # rows: magnitudes, positions, widths
        delt = TOR-cof[1]
        # wrap angle differences into [-180,180]
        delt = np.where(delt<-180.,delt+360.,delt)
        delt = np.where(delt>180.,delt-360.,delt)
        term = -cof[2]*delt**2
        val = cof[0]*np.exp(term/1000.0)
        pMax = cof[0][np.argmin(val)]       # magnitude of smallest-valued term
        Eval = np.sum(val)
        eSum = Eval-pMax
    return eSum,Eval
def getTorsionDeriv(XYZ,Amat,Coeff):
    '''Central-difference derivatives of the torsion pseudo-potential with
    respect to the four fractional coordinates in XYZ.

    :param np.array XYZ: four sets of fractional coordinates (restored on exit)
    :param np.array Amat: crystal-to-cartesian transformation matrix
    :param list Coeff: torsion pseudo-potential coefficients
    :returns: flattened np.array of d(penalty)/d(coordinate)
    '''
    deriv = np.zeros((len(XYZ),3))
    step = 0.00001
    shifts = np.array([[step,0,0],[0,step,0],[0,0,step]])
    for j in range(len(XYZ)):
        for i,shift in enumerate(shifts):
            XYZ[j] -= shift
            pminus,_ = calcTorsionEnergy(getRestTorsion(XYZ,Amat),Coeff)
            XYZ[j] += 2*shift
            pplus,_ = calcTorsionEnergy(getRestTorsion(XYZ,Amat),Coeff)
            XYZ[j] -= shift                 # restore original coordinate
            deriv[j][i] = (pplus-pminus)/(2*step)
    return deriv.flatten()
def getRestRama(XYZ,Amat):
    '''Compute the Ramachandran phi/psi torsion pair from a 5 atom string.

    :param nparray XYZ: crystallographic coordinates of 5 atoms
    :param nparray Amat: crystal to cartesian transformation matrix
    :returns: tuple (phi,psi): the two torsion angles in degrees
    '''
    # phi from atoms 0-3, psi from atoms 1-4
    return getRestTorsion(XYZ[:5],Amat),getRestTorsion(XYZ[1:],Amat)
def calcRamaEnergy(phi,psi,Coeff=[]):
    '''Pseudo-potential energy from a pair of torsion angles and a numerical
    description of the potential surface; used as a LS restraint penalty:
    :math:`Eval(\\phi,\\psi) = C[0]*exp(-V/1000)` where
    :math:`V = -C[3](\\phi-C[1])^2 - C[4](\\psi-C[2])^2 - 2 C[5](\\phi-C[1])(\\psi-C[2])`

    :param float phi: first torsion angle (:math:`\\phi`)
    :param float psi: second torsion angle (:math:`\\psi`)
    :param np.array Coeff: pseudo-potential coefficients, one 6-term row per term
    :returns: tuple (penalty,Eval): potential above its minimum (used for the
      penalty function) & total potential value
    '''
    penalty,Eval = 0.,0.
    if len(Coeff):
        cof = Coeff.T
        dPhi = phi-cof[1]
        dPsi = psi-cof[2]
        # wrap both angle differences into [-180,180]
        dPhi = np.where(dPhi<-180.,dPhi+360.,dPhi)
        dPhi = np.where(dPhi>180.,dPhi-360.,dPhi)
        dPsi = np.where(dPsi<-180.,dPsi+360.,dPsi)
        dPsi = np.where(dPsi>180.,dPsi-360.,dPsi)
        expo = -cof[3]*dPhi**2-cof[4]*dPsi**2-2.0*cof[5]*dPhi*dPsi
        val = cof[0]*np.exp(expo/1000.)
        Eval = np.sum(val)
        penalty = Eval-cof[0][np.argmin(val)]
    return penalty,Eval
def getRamaDeriv(XYZ,Amat,Coeff):
    '''Numerical derivatives of the torsion-pair pseudo-potential with
    respect to the crystallographic coordinates of the 5 atom sequence.

    :param nparray XYZ: crystallographic coordinates of 5 atoms (restored on exit)
    :param nparray Amat: crystal to cartesian transformation matrix
    :param np.array Coeff: pseudo potential coefficients
    :returns: flattened np.array of d(penalty)/d(coordinate) for the 5 atoms
    '''
    deriv = np.zeros((len(XYZ),3))
    step = 0.00001
    shifts = np.array([[step,0,0],[0,step,0],[0,0,step]])
    for j in range(len(XYZ)):
        for i,shift in enumerate(shifts):
            XYZ[j] -= shift
            phi,psi = getRestRama(XYZ,Amat)
            pminus,_ = calcRamaEnergy(phi,psi,Coeff)
            XYZ[j] += 2*shift
            phi,psi = getRestRama(XYZ,Amat)
            pplus,_ = calcRamaEnergy(phi,psi,Coeff)
            XYZ[j] -= shift                 # restore original coordinate
            deriv[j][i] = (pplus-pminus)/(2*step)
    return deriv.flatten()
def getRestPolefig(ODFln,SamSym,Grid):
    '''Compute a pole figure on a Grid x Grid net from spherical harmonics
    coefficients; points outside the unit circle are zeroed.

    :param dict ODFln: spherical harmonics coefficients
    :param str SamSym: sample symmetry code
    :param int Grid: number of points along each edge of the net
    :returns: tuple of (Grid,Grid) arrays (R,P,Z): radius, azimuth & intensity
    '''
    X,Y = np.meshgrid(np.linspace(1.,-1.,Grid),np.linspace(-1.,1.,Grid))
    R = np.sqrt(X**2+Y**2).flatten()
    P = atan2d(Y,X).flatten()
    # equal-angle radial mapping inside the unit circle; 0 outside
    R = np.where(R <= 1.,2.*atand(R),0.0)
    Z = np.reshape(G2lat.polfcal(ODFln,SamSym,R,P),(Grid,Grid))
    return np.reshape(R,(Grid,Grid)),np.reshape(P,(Grid,Grid)),Z
def getRestPolefigDerv(HKL,Grid,SHCoeff):
    '''Derivatives of a pole-figure restraint -- placeholder, not implemented.

    :param HKL: reflection indices (unused)
    :param Grid: pole-figure grid size (unused)
    :param SHCoeff: spherical harmonics coefficients (unused)
    :returns: None (stub; does nothing)
    '''
    pass
def getDistDerv(Oxyz,Txyz,Amat,Tunit,Top,SGData):
    '''Central-difference derivatives of the O-T interatomic distance with
    respect to both sets of fractional coordinates.

    :param array Oxyz: origin-atom fractional coordinates (restored on exit)
    :param array Txyz: target-atom fractional coordinates (restored on exit)
    :param np.array Amat: crystal-to-cartesian transformation matrix
    :param array Tunit: unit-cell translation applied to the target atom
    :param int Top: signed sym-op code: +/-(100*centering + op index + 1)
    :param dict SGData: space group information
    :returns: np.array(6): d(dist)/d(Ox,Oy,Oz,Tx,Ty,Tz)
    '''
    def dist(Ox,Tx):
        # symmetry-transform the target, then cartesian distance to origin
        TxT = inv*(np.inner(M,Tx)+T)+C+Tunit
        return np.sqrt(np.sum(np.inner(Amat,(TxT-Ox))**2))
    inv = Top//abs(Top)
    cent = abs(Top)//100
    M,T = SGData['SGOps'][abs(Top)%100-1]
    C = SGData['SGCen'][cent]
    step = .00001
    deriv = np.zeros(6)
    for i in (0,1,2):
        Oxyz[i] -= step
        d0 = dist(Oxyz,Txyz)
        Oxyz[i] += 2*step
        deriv[i] = (dist(Oxyz,Txyz)-d0)/(2.*step)
        Oxyz[i] -= step                 # restore
        Txyz[i] -= step
        d0 = dist(Oxyz,Txyz)
        Txyz[i] += 2*step
        deriv[i+3] = (dist(Oxyz,Txyz)-d0)/(2.*step)
        Txyz[i] -= step                 # restore
    return deriv
def getAngleDerv(Oxyz,Axyz,Bxyz,Amat,Tunit,symNo,SGData):
    '''Central-difference derivatives of the A-O-B interatomic angle with
    respect to the fractional coordinates of the three atoms.

    :param list Oxyz: fractional coordinates of the vertex atom O
    :param list Axyz: fractional coordinates of atom A
    :param list Bxyz: fractional coordinates of atom B
    :param np.array Amat: crystal-to-cartesian transformation matrix
    :param list Tunit: unit-cell translations for A & B
    :param list symNo: signed sym-op codes for A & B (sign flags inversion)
    :param dict SGData: space group information
    :returns: np.array(9): d(angle)/d(coordinate) for O, A, B in turn
    '''
    def calcAngle(Oxyz,ABxyz,Amat,Tunit,symNo,SGData):
        # symmetry-transform A & B, form unit vectors O->A & O->B, then acosd
        vec = np.zeros((2,3))
        for i in range(2):
            inv = 1
            if symNo[i] < 0:
                inv = -1
            cen = inv*symNo[i]//100         # centering index
            op = inv*symNo[i]%100-1         # symmetry-operator index
            M,T = SGData['SGOps'][op]
            D = T*inv+SGData['SGCen'][cen]
            D += Tunit[i]
            ABxyz[i] = np.inner(M*inv,ABxyz[i])+D
            vec[i] = np.inner(Amat,(ABxyz[i]-Oxyz))
            dist = np.sqrt(np.sum(vec[i]**2))
            if not dist:                    # degenerate geometry: no angle
                return 0.
            vec[i] /= dist
        angle = acosd(np.sum(vec[0]*vec[1]))
#        GSASIIpath.IPyBreak()
        return angle
    dx = .00001
    deriv = np.zeros(9)
    # each coordinate is shifted -dx then +dx and restored afterwards
    for i in [0,1,2]:
        Oxyz[i] -= dx
        a0 = calcAngle(Oxyz,[Axyz,Bxyz],Amat,Tunit,symNo,SGData)
        Oxyz[i] += 2*dx
        deriv[i] = (calcAngle(Oxyz,[Axyz,Bxyz],Amat,Tunit,symNo,SGData)-a0)/(2.*dx)
        Oxyz[i] -= dx
        Axyz[i] -= dx
        a0 = calcAngle(Oxyz,[Axyz,Bxyz],Amat,Tunit,symNo,SGData)
        Axyz[i] += 2*dx
        deriv[i+3] = (calcAngle(Oxyz,[Axyz,Bxyz],Amat,Tunit,symNo,SGData)-a0)/(2.*dx)
        Axyz[i] -= dx
        Bxyz[i] -= dx
        a0 = calcAngle(Oxyz,[Axyz,Bxyz],Amat,Tunit,symNo,SGData)
        Bxyz[i] += 2*dx
        deriv[i+6] = (calcAngle(Oxyz,[Axyz,Bxyz],Amat,Tunit,symNo,SGData)-a0)/(2.*dx)
        Bxyz[i] -= dx
    return deriv
def getAngSig(VA,VB,Amat,SGData,covData={}):
    '''Compute the interatomic angle defined by vectors VA & VB (which share
    their origin atom) and its esd from the covariance matrix, if supplied.

    :param list VA: (origin names, origin xyz, target names, target xyz,
      unit-cell shift, signed sym-op code) for the first target atom
    :param list VB: the same six items for the second target atom
    :param np.array Amat: crystal-to-cartesian transformation matrix
    :param dict SGData: space group information
    :param dict covData: 'covMatrix' & 'varyList'; if empty the esd is 0.0
    :returns: tuple (angle,esd) in degrees
    '''
    def calcVec(Ox,Tx,U,inv,C,M,T,Amat):
        # cartesian vector from origin to the symmetry-transformed target
        TxT = inv*(np.inner(M,Tx)+T)+C+U
        return np.inner(Amat,(TxT-Ox))
    def calcAngle(Ox,TxA,TxB,unitA,unitB,invA,CA,MA,TA,invB,CB,MB,TB,Amat):
        VecA = calcVec(Ox,TxA,unitA,invA,CA,MA,TA,Amat)
        VecA /= np.sqrt(np.sum(VecA**2))
        VecB = calcVec(Ox,TxB,unitB,invB,CB,MB,TB,Amat)
        VecB /= np.sqrt(np.sum(VecB**2))
        # chord between unit vectors -> cosine via the law of cosines
        edge = VecB-VecA
        edge = np.sum(edge**2)
        angle = (2.-edge)/2.
        angle = max(angle,-1.)      # clip rounding error at -1
        return acosd(angle)
    OxAN,OxA,TxAN,TxA,unitA,TopA = VA
    OxBN,OxB,TxBN,TxB,unitB,TopB = VB
    invA = invB = 1
    # decode signed sym-op codes: sign = inversion, //100 = centering, %100-1 = op
    invA = TopA//abs(TopA)
    invB = TopB//abs(TopB)
    centA = abs(TopA)//100
    centB = abs(TopB)//100
    opA = abs(TopA)%100-1
    opB = abs(TopB)%100-1
    MA,TA = SGData['SGOps'][opA]
    MB,TB = SGData['SGOps'][opB]
    CA = SGData['SGCen'][centA]
    CB = SGData['SGCen'][centB]
    if 'covMatrix' in covData:
        covMatrix = covData['covMatrix']
        varyList = covData['varyList']
        AngVcov = getVCov(OxAN+TxAN+TxBN,varyList,covMatrix)
        dx = .00001
        dadx = np.zeros(9)
        Ang = calcAngle(OxA,TxA,TxB,unitA,unitB,invA,CA,MA,TA,invB,CB,MB,TB,Amat)
        # central-difference derivatives w/r all 9 coordinates; each restored
        for i in [0,1,2]:
            OxA[i] -= dx
            a0 = calcAngle(OxA,TxA,TxB,unitA,unitB,invA,CA,MA,TA,invB,CB,MB,TB,Amat)
            OxA[i] += 2*dx
            dadx[i] = (calcAngle(OxA,TxA,TxB,unitA,unitB,invA,CA,MA,TA,invB,CB,MB,TB,Amat)-a0)/(2*dx)
            OxA[i] -= dx
            TxA[i] -= dx
            a0 = calcAngle(OxA,TxA,TxB,unitA,unitB,invA,CA,MA,TA,invB,CB,MB,TB,Amat)
            TxA[i] += 2*dx
            dadx[i+3] = (calcAngle(OxA,TxA,TxB,unitA,unitB,invA,CA,MA,TA,invB,CB,MB,TB,Amat)-a0)/(2*dx)
            TxA[i] -= dx
            TxB[i] -= dx
            a0 = calcAngle(OxA,TxA,TxB,unitA,unitB,invA,CA,MA,TA,invB,CB,MB,TB,Amat)
            TxB[i] += 2*dx
            dadx[i+6] = (calcAngle(OxA,TxA,TxB,unitA,unitB,invA,CA,MA,TA,invB,CB,MB,TB,Amat)-a0)/(2*dx)
            TxB[i] -= dx
        sigAng = np.sqrt(np.inner(dadx,np.inner(AngVcov,dadx)))
        if sigAng < 0.01:   # esds below 0.01 deg are reported as zero
            sigAng = 0.0
        return Ang,sigAng
    else:
        return calcAngle(OxA,TxA,TxB,unitA,unitB,invA,CA,MA,TA,invB,CB,MB,TB,Amat),0.0
def GetDistSig(Oatoms,Atoms,Amat,SGData,covData={}):
    '''Compute an interatomic distance and, when covariance data is given,
    its esd by numerical differentiation.

    :param list Oatoms: atoms as [seq,x,y,z,...]; atom[-1] holds parameter names
    :param list Atoms: atoms where atom[-1] = (sym-op code, unit-cell shift)
    :param np.array Amat: crystal-to-cartesian transformation matrix
    :param dict SGData: space group information
    :param dict covData: 'covMatrix' & 'varyList'; if absent sig = -0.001
    :returns: tuple (Dist,sig); sig = -0.001 flags "no esd available"
    '''
    def calcDist(Atoms,SyOps,Amat):
        # symmetry-transform both atoms, convert to cartesian, return |r1-r0|
        XYZ = []
        for i,atom in enumerate(Atoms):
            Inv,M,T,C,U = SyOps[i]
            XYZ.append(np.array(atom[1:4]))
            XYZ[-1] = Inv*(np.inner(M,np.array(XYZ[-1]))+T)+C+U
            XYZ[-1] = np.inner(Amat,XYZ[-1]).T
        V1 = XYZ[1]-XYZ[0]
        return np.sqrt(np.sum(V1**2))
    SyOps = []
    names = []
    # decode each atom's sym-op code: sign=inversion, %100-1=op, //100=centering
    for i,atom in enumerate(Oatoms):
        names += atom[-1]
        Op,unit = Atoms[i][-1]
        inv = Op//abs(Op)
        m,t = SGData['SGOps'][abs(Op)%100-1]
        c = SGData['SGCen'][abs(Op)//100]
        SyOps.append([inv,m,t,c,unit])
    Dist = calcDist(Oatoms,SyOps,Amat)
    sig = -0.001
    if 'covMatrix' in covData:
        dx = .00001
        dadx = np.zeros(6)
        for i in range(6):
            ia = i//3
            ix = i%3
            Oatoms[ia][ix+1] += dx
            a0 = calcDist(Oatoms,SyOps,Amat)
            Oatoms[ia][ix+1] -= 2*dx
            dadx[i] = (calcDist(Oatoms,SyOps,Amat)-a0)/(2.*dx)
            # restore the coordinate; it was previously left shifted by -dx,
            # corrupting the caller's atom list (GetTorsionSig restores)
            Oatoms[ia][ix+1] += dx
        covMatrix = covData['covMatrix']
        varyList = covData['varyList']
        DistVcov = getVCov(names,varyList,covMatrix)
        sig = np.sqrt(np.inner(dadx,np.inner(DistVcov,dadx)))
        if sig < 0.001:
            sig = -0.001
    return Dist,sig
def GetAngleSig(Oatoms,Atoms,Amat,SGData,covData={}):
    '''Compute a 3-atom interatomic angle (at the middle atom) and, when
    covariance data is given, its esd by numerical differentiation.

    :param list Oatoms: atoms as [seq,x,y,z,...]; atom[-1] holds parameter names
    :param list Atoms: atoms where atom[-1] = (sym-op code, unit-cell shift)
    :param np.array Amat: crystal-to-cartesian transformation matrix
    :param dict SGData: space group information
    :param dict covData: 'covMatrix' & 'varyList'; if absent sig = -0.01
    :returns: tuple (Angle,sig) in degrees; sig = -0.01 flags "no esd"
    '''
    def calcAngle(Atoms,SyOps,Amat):
        # symmetry-transform the atoms, convert to cartesian & take the angle
        XYZ = []
        for i,atom in enumerate(Atoms):
            Inv,M,T,C,U = SyOps[i]
            XYZ.append(np.array(atom[1:4]))
            XYZ[-1] = Inv*(np.inner(M,np.array(XYZ[-1]))+T)+C+U
            XYZ[-1] = np.inner(Amat,XYZ[-1]).T
        V1 = XYZ[1]-XYZ[0]
        V1 /= np.sqrt(np.sum(V1**2))
        V2 = XYZ[1]-XYZ[2]
        V2 /= np.sqrt(np.sum(V2**2))
        V3 = V2-V1
        # chord between unit vectors -> cosine, clipped into [-1,1]
        cang = min(1.,max((2.-np.sum(V3**2))/2.,-1.))
        return acosd(cang)
    SyOps = []
    names = []
    # decode each atom's sym-op code: sign=inversion, %100-1=op, //100=centering
    for i,atom in enumerate(Oatoms):
        names += atom[-1]
        Op,unit = Atoms[i][-1]
        inv = Op//abs(Op)
        m,t = SGData['SGOps'][abs(Op)%100-1]
        c = SGData['SGCen'][abs(Op)//100]
        SyOps.append([inv,m,t,c,unit])
    Angle = calcAngle(Oatoms,SyOps,Amat)
    sig = -0.01
    if 'covMatrix' in covData:
        dx = .00001
        dadx = np.zeros(9)
        for i in range(9):
            ia = i//3
            ix = i%3
            Oatoms[ia][ix+1] += dx
            a0 = calcAngle(Oatoms,SyOps,Amat)
            Oatoms[ia][ix+1] -= 2*dx
            dadx[i] = (calcAngle(Oatoms,SyOps,Amat)-a0)/(2.*dx)
            # restore the coordinate; it was previously left shifted by -dx,
            # corrupting the caller's atom list (GetTorsionSig restores)
            Oatoms[ia][ix+1] += dx
        covMatrix = covData['covMatrix']
        varyList = covData['varyList']
        AngVcov = getVCov(names,varyList,covMatrix)
        sig = np.sqrt(np.inner(dadx,np.inner(AngVcov,dadx)))
        if sig < 0.01:
            sig = -0.01
    return Angle,sig
def GetTorsionSig(Oatoms,Atoms,Amat,SGData,covData={}):
    '''Compute a 4-atom torsion angle and, when covariance data is given,
    its esd by numerical differentiation.

    :param list Oatoms: atoms as [seq,x,y,z,...]; atom[-1] holds parameter names
    :param list Atoms: atoms where atom[-1] = (sym-op code, unit-cell shift)
    :param np.array Amat: crystal-to-cartesian transformation matrix
    :param dict SGData: space group information
    :param dict covData: 'covMatrix' & 'varyList'; if absent sig = -0.01
    :returns: tuple (Tors,sig) in degrees; sig = -0.01 flags "no esd"
    '''
    def calcTorsion(Atoms,SyOps,Amat):
        # symmetry-transform the 4 atoms to cartesian, form bond unit vectors
        XYZ = []
        for i,atom in enumerate(Atoms):
            Inv,M,T,C,U = SyOps[i]
            XYZ.append(np.array(atom[1:4]))
            XYZ[-1] = Inv*(np.inner(M,np.array(XYZ[-1]))+T)+C+U
            XYZ[-1] = np.inner(Amat,XYZ[-1]).T
        V1 = XYZ[1]-XYZ[0]
        V2 = XYZ[2]-XYZ[1]
        V3 = XYZ[3]-XYZ[2]
        V1 /= np.sqrt(np.sum(V1**2))
        V2 /= np.sqrt(np.sum(V2**2))
        V3 /= np.sqrt(np.sum(V3**2))
        M = np.array([V1,V2,V3])
        D = nl.det(M)       # sign of the determinant sets the handedness
        P12 = np.dot(V1,V2)
        P13 = np.dot(V1,V3)
        P23 = np.dot(V2,V3)
        Tors = acosd((P12*P23-P13)/(np.sqrt(1.-P12**2)*np.sqrt(1.-P23**2)))*D/abs(D)
        return Tors
    SyOps = []
    names = []
    # decode each atom's sym-op code: sign=inversion, %100-1=op, //100=centering
    for i,atom in enumerate(Oatoms):
        names += atom[-1]
        Op,unit = Atoms[i][-1]
        inv = Op//abs(Op)
        m,t = SGData['SGOps'][abs(Op)%100-1]
        c = SGData['SGCen'][abs(Op)//100]
        SyOps.append([inv,m,t,c,unit])
    Tors = calcTorsion(Oatoms,SyOps,Amat)
    sig = -0.01
    if 'covMatrix' in covData:
        # central-difference derivatives w/r all 12 coordinates; each restored
        dx = .00001
        dadx = np.zeros(12)
        for i in range(12):
            ia = i//3
            ix = i%3
            Oatoms[ia][ix+1] -= dx
            a0 = calcTorsion(Oatoms,SyOps,Amat)
            Oatoms[ia][ix+1] += 2*dx
            dadx[i] = (calcTorsion(Oatoms,SyOps,Amat)-a0)/(2.*dx)
            Oatoms[ia][ix+1] -= dx      # restore the coordinate
        covMatrix = covData['covMatrix']
        varyList = covData['varyList']
        TorVcov = getVCov(names,varyList,covMatrix)
        sig = np.sqrt(np.inner(dadx,np.inner(TorVcov,dadx)))
        if sig < 0.01:
            sig = -0.01
    return Tors,sig
def GetDATSig(Oatoms,Atoms,Amat,SGData,covData={}):
    '''Compute a distance, angle or torsion -- chosen by the number of atoms
    supplied (2, 3 or 4) -- and its esd from the covariance matrix, if given.

    :param list Oatoms: atoms as [seq,x,y,z,...]; atom[-1] holds parameter names
    :param list Atoms: atoms where atom[-1] = (sym-op code, unit-cell shift)
    :param np.array Amat: crystal-to-cartesian transformation matrix
    :param dict SGData: space group information
    :param dict covData: 'covMatrix' & 'varyList'; if absent a negative
      placeholder esd is returned (-0.001 distance, -0.01 angle/torsion)
    :returns: tuple (Val,sig)
    '''
    def calcDist(Atoms,SyOps,Amat):
        # symmetry-transform both atoms, convert to cartesian, return |r1-r0|
        XYZ = []
        for i,atom in enumerate(Atoms):
            Inv,M,T,C,U = SyOps[i]
            XYZ.append(np.array(atom[1:4]))
            XYZ[-1] = Inv*(np.inner(M,np.array(XYZ[-1]))+T)+C+U
            XYZ[-1] = np.inner(Amat,XYZ[-1]).T
        V1 = XYZ[1]-XYZ[0]
        return np.sqrt(np.sum(V1**2))
    def calcAngle(Atoms,SyOps,Amat):
        # angle at atom 1 between atoms 0 & 2, via the unit-vector chord
        XYZ = []
        for i,atom in enumerate(Atoms):
            Inv,M,T,C,U = SyOps[i]
            XYZ.append(np.array(atom[1:4]))
            XYZ[-1] = Inv*(np.inner(M,np.array(XYZ[-1]))+T)+C+U
            XYZ[-1] = np.inner(Amat,XYZ[-1]).T
        V1 = XYZ[1]-XYZ[0]
        V1 /= np.sqrt(np.sum(V1**2))
        V2 = XYZ[1]-XYZ[2]
        V2 /= np.sqrt(np.sum(V2**2))
        V3 = V2-V1
        cang = min(1.,max((2.-np.sum(V3**2))/2.,-1.))
        return acosd(cang)
    def calcTorsion(Atoms,SyOps,Amat):
        # torsion about the 1-2 bond; determinant sign sets the handedness
        XYZ = []
        for i,atom in enumerate(Atoms):
            Inv,M,T,C,U = SyOps[i]
            XYZ.append(np.array(atom[1:4]))
            XYZ[-1] = Inv*(np.inner(M,np.array(XYZ[-1]))+T)+C+U
            XYZ[-1] = np.inner(Amat,XYZ[-1]).T
        V1 = XYZ[1]-XYZ[0]
        V2 = XYZ[2]-XYZ[1]
        V3 = XYZ[3]-XYZ[2]
        V1 /= np.sqrt(np.sum(V1**2))
        V2 /= np.sqrt(np.sum(V2**2))
        V3 /= np.sqrt(np.sum(V3**2))
        M = np.array([V1,V2,V3])
        D = nl.det(M)
        P12 = np.dot(V1,V2)
        P13 = np.dot(V1,V3)
        P23 = np.dot(V2,V3)
        Tors = acosd((P12*P23-P13)/(np.sqrt(1.-P12**2)*np.sqrt(1.-P23**2)))*D/abs(D)
        return Tors
    SyOps = []
    names = []
    # decode each atom's sym-op code: sign=inversion, %100-1=op, //100=centering
    for i,atom in enumerate(Oatoms):
        names += atom[-1]
        Op,unit = Atoms[i][-1]
        inv = Op//abs(Op)
        m,t = SGData['SGOps'][abs(Op)%100-1]
        c = SGData['SGCen'][abs(Op)//100]
        SyOps.append([inv,m,t,c,unit])
    M = len(Oatoms)
    if M == 2:
        Val = calcDist(Oatoms,SyOps,Amat)
    elif M == 3:
        Val = calcAngle(Oatoms,SyOps,Amat)
    else:
        Val = calcTorsion(Oatoms,SyOps,Amat)
    # placeholder esds by type: distance (M=2), angle (M=3), torsion (M=4);
    # was indexed sigVals[M-3], which wrapped M=2 to -0.01 and gave M=3
    # -0.001 -- the opposite of GetDistSig/GetAngleSig defaults
    sigVals = [-0.001,-0.01,-0.01]
    sig = sigVals[M-2]
    if 'covMatrix' in covData:
        dx = .00001
        N = M*3
        dadx = np.zeros(N)
        for i in range(N):
            ia = i//3
            ix = i%3
            Oatoms[ia][ix+1] += dx
            if M == 2:
                a0 = calcDist(Oatoms,SyOps,Amat)
            elif M == 3:
                a0 = calcAngle(Oatoms,SyOps,Amat)
            else:
                a0 = calcTorsion(Oatoms,SyOps,Amat)
            Oatoms[ia][ix+1] -= 2*dx
            if M == 2:
                dadx[i] = (calcDist(Oatoms,SyOps,Amat)-a0)/(2.*dx)
            elif M == 3:
                dadx[i] = (calcAngle(Oatoms,SyOps,Amat)-a0)/(2.*dx)
            else:
                dadx[i] = (calcTorsion(Oatoms,SyOps,Amat)-a0)/(2.*dx)
            Oatoms[ia][ix+1] += dx      # restore; was left shifted by -dx
        covMatrix = covData['covMatrix']
        varyList = covData['varyList']
        Vcov = getVCov(names,varyList,covMatrix)
        sig = np.sqrt(np.inner(dadx,np.inner(Vcov,dadx)))
        if sig < sigVals[M-2]:
            sig = sigVals[M-2]
    return Val,sig
def ValEsd(value,esd=0,nTZ=False):
    '''Format a floating point number with a given level of precision or
    in crystallographic format with an "esd", as value(esd). If esd is
    negative the number is formatted with the level of significant figures
    appropriate if abs(esd) were the esd, but the esd is not included.
    If the esd is zero, approximately 6 significant figures are printed.
    nTZ=True causes "extra" zeros to be removed after the decimal place.
    For example:

      * "1.235(3)" for value=1.2346 & esd=0.003
      * "1.235(3)e4" for value=12346. & esd=30
      * "1.235(3)e6" for value=0.12346e7 & esd=3000
      * "1.235" for value=1.2346 & esd=-0.003
      * "1.240" for value=1.2395 & esd=-0.003
      * "1.24" for value=1.2395 & esd=-0.003 with nTZ=True
      * "1.23460" for value=1.2346 & esd=0.0

    NOTE(review): with cutoff=3.16228 an esd mantissa below sqrt(10)
    (e.g. 0.003) maps to a two-digit intesd (30), so some examples above
    appear to reflect the older 1.95 cutoff -- verify against output.

    :param float value: number to be formatted
    :param float esd: uncertainty or if esd < 0, specifies level of
      precision to be shown e.g. esd=-0.01 gives 2 places beyond decimal
    :param bool nTZ: True to remove trailing zeros (default is False)
    :returns: value(esd) or value as a string
    '''
    # Note: this routine is Python 3 compatible -- I think
    cutoff = 3.16228    #=(sqrt(10); same as old GSAS was 1.95
    if math.isnan(value): # invalid value, bail out
        return '?'
    if math.isnan(esd): # invalid esd, treat as zero
        esd = 0
        esdoff = 5
#    if esd < 1.e-5:
#        esd = 0
#        esdoff = 5
    elif esd != 0:
        # transform the esd to a one or two digit integer
        l = math.log10(abs(esd)) % 1.
        if l < math.log10(cutoff): l+= 1.
        intesd = int(round(10**l)) # esd as integer
        # determine the number of digits offset for the esd
        esdoff = int(round(math.log10(intesd*1./abs(esd))))
    else:
        esdoff = 5
    valoff = 0
    # NOTE(review): this compares |value| against the digit count esdoff --
    # looks intended as an "effectively zero" test; confirm the comparison
    if abs(value) < abs(esdoff): # if value is effectively zero
        pass
    elif esdoff < 0 or abs(value) > 1.0e6 or abs(value) < 1.0e-4: # use scientific notation
        # where the digit offset is to the left of the decimal place or where too many
        # digits are needed
        if abs(value) > 1:
            valoff = int(math.log10(abs(value)))
        elif abs(value) > 0:
            valoff = int(math.log10(abs(value))-0.9999999)
        else:
            valoff = 0
    if esd != 0:
        if valoff+esdoff < 0:
            valoff = esdoff = 0
        out = ("{:."+str(valoff+esdoff)+"f}").format(value/10**valoff) # format the value
    elif valoff != 0: # esd = 0; exponential notation ==> esdoff decimal places
        out = ("{:."+str(esdoff)+"f}").format(value/10**valoff) # format the value
    else: # esd = 0; non-exponential notation ==> esdoff+1 significant digits
        if abs(value) > 0:
            extra = -math.log10(abs(value))
        else:
            extra = 0
        if extra > 0: extra += 1
        out = ("{:."+str(max(0,esdoff+int(extra)))+"f}").format(value) # format the value
    if esd > 0:
        out += ("({:d})").format(intesd)  # add the esd
    elif nTZ and '.' in out:
        out = out.rstrip('0')  # strip zeros to right of decimal
        out = out.rstrip('.')  # and decimal place when not needed
    if valoff != 0:
        out += ("e{:d}").format(valoff) # add an exponent, when needed
    return out
###############################################################################
##### Protein validation - "ERRATV2" analysis
###############################################################################
def validProtein(Phase,old):
    '''Protein validation by the ERRAT method: score each residue by a
    quadratic form in the fractions of nonbonded C/N/O contact types
    accumulated over a 9-residue sliding window.

    :param bool old: True uses the errat.f constants & a 3.5A contact cutoff
      (S/Se treated as O); False uses the erratv2.cpp constants & 3.75A cutoff
    :param dict Phase: phase data; uses General (cell, atom pointers) & Atoms
    :returns: tuple (resNames,chainProb,resIDs): residue labels, per-residue
      scores (0. for gaps & the first/last 4 residues of each chain) & the
      residue-id lookup dict
    '''
    def sumintact(intact):
        # fold the directional counts into the 6 unique pair types
        return {'CC':intact['CC'],'NN':intact['NN'],'OO':intact['OO'],
        'CN':(intact['CN']+intact['NC']),'CO':(intact['CO']+intact['OC']),
        'NO':(intact['NO']+intact['ON'])}
    # amino-acid residue types accepted; resNames is reused below for the
    # accumulated residue label list
    resNames = ['ALA','ARG','ASN','ASP','CYS','GLN','GLU','GLY','HIS','ILE',
        'LEU','LYS','MET','PHE','PRO','SER','THR','TRP','TYR','VAL','MSE']
    # data from errat.f
    b1_old = np.array([
        [1154.343, 600.213, 1051.018, 1132.885, 960.738],
        [600.213, 1286.818, 1282.042, 957.156, 612.789],
        [1051.018, 1282.042, 3519.471, 991.974, 1226.491],
        [1132.885, 957.156, 991.974, 1798.672, 820.355],
        [960.738, 612.789, 1226.491, 820.355, 2428.966]
        ])
    avg_old = np.array([ 0.225, 0.281, 0.071, 0.237, 0.044])    #Table 1 3.5A Obsd. Fr. p 1513
    # data taken from erratv2.ccp
    b1 = np.array([
        [5040.279078850848200, 3408.805141583649400, 4152.904423767300600, 4236.200004171890200, 5054.781210204625500],
        [3408.805141583648900, 8491.906094010220800, 5958.881777877950300, 1521.387352718486200, 4304.078200827221700],
        [4152.904423767301500, 5958.881777877952100, 7637.167089335050100, 6620.715738223072500, 5287.691183798410700],
        [4236.200004171890200, 1521.387352718486200, 6620.715738223072500, 18368.343774298410000, 4050.797811118806700],
        [5054.781210204625500, 4304.078200827220800, 5287.691183798409800, 4050.797811118806700, 6666.856740479164700]])
    avg = np.array([0.192765509919262, 0.195575208778518, 0.275322406824210, 0.059102357035642, 0.233154192767480])
    General = Phase['General']
    Amat,Bmat = G2lat.cell2AB(General['Cell'][1:7])
    cx,ct,cs,cia = General['AtomPtrs']
    Atoms = Phase['Atoms']
    cartAtoms = []
    xyzmin = 999.*np.ones(3)
    xyzmax = -999.*np.ones(3)
    #select residue atoms,S,Se --> O make cartesian
    for atom in Atoms:
        if atom[1] in resNames:
            cartAtoms.append(atom[:cx+3])
            if atom[4].strip() in ['S','Se']:
                if not old:
                    continue        #S,Se skipped for erratv2?
                    # NOTE(review): the S/Se atom was already appended above
                    # but is skipped before cartesian conversion -- verify
                cartAtoms[-1][3] = 'Os'
                cartAtoms[-1][4] = 'O'
            cartAtoms[-1][cx:cx+3] = np.inner(Amat,cartAtoms[-1][cx:cx+3])
            cartAtoms[-1].append(atom[cia+8])
    XYZ = np.array([atom[cx:cx+3] for atom in cartAtoms])
    xyzmin = np.array([np.min(XYZ.T[i]) for i in [0,1,2]])
    xyzmax = np.array([np.max(XYZ.T[i]) for i in [0,1,2]])
    # partition space into 4A boxes; each box holds a count + up to 14 atom ids
    nbox = list(np.array(np.ceil((xyzmax-xyzmin)/4.),dtype=int))+[15,]
    Boxes = np.zeros(nbox,dtype=int)
    iBox = np.array([np.trunc((XYZ.T[i]-xyzmin[i])/4.) for i in [0,1,2]],dtype=int).T
    for ib,box in enumerate(iBox):  #put in a try for too many atoms in box (IndexError)?
        try:
            Boxes[box[0],box[1],box[2],0] += 1
            Boxes[box[0],box[1],box[2],Boxes[box[0],box[1],box[2],0]] = ib
        except IndexError:
            print('too many atoms in box' )
            continue
    #Box content checks with errat.f $ erratv2.cpp ibox1 arrays
    indices = (-1,0,1)
    Units = np.array([[h,k,l] for h in indices for k in indices for l in indices])
    dsmax = 3.75**2     # squared contact cutoff
    if old:
        dsmax = 3.5**2
    chains = []
    resIntAct = []
    chainIntAct = []
    res = []
    resNames = []       # reused: now accumulates residue labels
    resIDs = {}
    resname = []
    resID = {}
    newChain = True
    intact = {'CC':0,'CN':0,'CO':0,'NN':0,'NO':0,'OO':0,'NC':0,'OC':0,'ON':0}
    for ia,atom in enumerate(cartAtoms):
        jntact = {'CC':0,'CN':0,'CO':0,'NN':0,'NO':0,'OO':0,'NC':0,'OC':0,'ON':0}
        if atom[2] not in chains:   #get chain id & save residue sequence from last chain
            chains.append(atom[2])
            if len(resIntAct):
                resIntAct.append(sumintact(intact))
                chainIntAct.append(resIntAct)
                resNames += resname
                resIDs.update(resID)
                res = []
                resname = []
                resID = {}
                resIntAct = []
                intact = {'CC':0,'CN':0,'CO':0,'NN':0,'NO':0,'OO':0,'NC':0,'OC':0,'ON':0}
                newChain = True
        if atom[0] not in res:  #new residue, get residue no.
            if res and int(res[-1]) != int(atom[0])-1:  #a gap in chain - not new chain
                intact = {'CC':0,'CN':0,'CO':0,'NN':0,'NO':0,'OO':0,'NC':0,'OC':0,'ON':0}
                ires = int(res[-1])
                for i in range(int(atom[0])-ires-1):    # pad the gap with empty residues
                    res.append(str(ires+i+1))
                    resname.append('')
                    resIntAct.append(sumintact(intact))
            res.append(atom[0])
            name = '%s-%s%s'%(atom[2],atom[0],atom[1])
            resname.append(name)
            resID[name] = atom[-1]
            if not newChain:
                resIntAct.append(sumintact(intact))
            intact = {'CC':0,'CN':0,'CO':0,'NN':0,'NO':0,'OO':0,'NC':0,'OC':0,'ON':0}
            newChain = False
        ibox = iBox[ia]         #box location of atom
        tgts = []
        for unit in Units:      #assemble list of all possible target atoms
            jbox = ibox+unit
            if np.all(jbox>=0) and np.all((jbox-nbox[:3])<0):
                tgts += list(Boxes[jbox[0],jbox[1],jbox[2]])
        tgts = list(set(tgts))
        tgts = [tgt for tgt in tgts if atom[:3] != cartAtoms[tgt][:3]]  #exclude same residue
        tgts = [tgt for tgt in tgts if np.sum((XYZ[ia]-XYZ[tgt])**2) < dsmax]
        ires = int(atom[0])
        # exclude bonded backbone neighbors from the nonbonded contact counts
        if old:
            if atom[3].strip() == 'C':
                tgts = [tgt for tgt in tgts if not (cartAtoms[tgt][3].strip() == 'N' and int(cartAtoms[tgt][0]) in [ires-1,ires+1])]
            elif atom[3].strip() == 'N':
                tgts = [tgt for tgt in tgts if not (cartAtoms[tgt][3].strip() in ['C','CA'] and int(cartAtoms[tgt][0]) in [ires-1,ires+1])]
            elif atom[3].strip() == 'CA':
                tgts = [tgt for tgt in tgts if not (cartAtoms[tgt][3].strip() == 'N' and int(cartAtoms[tgt][0]) in [ires-1,ires+1])]
        else:
            tgts = [tgt for tgt in tgts if not int(cartAtoms[tgt][0]) in [ires+1,ires+2,ires+3,ires+4,ires+5,ires+6,ires+7,ires+8]]
            if atom[3].strip() == 'C':
                tgts = [tgt for tgt in tgts if not (cartAtoms[tgt][3].strip() == 'N' and int(cartAtoms[tgt][0]) == ires+1)]
            elif atom[3].strip() == 'N':
                tgts = [tgt for tgt in tgts if not (cartAtoms[tgt][3].strip() == 'C' and int(cartAtoms[tgt][0]) == ires-1)]
        for tgt in tgts:
            dsqt = np.sqrt(np.sum((XYZ[ia]-XYZ[tgt])**2))
            mult = 1.0
            if dsqt > 3.25 and not old:     # erratv2 taper between 3.25 & 3.75A
                mult = 2.*(3.75-dsqt)
            intype = atom[4].strip()+cartAtoms[tgt][4].strip()
            if 'S' not in intype:
                intact[intype] += mult
                jntact[intype] += mult
#            print ia,atom[0]+atom[1]+atom[3],tgts,jntact['CC'],jntact['CN']+jntact['NC'],jntact['CO']+jntact['OC'],jntact['NN'],jntact['NO']+jntact['ON']
    resNames += resname
    resIDs.update(resID)
    resIntAct.append(sumintact(intact))
    chainIntAct.append(resIntAct)
    chainProb = []
    for ich,chn in enumerate(chains):
        IntAct = chainIntAct[ich]
        nRes = len(IntAct)
        Probs = [0.,0.,0.,0.]   #skip 1st 4 residues in chain
        # NOTE(review): resNames[i] indexes the accumulated cross-chain list
        # with a per-chain loop index -- verify for multi-chain input
        for i in range(4,nRes-4):
            if resNames[i]:
                mtrx = np.zeros(5)
                summ = 0.
                # accumulate contact counts over the 9-residue window
                for j in range(i-4,i+5):
                    summ += np.sum(np.array(list(IntAct[j].values())))
                    if old:
                        mtrx[0] += IntAct[j]['CC']
                        mtrx[1] += IntAct[j]['CO']
                        mtrx[2] += IntAct[j]['NN']
                        mtrx[3] += IntAct[j]['NO']
                        mtrx[4] += IntAct[j]['OO']
                    else:
                        mtrx[0] += IntAct[j]['CC']
                        mtrx[1] += IntAct[j]['CN']
                        mtrx[2] += IntAct[j]['CO']
                        mtrx[3] += IntAct[j]['NN']
                        mtrx[4] += IntAct[j]['NO']
                mtrx /= summ
#                print i+1,mtrx*summ
                # quadratic-form score about the reference contact fractions
                if old:
                    mtrx -= avg_old
                    prob = np.inner(np.inner(mtrx,b1_old),mtrx)
                else:
                    mtrx -= avg
                    prob = np.inner(np.inner(mtrx,b1),mtrx)
            else:   #skip the gaps
                prob = 0.0
            Probs.append(prob)
        Probs += 4*[0.,]    #skip last 4 residues in chain
        chainProb += Probs
    return resNames,chainProb,resIDs
################################################################################
##### Texture fitting stuff
################################################################################
def FitTexture(General,Gangls,refData,keyList,pgbar):
import pytexture as ptx
ptx.pyqlmninit() #initialize fortran arrays for spherical harmonics
def printSpHarm(textureData,SHtextureSig):
print ('\n Spherical harmonics texture: Order:' + str(textureData['Order']))
names = ['omega','chi','phi']
namstr = ' names :'
ptstr = ' values:'
sigstr = ' esds :'
for name in names:
namstr += '%12s'%('Sample '+name)
ptstr += '%12.3f'%(textureData['Sample '+name][1])
if 'Sample '+name in SHtextureSig:
sigstr += '%12.3f'%(SHtextureSig['Sample '+name])
else:
sigstr += 12*' '
print (namstr)
print (ptstr)
print (sigstr)
print ('\n Texture coefficients:')
SHcoeff = textureData['SH Coeff'][1]
SHkeys = list(SHcoeff.keys())
nCoeff = len(SHcoeff)
nBlock = nCoeff//10+1
iBeg = 0
iFin = min(iBeg+10,nCoeff)
for block in range(nBlock):
namstr = ' names :'
ptstr = ' values:'
sigstr = ' esds :'
for name in SHkeys[iBeg:iFin]:
if 'C' in name:
namstr += '%12s'%(name)
ptstr += '%12.3f'%(SHcoeff[name])
if name in SHtextureSig:
sigstr += '%12.3f'%(SHtextureSig[name])
else:
sigstr += 12*' '
print (namstr)
print (ptstr)
print (sigstr)
iBeg += 10
iFin = min(iBeg+10,nCoeff)
def Dict2Values(parmdict, varylist):
'''Use before call to leastsq to setup list of values for the parameters
in parmdict, as selected by key in varylist'''
return [parmdict[key] for key in varylist]
def Values2Dict(parmdict, varylist, values):
''' Use after call to leastsq to update the parameter dictionary with
values corresponding to keys in varylist'''
parmdict.update(list(zip(varylist,values)))
def errSpHarm(values,SGData,cell,Gangls,shModel,refData,parmDict,varyList,pgbar):
parmDict.update(list(zip(varyList,values)))
Mat = np.empty(0)
sumObs = 0
Sangls = [parmDict['Sample '+'omega'],parmDict['Sample '+'chi'],parmDict['Sample '+'phi']]
for hist in Gangls.keys():
Refs = refData[hist]
Refs[:,5] = np.where(Refs[:,5]>0.,Refs[:,5],0.)
wt = 1./np.sqrt(np.fmax(Refs[:,4],.25))
# wt = 1./np.max(Refs[:,4],.25)
sumObs += np.sum(wt*Refs[:,5])
Refs[:,6] = 1.
H = Refs[:,:3]
phi,beta = G2lat.CrsAng(H,cell,SGData)
psi,gam,x,x = G2lat.SamAng(Refs[:,3]/2.,Gangls[hist],Sangls,False) #assume not Bragg-Brentano!
for item in parmDict:
if 'C' in item:
L,M,N = eval(item.strip('C'))
Kcl = G2lat.GetKcl(L,N,SGData['SGLaue'],phi,beta)
Ksl,x,x = G2lat.GetKsl(L,M,shModel,psi,gam)
Lnorm = G2lat.Lnorm(L)
Refs[:,6] += parmDict[item]*Lnorm*Kcl*Ksl
mat = wt*(Refs[:,5]-Refs[:,6])
Mat = np.concatenate((Mat,mat))
sumD = np.sum(np.abs(Mat))
R = min(100.,100.*sumD/sumObs)
pgbar.Update(R,newmsg='Residual = %5.2f'%(R))
print (' Residual: %.3f%%'%(R))
return Mat
    def dervSpHarm(values,SGData,cell,Gangls,shModel,refData,parmDict,varyList,pgbar):
        '''Jacobian (Dfun) companion to errSpHarm for leastsq: analytic
        derivatives of the residual vector wrt each parameter in varyList;
        returns an array of shape (nobs,nvary).
        '''
        Mat = np.empty(0)
        Sangls = [parmDict['Sample omega'],parmDict['Sample chi'],parmDict['Sample phi']]
        for hist in Gangls.keys():
            mat = np.zeros((len(varyList),len(refData[hist])))
            Refs = refData[hist]
            H = Refs[:,:3]
            wt = 1./np.sqrt(np.fmax(Refs[:,4],.25))    #same weights as errSpHarm
#            wt = 1./np.max(Refs[:,4],.25)
            phi,beta = G2lat.CrsAng(H,cell,SGData)
            psi,gam,dPdA,dGdA = G2lat.SamAng(Refs[:,3]/2.,Gangls[hist],Sangls,False) #assume not Bragg-Brentano!
            for j,item in enumerate(varyList):
                if 'C' in item:    #harmonic coefficients; names look like e.g. 'C(2,0,0)'
                    L,M,N = eval(item.strip('C'))
                    Kcl = G2lat.GetKcl(L,N,SGData['SGLaue'],phi,beta)
                    Ksl,dKdp,dKdg = G2lat.GetKsl(L,M,shModel,psi,gam)
                    Lnorm = G2lat.Lnorm(L)
                    mat[j] = -wt*Lnorm*Kcl*Ksl    #d(residual)/d(coefficient)
                    for k,itema in enumerate(['Sample omega','Sample chi','Sample phi']):
                        try:    #add sample-angle derivatives only if that angle is being refined
                            l = varyList.index(itema)
                            mat[l] -= parmDict[item]*wt*Lnorm*Kcl*(dKdp*dPdA[k]+dKdg*dGdA[k])
                        except ValueError:
                            pass
            if len(Mat):
                Mat = np.concatenate((Mat,mat.T))
            else:
                Mat = mat.T
        print ('deriv')
        return Mat
print (' Fit texture for '+General['Name'])
SGData = General['SGData']
cell = General['Cell'][1:7]
Texture = General['SH Texture']
if not Texture['Order']:
return 'No spherical harmonics coefficients'
varyList = []
parmDict = copy.copy(Texture['SH Coeff'][1])
for item in ['Sample omega','Sample chi','Sample phi']:
parmDict[item] = Texture[item][1]
if Texture[item][0]:
varyList.append(item)
if Texture['SH Coeff'][0]:
varyList += list(Texture['SH Coeff'][1].keys())
while True:
begin = time.time()
values = np.array(Dict2Values(parmDict, varyList))
result = so.leastsq(errSpHarm,values,Dfun=dervSpHarm,full_output=True,ftol=1.e-6,
args=(SGData,cell,Gangls,Texture['Model'],refData,parmDict,varyList,pgbar))
ncyc = int(result[2]['nfev']//2)
if ncyc:
runtime = time.time()-begin
chisq = np.sum(result[2]['fvec']**2)
Values2Dict(parmDict, varyList, result[0])
GOF = chisq/(len(result[2]['fvec'])-len(varyList)) #reduced chi^2
print ('Number of function calls: %d Number of observations: %d Number of parameters: %d'%(result[2]['nfev'],len(result[2]['fvec']),len(varyList)))
print ('refinement time = %8.3fs, %8.3fs/cycle'%(runtime,runtime/ncyc))
try:
sig = np.sqrt(np.diag(result[1])*GOF)
if np.any(np.isnan(sig)):
print ('*** Least squares aborted - some invalid esds possible ***')
break #refinement succeeded - finish up!
except ValueError: #result[1] is None on singular matrix
print ('**** Refinement failed - singular matrix ****')
return None
else:
break
if ncyc:
for parm in parmDict:
if 'C' in parm:
Texture['SH Coeff'][1][parm] = parmDict[parm]
else:
Texture[parm][1] = parmDict[parm]
sigDict = dict(zip(varyList,sig))
printSpHarm(Texture,sigDict)
return None
################################################################################
##### Fourier & charge flip stuff
################################################################################
def adjHKLmax(SGData,Hmax):
    '''Round the h,k,l index limits in Hmax up to FFT-friendly multiples,
    in place. Trigonal/hexagonal Laue classes round h,k to multiples of 6
    and l to a multiple of 4; all other Laue classes use multiples of 4.
    Only the first three entries of Hmax are touched.

    :param dict SGData: space group data; only 'SGLaue' is used
    :param Hmax: [hmax,kmax,lmax,...] list or int array; modified in place
    '''
    if SGData['SGLaue'] in ['3','3m1','31m','6/m','6/mmm']:
        mults = (6,6,4)
    else:
        mults = (4,4,4)
    for i,mult in enumerate(mults):
        Hmax[i] = int(math.ceil(Hmax[i]/float(mult)))*mult
def OmitMap(data,reflDict,pgbar=None):
    '''Compute an omit map: the unit cell density is split into a 4x4x4 grid
    of blocks; each block in turn is zeroed, phases are recomputed from the
    remaining density, and the omitted region is rebuilt from those phases
    with the observed amplitudes. Result is stored in generalData['Map'].

    :param dict data: phase data structure; uses data['General']
    :param dict reflDict: reflection set; uses reflDict['RefList']
    :param pgbar: progress bar widget with an Update method
    :returns: mapData dict with the omit map in 'rho' (None on error)
    '''
    generalData = data['General']
    if not generalData['Map']['MapType']:
        print ('**** ERROR - Fourier map not defined')
        return
    mapData = generalData['Map']
    dmin = mapData['Resolution']
    SGData = generalData['SGData']
    SGMT = np.array([ops[0].T for ops in SGData['SGOps']])    #symmetry rotations (transposed)
    SGT = np.array([ops[1] for ops in SGData['SGOps']])    #symmetry translations
    cell = generalData['Cell'][1:8]    #a,b,c,alpha,beta,gamma,Vol
    A = G2lat.cell2A(cell[:6])
    Hmax = np.asarray(G2lat.getHKLmax(dmin,SGData,A),dtype='i')+1
    adjHKLmax(SGData,Hmax)
    Fhkl = np.zeros(shape=2*Hmax,dtype='c16')
    time0 = time.time()
    for iref,ref in enumerate(reflDict['RefList']):
        if ref[4] >= dmin:
            Fosq,Fcsq,ph = ref[8:11]
            Uniq = np.inner(ref[:3],SGMT)
            Phi = np.inner(ref[:3],SGT)
            for i,hkl in enumerate(Uniq):        #uses uniq
                hkl = np.asarray(hkl,dtype='i')
                dp = 360.*Phi[i]                #and phi
                a = cosd(ph+dp)
                b = sind(ph+dp)
                phasep = complex(a,b)
                phasem = complex(a,-b)
                if '2Fo-Fc' in mapData['MapType']:
                    F = 2.*np.sqrt(Fosq)-np.sqrt(Fcsq)
                else:
                    F = np.sqrt(Fosq)
                h,k,l = hkl+Hmax    #shift so negative indices fit in the array
                Fhkl[h,k,l] = F*phasep
                h,k,l = -hkl+Hmax    #Friedel mate
                Fhkl[h,k,l] = F*phasem
    rho0 = fft.fftn(fft.fftshift(Fhkl))/cell[6]
    M = np.mgrid[0:4,0:4,0:4]
    blkIds = np.array(list(zip(M[0].flatten(),M[1].flatten(),M[2].flatten())))    #the 64 block indices
    iBeg = blkIds*rho0.shape//4
    iFin = (blkIds+1)*rho0.shape//4
    rho_omit = np.zeros_like(rho0)
    nBlk = 0
    for iB,iF in zip(iBeg,iFin):
        rho1 = np.copy(rho0)
        rho1[iB[0]:iF[0],iB[1]:iF[1],iB[2]:iF[2]] = 0.    #omit this block
        Fnew = fft.ifftshift(fft.ifftn(rho1))
        Fnew = np.where(Fnew,Fnew,1.0)           #avoid divide by zero
        phase = Fnew/np.absolute(Fnew)    #keep phases, restore observed amplitudes
        OFhkl = np.absolute(Fhkl)*phase
        rho1 = np.real(fft.fftn(fft.fftshift(OFhkl)))*(1.+0j)
        rho_omit[iB[0]:iF[0],iB[1]:iF[1],iB[2]:iF[2]] = np.copy(rho1[iB[0]:iF[0],iB[1]:iF[1],iB[2]:iF[2]])
        nBlk += 1
        pgbar.Update(nBlk)
    mapData['rho'] = np.real(rho_omit)/cell[6]
    mapData['rhoMax'] = max(np.max(mapData['rho']),-np.min(mapData['rho']))
    mapData['minmax'] = [np.max(mapData['rho']),np.min(mapData['rho'])]
    print ('Omit map time: %.4f no. elements: %d'%(time.time()-time0,Fhkl.size))
    return mapData
def FourierMap(data,reflDict):
    '''Compute a 3D Fourier map from a reflection list; the map flavor
    (Fobs, Fcalc, delt-F, 2*Fo-Fc or Patterson) is selected by
    generalData['Map']['MapType']. Results ('rho','rhoMax','minmax','Type')
    are stored in generalData['Map']; nothing is returned.

    :param dict data: phase data structure; uses data['General']
    :param dict reflDict: reflection set with 'RefList' and 'Type'
    '''
    generalData = data['General']
    mapData = generalData['Map']
    dmin = mapData['Resolution']
    SGData = generalData['SGData']
    SGMT = np.array([ops[0].T for ops in SGData['SGOps']])    #symmetry rotations (transposed)
    SGT = np.array([ops[1] for ops in SGData['SGOps']])    #symmetry translations
    cell = generalData['Cell'][1:8]    #a,b,c,alpha,beta,gamma,Vol
    A = G2lat.cell2A(cell[:6])
    Hmax = np.asarray(G2lat.getHKLmax(dmin,SGData,A),dtype='i')+1
    adjHKLmax(SGData,Hmax)
    Fhkl = np.zeros(shape=2*Hmax,dtype='c16')
#    Fhkl[0,0,0] = generalData['F000X']
    time0 = time.time()
    for iref,ref in enumerate(reflDict['RefList']):
        if ref[4] > dmin:
            Fosq,Fcsq,ph = ref[8:11]
            Uniq = np.inner(ref[:3],SGMT)
            Phi = np.inner(ref[:3],SGT)
            for i,hkl in enumerate(Uniq):        #uses uniq
                hkl = np.asarray(hkl,dtype='i')
                dp = 360.*Phi[i]                #and phi
                a = cosd(ph+dp)
                b = sind(ph+dp)
                phasep = complex(a,b)
                phasem = complex(a,-b)
                if 'Fobs' in mapData['MapType']:
                    F = np.where(Fosq>0.,np.sqrt(Fosq),0.)    #negative Fo^2 treated as 0
                    h,k,l = hkl+Hmax
                    Fhkl[h,k,l] = F*phasep
                    h,k,l = -hkl+Hmax
                    Fhkl[h,k,l] = F*phasem
                elif 'Fcalc' in mapData['MapType']:
                    F = np.sqrt(Fcsq)
                    h,k,l = hkl+Hmax
                    Fhkl[h,k,l] = F*phasep
                    h,k,l = -hkl+Hmax
                    Fhkl[h,k,l] = F*phasem
                elif 'delt-F' in mapData['MapType']:
                    dF = np.where(Fosq>0.,np.sqrt(Fosq),0.)-np.sqrt(Fcsq)
                    h,k,l = hkl+Hmax
                    Fhkl[h,k,l] = dF*phasep
                    h,k,l = -hkl+Hmax
                    Fhkl[h,k,l] = dF*phasem
                elif '2*Fo-Fc' in mapData['MapType']:
                    F = 2.*np.where(Fosq>0.,np.sqrt(Fosq),0.)-np.sqrt(Fcsq)
                    h,k,l = hkl+Hmax
                    Fhkl[h,k,l] = F*phasep
                    h,k,l = -hkl+Hmax
                    Fhkl[h,k,l] = F*phasem
                elif 'Patterson' in mapData['MapType']:
                    h,k,l = hkl+Hmax
                    Fhkl[h,k,l] = complex(Fosq,0.)    #Patterson: |F|^2 with zero phase
                    h,k,l = -hkl+Hmax
                    Fhkl[h,k,l] = complex(Fosq,0.)
    rho = fft.fftn(fft.fftshift(Fhkl))/cell[6]
    print ('Fourier map time: %.4f'%(time.time()-time0),'no. elements: %d'%(Fhkl.size))
    mapData['Type'] = reflDict['Type']
    mapData['rho'] = np.real(rho)
    mapData['rhoMax'] = max(np.max(mapData['rho']),-np.min(mapData['rho']))
    mapData['minmax'] = [np.max(mapData['rho']),np.min(mapData['rho'])]
def Fourier4DMap(data,reflDict):
    '''Compute a 4D (3+1 superspace) Fourier map (Fobs, Fcalc or delt-F per
    mapData['MapType']) from superspace hklm reflections. The 4D map goes to
    generalData['4DmapData'] and its m=0 3D section to generalData['Map'].

    :param dict data: phase data structure; uses data['General']
    :param dict reflDict: superspace reflection set with 'RefList' and 'Type'
    '''
    generalData = data['General']
    map4DData = generalData['4DmapData']
    mapData = generalData['Map']
    dmin = mapData['Resolution']
    SGData = generalData['SGData']
    SSGData = generalData['SSGData']
    SSGMT = np.array([ops[0].T for ops in SSGData['SSGOps']])    #superspace symmetry rotations (transposed)
    SSGT = np.array([ops[1] for ops in SSGData['SSGOps']])    #superspace symmetry translations
    cell = generalData['Cell'][1:8]    #a,b,c,alpha,beta,gamma,Vol
    A = G2lat.cell2A(cell[:6])
    maxM = 4    #largest satellite index m carried in the map
    Hmax = G2lat.getHKLmax(dmin,SGData,A)+[maxM,]
    adjHKLmax(SGData,Hmax)    #only touches h,k,l; the m limit is left alone
    Hmax = np.asarray(Hmax,dtype='i')+1
    Fhkl = np.zeros(shape=2*Hmax,dtype='c16')
    time0 = time.time()
    for iref,ref in enumerate(reflDict['RefList']):
        if ref[5] > dmin:
            Fosq,Fcsq,ph = ref[9:12]
            Fosq = np.where(Fosq>0.,Fosq,0.)    #can't use Fo^2 < 0
            Uniq = np.inner(ref[:4],SSGMT)
            Phi = np.inner(ref[:4],SSGT)
            for i,hkl in enumerate(Uniq):        #uses uniq
                hkl = np.asarray(hkl,dtype='i')
                dp = 360.*Phi[i]                #and phi
                a = cosd(ph+dp)
                b = sind(ph+dp)
                phasep = complex(a,b)
                phasem = complex(a,-b)
                if 'Fobs' in mapData['MapType']:
                    F = np.sqrt(Fosq)
                    h,k,l,m = hkl+Hmax
                    Fhkl[h,k,l,m] = F*phasep
                    h,k,l,m = -hkl+Hmax
                    Fhkl[h,k,l,m] = F*phasem
                elif 'Fcalc' in mapData['MapType']:
                    F = np.sqrt(Fcsq)
                    h,k,l,m = hkl+Hmax
                    Fhkl[h,k,l,m] = F*phasep
                    h,k,l,m = -hkl+Hmax
                    Fhkl[h,k,l,m] = F*phasem
                elif 'delt-F' in mapData['MapType']:
                    dF = np.sqrt(Fosq)-np.sqrt(Fcsq)
                    h,k,l,m = hkl+Hmax
                    Fhkl[h,k,l,m] = dF*phasep
                    h,k,l,m = -hkl+Hmax
                    Fhkl[h,k,l,m] = dF*phasem
    SSrho = fft.fftn(fft.fftshift(Fhkl))/cell[6]          #4D map
    rho = fft.fftn(fft.fftshift(Fhkl[:,:,:,maxM+1]))/cell[6]      #3D map: m=0 layer (index maxM+1 is the m-axis center)
    map4DData['rho'] = np.real(SSrho)
    map4DData['rhoMax'] = max(np.max(map4DData['rho']),-np.min(map4DData['rho']))
    map4DData['minmax'] = [np.max(map4DData['rho']),np.min(map4DData['rho'])]
    map4DData['Type'] = reflDict['Type']
    mapData['Type'] = reflDict['Type']
    mapData['rho'] = np.real(rho)
    mapData['rhoMax'] = max(np.max(mapData['rho']),-np.min(mapData['rho']))
    mapData['minmax'] = [np.max(mapData['rho']),np.min(mapData['rho'])]
    print ('Fourier map time: %.4f'%(time.time()-time0),'no. elements: %d'%(Fhkl.size))
# map printing for testing purposes
def printRho(SGLaue,rho,rhoMax):
    '''Print a 2D or 3D density map as integer percent-of-max values, one
    printed line per map row; trigonal/hexagonal Laue classes get a skewed
    layout via leading blanks. Debug/test aid only.

    :param str SGLaue: Laue symbol; selects the skewed layout
    :param np.ndarray rho: 2D or 3D density map
    :param float rhoMax: normalization value (the 100% level)
    '''
    skew = SGLaue in ['3','3m1','31m','6/m','6/mmm']
    if rho.ndim == 2:
        nx,ny = rho.shape
        for j in range(ny):
            pieces = [(ny-j)*' '] if skew else []
            pieces += ['%4d'%(int(100*rho[i,j]/rhoMax)) for i in range(nx)]
            print (''.join(pieces)+'\n')
    else:
        nx,ny,nz = rho.shape
        for k in range(nz):
            print ('k = %d'%k)
            for j in range(ny):
                pieces = [(ny-j)*' '] if skew else []
                pieces += ['%4d'%(int(100*rho[i,j,k]/rhoMax)) for i in range(nx)]
                print (''.join(pieces)+'\n')
## keep this
def findOffset(SGData,A,Fhkl):
    '''Find the origin offset of a (charge-flip) map from the phase relations
    of the strongest symmetry-related reflections, by minimizing the phase
    misfit over a grid of trial offsets covering the cell.

    :param dict SGData: space group data
    :param A: reciprocal metric tensor elements (from G2lat.cell2A)
    :param np.array Fhkl: complex structure factor array, origin at array center
    :returns: np.array DX: negated grid offset to roll the map by
    '''
    if SGData['SpGrp'] == 'P 1':
        return [0,0,0]    #no origin ambiguity to resolve in P 1
    hklShape = Fhkl.shape
    hklHalf = np.array(hklShape)/2
    sortHKL = np.argsort(Fhkl.flatten())
    Fdict = {}
    for hkl in sortHKL:
        HKL = np.unravel_index(hkl,hklShape)
        F = Fhkl[HKL[0]][HKL[1]][HKL[2]]
        if F == 0.:
            break
        Fdict['%.6f'%(np.absolute(F))] = hkl    #NOTE(review): '%.6f' keys sort lexicographically, not strictly numerically - confirm intent
    Flist = np.flipud(np.sort(list(Fdict.keys())))    #largest first (string sort)
    F = str(1.e6)
    i = 0
    DH = []
    Dphi = []
    Hmax = 2*np.asarray(G2lat.getHKLmax(3.5,SGData,A),dtype='i')
    for F in Flist:
        hkl = np.unravel_index(Fdict[F],hklShape)
        if np.any(np.abs(hkl-hklHalf)-Hmax > 0):
            continue
        iabsnt,mulp,Uniq,Phi = G2spc.GenHKLf(list(hkl-hklHalf),SGData)
        Uniq = np.array(Uniq,dtype='i')
        Phi = np.array(Phi)
        Uniq = np.concatenate((Uniq,-Uniq))+hklHalf         # put in Friedel pairs & make as index to Farray
        Phi = np.concatenate((Phi,-Phi))                      # and their phase shifts
        Fh0 = Fhkl[hkl[0],hkl[1],hkl[2]]
        ang0 = np.angle(Fh0,deg=True)/360.
        for H,phi in list(zip(Uniq,Phi))[1:]:
            ang = (np.angle(Fhkl[int(H[0]),int(H[1]),int(H[2])],deg=True)/360.-phi)
            dH = H-hkl
            dang = ang-ang0
            DH.append(dH)
            Dphi.append((dang+.5) % 1.0)
        if i > 20 or len(DH) > 30:
            break
        i += 1
    DH = np.array(DH)
    print (' map offset no.of terms: %d from %d reflections'%(len(DH),len(Flist)))
    Dphi = np.array(Dphi)
    steps = np.array(hklShape)
    X,Y,Z = np.mgrid[0:1:1./steps[0],0:1:1./steps[1],0:1:1./steps[2]]    #trial fractional offsets over the whole cell
    XYZ = np.array(list(zip(X.flatten(),Y.flatten(),Z.flatten())))
    Dang = (np.dot(XYZ,DH.T)+.5)%1.-Dphi
    Mmap = np.reshape(np.sum((Dang)**2,axis=1),newshape=steps)/len(DH)    #mean squared phase misfit per trial offset
    hist,bins = np.histogram(Mmap,bins=1000)
#    for i,item in enumerate(hist[:10]):
#        print item,bins[i]
    chisq = np.min(Mmap)
    DX = -np.array(np.unravel_index(np.argmin(Mmap),Mmap.shape))
    print (' map offset chi**2: %.3f, map offset: %d %d %d'%(chisq,DX[0],DX[1],DX[2]))
#    print (np.dot(DX,DH.T)+.5)%1.-Dphi
    return DX
def ChargeFlip(data,reflDict,pgbar):
    '''Solve a structure by charge flipping: normalized structure factors E
    are built (optionally scaled by the form factor of
    flipData['Norm element']) with random starting phases; each cycle the
    density outside the [k-factor*sigma, k-Max*sigma] band is sign-flipped
    until the residual Rcf < 5%, the user cancels, or 10000 cycles elapse.

    :param dict data: phase data structure; uses data['General']
    :param dict reflDict: reflection set with 'RefList' and 'Type'
    :param pgbar: progress bar widget with an Update method
    :returns: (mapData,twophases): the resulting map dict and the per-cycle
      phase history of the flipData['testHKL'] reflections
    '''
    generalData = data['General']
    mapData = generalData['Map']
    flipData = generalData['Flip']
    FFtable = {}
    if 'None' not in flipData['Norm element']:
        normElem = flipData['Norm element'].upper()
        FFs = G2el.GetFormFactorCoeff(normElem.split('+')[0].split('-')[0])
        for ff in FFs:
            if ff['Symbol'] == normElem:
                FFtable.update(ff)
    dmin = flipData['Resolution']
    SGData = generalData['SGData']
    SGMT = np.array([ops[0].T for ops in SGData['SGOps']])    #symmetry rotations (transposed)
    SGT = np.array([ops[1] for ops in SGData['SGOps']])    #symmetry translations
    cell = generalData['Cell'][1:8]    #a,b,c,alpha,beta,gamma,Vol
    A = G2lat.cell2A(cell[:6])
    Vol = cell[6]
    im = 0
    if generalData['Modulated'] == True:
        im = 1    #modulated reflection rows carry an extra (m) column
    Hmax = np.asarray(G2lat.getHKLmax(dmin,SGData,A),dtype='i')+1
    adjHKLmax(SGData,Hmax)
    Ehkl = np.zeros(shape=2*Hmax,dtype='c16')       #2X64bits per complex no.
    time0 = time.time()
    for iref,ref in enumerate(reflDict['RefList']):
        dsp = ref[4+im]
        if im and ref[3]:    #skip super lattice reflections - result is 3D projection
            continue
        if dsp > dmin:
            ff = 0.1*Vol    #est. no. atoms for ~10A**3/atom
            if FFtable:
                SQ = 0.25/dsp**2
                ff *= G2el.ScatFac(FFtable,SQ)[0]
            if ref[8+im] > 0.:         #use only +ve Fobs**2
                E = np.sqrt(ref[8+im])/ff
            else:
                E = 0.
            ph = ref[10]
            ph = rn.uniform(0.,360.)    #stored phase discarded: charge flipping starts from random phases
            Uniq = np.inner(ref[:3],SGMT)
            Phi = np.inner(ref[:3],SGT)
            for i,hkl in enumerate(Uniq):        #uses uniq
                hkl = np.asarray(hkl,dtype='i')
                dp = 360.*Phi[i]                #and phi
                a = cosd(ph+dp)
                b = sind(ph+dp)
                phasep = complex(a,b)
                phasem = complex(a,-b)
                h,k,l = hkl+Hmax
                Ehkl[h,k,l] = E*phasep
                h,k,l = -hkl+Hmax
                Ehkl[h,k,l] = E*phasem
#    Ehkl[Hmax] = 0.00001           #this to preserve F[0,0,0]
    testHKL = np.array(flipData['testHKL'])+Hmax
    CEhkl = copy.copy(Ehkl)
    MEhkl = ma.array(Ehkl,mask=(Ehkl==0.0))
    Emask = ma.getmask(MEhkl)
    sumE = np.sum(ma.array(np.absolute(CEhkl),mask=Emask))
    Ncyc = 0
    old = np.seterr(all='raise')
    twophases = []
    while True:
        CErho = np.real(fft.fftn(fft.fftshift(CEhkl)))*(1.+0j)
        CEsig = np.std(CErho)
        CFrho = np.where(np.real(CErho) >= flipData['k-factor']*CEsig,CErho,-CErho)    #flip low density
        CFrho = np.where(np.real(CErho) <= flipData['k-Max']*CEsig,CFrho,-CFrho)      #solves U atom problem!
        CFhkl = fft.ifftshift(fft.ifftn(CFrho))
        CFhkl = np.where(CFhkl,CFhkl,1.0)           #avoid divide by zero
        phase = CFhkl/np.absolute(CFhkl)    #keep flipped phases, restore E amplitudes
        twophases.append([np.angle(phase[h,k,l]) for h,k,l in testHKL])
        CEhkl = np.absolute(Ehkl)*phase
        Ncyc += 1
        sumCF = np.sum(ma.array(np.absolute(CFhkl),mask=Emask))
        DEhkl = np.absolute(np.absolute(Ehkl)/sumE-np.absolute(CFhkl)/sumCF)
        Rcf = min(100.,np.sum(ma.array(DEhkl,mask=Emask)*100.))
        if Rcf < 5.:
            break
        GoOn = pgbar.Update(Rcf,newmsg='%s%8.3f%s\n%s %d'%('Residual Rcf =',Rcf,'%','No.cycles = ',Ncyc))[0]
        if not GoOn or Ncyc > 10000:
            break
    np.seterr(**old)
    print (' Charge flip time: %.4f'%(time.time()-time0),'no. elements: %d'%(Ehkl.size))
    CErho = np.real(fft.fftn(fft.fftshift(CEhkl)))/10.  #? to get on same scale as e-map
    print (' No.cycles = %d Residual Rcf =%8.3f%s Map size: %s'%(Ncyc,Rcf,'%',str(CErho.shape)))
    roll = findOffset(SGData,A,CEhkl)               #CEhkl needs to be just the observed set, not the full set!
    mapData['Rcf'] = Rcf
    mapData['rho'] = np.roll(np.roll(np.roll(CErho,roll[0],axis=0),roll[1],axis=1),roll[2],axis=2)
    mapData['rhoMax'] = max(np.max(mapData['rho']),-np.min(mapData['rho']))
    mapData['minmax'] = [np.max(mapData['rho']),np.min(mapData['rho'])]
    mapData['Type'] = reflDict['Type']
    return mapData,twophases
def findSSOffset(SGData,SSGData,A,Fhklm):
    '''4D analog of findOffset: find the superspace origin offset of a
    charge-flip map from the phase relations of the strongest
    symmetry-related hklm reflections.

    :param dict SGData: space group data
    :param dict SSGData: superspace group data
    :param A: reciprocal metric tensor elements (from G2lat.cell2A)
    :param np.array Fhklm: complex 4D structure factor array
    :returns: np.array DX: negated 4D grid offset to roll the map by
    '''
    if SGData['SpGrp'] == 'P 1':
        return [0,0,0,0]    #no origin ambiguity to resolve in P 1
    hklmShape = Fhklm.shape
    hklmHalf = np.array(hklmShape)/2
    sortHKLM = np.argsort(Fhklm.flatten())
    Fdict = {}
    for hklm in sortHKLM:
        HKLM = np.unravel_index(hklm,hklmShape)
        F = Fhklm[HKLM[0]][HKLM[1]][HKLM[2]][HKLM[3]]
        if F == 0.:
            break
        Fdict['%.6f'%(np.absolute(F))] = hklm    #NOTE(review): '%.6f' keys sort lexicographically, not strictly numerically - confirm intent
    Flist = np.flipud(np.sort(list(Fdict.keys())))    #largest first (string sort)
    F = str(1.e6)
    i = 0
    DH = []
    Dphi = []
    SSGMT = np.array([ops[0].T for ops in SSGData['SSGOps']])    #superspace symmetry rotations (transposed)
    SSGT = np.array([ops[1] for ops in SSGData['SSGOps']])    #superspace symmetry translations
    Hmax = 2*np.asarray(G2lat.getHKLmax(3.5,SGData,A),dtype='i')
    for F in Flist:
        hklm = np.unravel_index(Fdict[F],hklmShape)
        if np.any(np.abs(hklm-hklmHalf)[:3]-Hmax > 0):
            continue
        Uniq = np.inner(hklm-hklmHalf,SSGMT)
        Phi = np.inner(hklm-hklmHalf,SSGT)
        Uniq = np.concatenate((Uniq,-Uniq))+hklmHalf         # put in Friedel pairs & make as index to Farray
        Phi = np.concatenate((Phi,-Phi))                      # and their phase shifts
        Fh0 = Fhklm[hklm[0],hklm[1],hklm[2],hklm[3]]
        ang0 = np.angle(Fh0,deg=True)/360.
        for H,phi in list(zip(Uniq,Phi))[1:]:
            H = np.array(H,dtype=int)
            ang = (np.angle(Fhklm[H[0],H[1],H[2],H[3]],deg=True)/360.-phi)
            dH = H-hklm
            dang = ang-ang0
            DH.append(dH)
            Dphi.append((dang+.5) % 1.0)
        if i > 20 or len(DH) > 30:
            break
        i += 1
    DH = np.array(DH)
    print (' map offset no.of terms: %d from %d reflections'%(len(DH),len(Flist)))
    Dphi = np.array(Dphi)
    steps = np.array(hklmShape)
    X,Y,Z,T = np.mgrid[0:1:1./steps[0],0:1:1./steps[1],0:1:1./steps[2],0:1:1./steps[3]]    #trial fractional offsets
    XYZT = np.array(list(zip(X.flatten(),Y.flatten(),Z.flatten(),T.flatten())))
    Dang = (np.dot(XYZT,DH.T)+.5)%1.-Dphi
    Mmap = np.reshape(np.sum((Dang)**2,axis=1),newshape=steps)/len(DH)    #mean squared phase misfit per trial offset
    hist,bins = np.histogram(Mmap,bins=1000)
#    for i,item in enumerate(hist[:10]):
#        print item,bins[i]
    chisq = np.min(Mmap)
    DX = -np.array(np.unravel_index(np.argmin(Mmap),Mmap.shape))
    print (' map offset chi**2: %.3f, map offset: %d %d %d %d'%(chisq,DX[0],DX[1],DX[2],DX[3]))
#    print (np.dot(DX,DH.T)+.5)%1.-Dphi
    return DX
def SSChargeFlip(data,reflDict,pgbar):
    '''Solve a modulated (3+1 superspace) structure by charge flipping over
    hklm: normalized structure factors E (optionally scaled by the form
    factor of flipData['Norm element']) start from random phases; density is
    flipped each cycle until Rcf < 5%, the user cancels, or 10000 cycles.

    :param dict data: phase data structure; uses data['General']
    :param dict reflDict: superspace reflection set with 'RefList' and 'Type'
    :param pgbar: progress bar widget with an Update method
    :returns: (mapData,map4DData): the 3D (m=0 section) and 4D map dicts
    '''
    generalData = data['General']
    mapData = generalData['Map']
    map4DData = {}
    flipData = generalData['Flip']
    FFtable = {}
    if 'None' not in flipData['Norm element']:
        normElem = flipData['Norm element'].upper()
        FFs = G2el.GetFormFactorCoeff(normElem.split('+')[0].split('-')[0])
        for ff in FFs:
            if ff['Symbol'] == normElem:
                FFtable.update(ff)
    dmin = flipData['Resolution']
    SGData = generalData['SGData']
    SSGData = generalData['SSGData']
    SSGMT = np.array([ops[0].T for ops in SSGData['SSGOps']])    #superspace symmetry rotations (transposed)
    SSGT = np.array([ops[1] for ops in SSGData['SSGOps']])    #superspace symmetry translations
    cell = generalData['Cell'][1:8]    #a,b,c,alpha,beta,gamma,Vol
    A = G2lat.cell2A(cell[:6])
    Vol = cell[6]
    maxM = 4    #largest satellite index m carried in the map
    Hmax = np.asarray(G2lat.getHKLmax(dmin,SGData,A)+[maxM,],dtype='i')+1
    adjHKLmax(SGData,Hmax)
    Ehkl = np.zeros(shape=2*Hmax,dtype='c16')       #2X64bits per complex no.
    time0 = time.time()
    for iref,ref in enumerate(reflDict['RefList']):
        dsp = ref[5]
        if dsp > dmin:
            ff = 0.1*Vol    #est. no. atoms for ~10A**3/atom
            if FFtable:
                SQ = 0.25/dsp**2
                ff *= G2el.ScatFac(FFtable,SQ)[0]
            if ref[9] > 0.:         #use only +ve Fobs**2
                E = np.sqrt(ref[9])/ff
            else:
                E = 0.
            ph = ref[11]
            ph = rn.uniform(0.,360.)    #stored phase discarded: charge flipping starts from random phases
            Uniq = np.inner(ref[:4],SSGMT)
            Phi = np.inner(ref[:4],SSGT)
            for i,hklm in enumerate(Uniq):        #uses uniq
                hklm = np.asarray(hklm,dtype='i')
                dp = 360.*Phi[i]                #and phi
                a = cosd(ph+dp)
                b = sind(ph+dp)
                phasep = complex(a,b)
                phasem = complex(a,-b)
                h,k,l,m = hklm+Hmax
                Ehkl[h,k,l,m] = E*phasep
                h,k,l,m = -hklm+Hmax       #Friedel pair refl.
                Ehkl[h,k,l,m] = E*phasem
#    Ehkl[Hmax] = 0.00001           #this to preserve F[0,0,0]
    CEhkl = copy.copy(Ehkl)
    MEhkl = ma.array(Ehkl,mask=(Ehkl==0.0))
    Emask = ma.getmask(MEhkl)
    sumE = np.sum(ma.array(np.absolute(CEhkl),mask=Emask))
    Ncyc = 0
    old = np.seterr(all='raise')
    while True:
        CErho = np.real(fft.fftn(fft.fftshift(CEhkl)))*(1.+0j)
        CEsig = np.std(CErho)
        CFrho = np.where(np.real(CErho) >= flipData['k-factor']*CEsig,CErho,-CErho)    #flip low density
        CFrho = np.where(np.real(CErho) <= flipData['k-Max']*CEsig,CFrho,-CFrho)      #solves U atom problem!
        CFhkl = fft.ifftshift(fft.ifftn(CFrho))
        CFhkl = np.where(CFhkl,CFhkl,1.0)           #avoid divide by zero
        phase = CFhkl/np.absolute(CFhkl)    #keep flipped phases, restore E amplitudes
        CEhkl = np.absolute(Ehkl)*phase
        Ncyc += 1
        sumCF = np.sum(ma.array(np.absolute(CFhkl),mask=Emask))
        DEhkl = np.absolute(np.absolute(Ehkl)/sumE-np.absolute(CFhkl)/sumCF)
        Rcf = min(100.,np.sum(ma.array(DEhkl,mask=Emask)*100.))
        if Rcf < 5.:
            break
        GoOn = pgbar.Update(Rcf,newmsg='%s%8.3f%s\n%s %d'%('Residual Rcf =',Rcf,'%','No.cycles = ',Ncyc))[0]
        if not GoOn or Ncyc > 10000:
            break
    np.seterr(**old)
    print (' Charge flip time: %.4f no. elements: %d'%(time.time()-time0,Ehkl.size))
    CErho = np.real(fft.fftn(fft.fftshift(CEhkl[:,:,:,maxM+1])))/10.    #? to get on same scale as e-map; m=0 section
    SSrho = np.real(fft.fftn(fft.fftshift(CEhkl)))/10.          #? ditto
    print (' No.cycles = %d Residual Rcf =%8.3f%s Map size: %s'%(Ncyc,Rcf,'%',str(CErho.shape)))
    roll = findSSOffset(SGData,SSGData,A,CEhkl)               #CEhkl needs to be just the observed set, not the full set!
    mapData['Rcf'] = Rcf
    mapData['rho'] = np.roll(np.roll(np.roll(CErho,roll[0],axis=0),roll[1],axis=1),roll[2],axis=2)
    mapData['rhoMax'] = max(np.max(mapData['rho']),-np.min(mapData['rho']))
    mapData['minmax'] = [np.max(mapData['rho']),np.min(mapData['rho'])]
    mapData['Type'] = reflDict['Type']
    map4DData['Rcf'] = Rcf
    map4DData['rho'] = np.real(np.roll(np.roll(np.roll(np.roll(SSrho,roll[0],axis=0),roll[1],axis=1),roll[2],axis=2),roll[3],axis=3))
    map4DData['rhoMax'] = max(np.max(map4DData['rho']),-np.min(map4DData['rho']))
    map4DData['minmax'] = [np.max(map4DData['rho']),np.min(map4DData['rho'])]
    map4DData['Type'] = reflDict['Type']
    return mapData,map4DData
def getRho(xyz,mapData):
    ''' get scattering density at a point by 8-point interpolation
    :param xyz: fractional coordinate to be probed (reduced into the unit cell)
    :param mapData: dict of map data; only 'rho' is used
    :returns: density at xyz (0.0 if the map is missing or empty)
    '''
    rollMap = lambda rho,roll: np.roll(np.roll(np.roll(rho,roll[0],axis=0),roll[1],axis=1),roll[2],axis=2)
    if not len(mapData):
        return 0.0
    rho = copy.copy(mapData['rho'])     #don't mess up original
    if not len(rho):
        return 0.0
    mapShape = np.array(rho.shape)
    mapStep = 1./mapShape
    X = np.array(xyz)%1.    #get into unit cell
    I = np.array(X*mapShape,dtype='int')
    D = X-I*mapStep         #position inside map cell
    D12 = D[0]*D[1]    #pair/triple products used as the 8-corner weights
    D13 = D[0]*D[2]
    D23 = D[1]*D[2]
    D123 = np.prod(D)
    Rho = rollMap(rho,-I)    #shifts map so point is in corner
    R = Rho[0,0,0]*(1.-np.sum(D))+Rho[1,0,0]*D[0]+Rho[0,1,0]*D[1]+Rho[0,0,1]*D[2]+ \
        Rho[1,1,1]*D123+Rho[0,1,1]*(D23-D123)+Rho[1,0,1]*(D13-D123)+Rho[1,1,0]*(D12-D123)+ \
        Rho[0,0,0]*(D12+D13+D23-D123)-Rho[0,0,1]*(D13+D23-D123)- \
        Rho[0,1,0]*(D23+D12-D123)-Rho[1,0,0]*(D13+D12-D123)
    return R
def SearchMap(generalData,drawingData,Neg=False):
    '''Does a search of a density map for peaks meeting the criterion of peak
    height is greater than mapData['cutOff']/100 of mapData['rhoMax'] where
    mapData is data['General']['mapData']; the map is also in mapData.

    :param generalData: the phase data structure; includes the map
    :param drawingData: the drawing data structure
    :param Neg: if True then search for negative peaks (i.e. H-atoms & neutron data)

    :returns: (peaks,mags,dzeros,dcent) where

        * peaks : ndarray
            x,y,z positions of the peaks found in the map
        * mags : ndarray
            the magnitudes of the peaks
        * dzeros : ndarray
            the distance of the peaks from the unit cell origin
        * dcent : ndarray
            the distance of the peaks from the unit cell center

    NB: if no map is defined only (peaks,mags) is returned (historic behavior)
    '''
    rollMap = lambda rho,roll: np.roll(np.roll(np.roll(rho,roll[0],axis=0),roll[1],axis=1),roll[2],axis=2)
    norm = 1./(np.sqrt(3.)*np.sqrt(2.*np.pi)**3)
    def fixSpecialPos(xyz,SGData,Amat):
        # average symmetry-equivalent positions within 0.5A of xyz
        # (peak sitting on or near a special position)
        equivs = G2spc.GenAtom(xyz,SGData,Move=True)
        X = []
        xyzs = [equiv[0] for equiv in equivs]
        for x in xyzs:
            if np.sqrt(np.sum(np.inner(Amat,xyz-x)**2,axis=0))<0.5:
                X.append(x)
        if len(X) > 1:
            return np.average(X,axis=0)
        else:
            return xyz
    def rhoCalc(parms,rX,rY,rZ,res,SGLaue):
        # model peak density: Gaussian approximated by its 2nd-order expansion
        Mag,x0,y0,z0,sig = parms
        z = -((x0-rX)**2+(y0-rY)**2+(z0-rZ)**2)/(2.*sig**2)
#        return norm*Mag*np.exp(z)/(sig*res**3)            #not slower but some faults in LS
        return norm*Mag*(1.+z+z**2/2.)/(sig*res**3)
    def peakFunc(parms,rX,rY,rZ,rho,res,SGLaue):
        # residual: observed minus model peak density
        Mag,x0,y0,z0,sig = parms
        M = rho-rhoCalc(parms,rX,rY,rZ,res,SGLaue)
        return M
    def peakHess(parms,rX,rY,rZ,rho,res,SGLaue):
        # numerical derivative vector & Hessian for HessianLSQ
        Mag,x0,y0,z0,sig = parms
        dMdv = np.zeros(([5,]+list(rX.shape)))
        delt = .01
        for i in range(5):
            parms[i] -= delt
            rhoCm = rhoCalc(parms,rX,rY,rZ,res,SGLaue)
            parms[i] += 2.*delt
            rhoCp = rhoCalc(parms,rX,rY,rZ,res,SGLaue)
            parms[i] -= delt
            dMdv[i] = (rhoCp-rhoCm)/(2.*delt)
        rhoC = rhoCalc(parms,rX,rY,rZ,res,SGLaue)
        Vec = np.sum(np.sum(np.sum(dMdv*(rho-rhoC),axis=3),axis=2),axis=1)
        dMdv = np.reshape(dMdv,(5,rX.size))
        Hess = np.inner(dMdv,dMdv)
        return Vec,Hess
    SGData = generalData['SGData']
    Amat,Bmat = G2lat.cell2AB(generalData['Cell'][1:7])
    peaks = []
    mags = []
    dzeros = []
    dcent = []
    try:
        mapData = generalData['Map']
        contLevel = mapData['cutOff']*mapData['rhoMax']/100.
        if Neg:
            rho = -copy.copy(mapData['rho'])     #flip +/-
        else:
            rho = copy.copy(mapData['rho'])     #don't mess up original
        mapHalf = np.array(rho.shape)/2
        res = mapData['Resolution']
        incre = np.array(rho.shape,dtype=float)    #was dtype=np.float; that alias is removed in numpy >= 1.24
        step = max(1.0,1./res)+1
        steps = np.array((3*[step,]),dtype='int32')
    except KeyError:
        print ('**** ERROR - Fourier map not defined')
        return peaks,mags    # NOTE(review): error path returns a 2-tuple, success path a 4-tuple - confirm callers
    rhoMask = ma.array(rho,mask=(rho<contLevel))
    indices = (-1,0,1)
    rolls = np.array([[h,k,l] for h in indices for k in indices for l in indices])
    for roll in rolls:
        if np.any(roll):
            rhoMask = ma.array(rhoMask,mask=(rhoMask-rollMap(rho,roll)<=0.))    #keep only local maxima
    indx = np.transpose(rhoMask.nonzero())
    peaks = indx/incre
    mags = rhoMask[rhoMask.nonzero()]
    for i,[ind,peak,mag] in enumerate(zip(indx,peaks,mags)):
        rho = rollMap(rho,ind)    #shift map so the peak is at the center
        rMM = mapHalf-steps
        rMP = mapHalf+steps+1
        rhoPeak = rho[int(rMM[0]):int(rMP[0]),int(rMM[1]):int(rMP[1]),int(rMM[2]):int(rMP[2])]
        peakInt = np.sum(rhoPeak)*res**3
        rX,rY,rZ = np.mgrid[int(rMM[0]):int(rMP[0]),int(rMM[1]):int(rMP[1]),int(rMM[2]):int(rMP[2])]
        x0 = [peakInt,mapHalf[0],mapHalf[1],mapHalf[2],2.0]          #magnitude, position & width(sig)
        result = HessianLSQ(peakFunc,x0,Hess=peakHess,
            args=(rX,rY,rZ,rhoPeak,res,SGData['SGLaue']),ftol=.01,maxcyc=10)
        x1 = result[0]
        if not np.any(x1 < 0):
            # NOTE(review): 'peak' is rebound here, not written back into 'peaks',
            # so the refined position appears unused - confirm intent
            peak = (np.array(x1[1:4])-ind)/incre
            peak = fixSpecialPos(peak,SGData,Amat)
        rho = rollMap(rho,-ind)    #shift the map back
    cent = np.ones(3)*.5
    dzeros = np.sqrt(np.sum(np.inner(Amat,peaks)**2,axis=0))    #Cartesian distance from origin
    dcent = np.sqrt(np.sum(np.inner(Amat,peaks-cent)**2,axis=0))    #Cartesian distance from cell center
    if Neg:     #want negative magnitudes for negative peaks
        return np.array(peaks),-np.array([mags,]).T,np.array([dzeros,]).T,np.array([dcent,]).T
    else:
        return np.array(peaks),np.array([mags,]).T,np.array([dzeros,]).T,np.array([dcent,]).T
def sortArray(data,pos,reverse=False):
    '''Sort a list of indexable items on the element at index pos.
    The ascending sort is stable; with reverse=True the ascending result is
    reversed, so ties come out in descending original order (this matches the
    historic behavior of this routine). If any item lacks index pos the input
    is returned unsorted.

    :param list data: list of indexable items (lists/tuples)
    :param int pos: index of the sort key within each item
    :param bool reverse: if True return the reversed (descending) order
    :returns: a new sorted list, or data itself if pos is out of range
    '''
    # decorate-sort-undecorate; avoids the old dict keyed on (value,index),
    # which failed for unhashable key values and wasted O(n) memory
    keyed = []
    for i,item in enumerate(data):
        try:
            keyed.append(((item[pos],i),item))    #(key,original index) keeps the sort stable
        except IndexError:
            return data
    keyed.sort(key=lambda ki: ki[0])
    if reverse:
        keyed.reverse()
    return [item for key,item in keyed]
def PeaksEquiv(data,Ind):
    '''Find the equivalent map peaks for those selected. Works on the
    contents of data['Map Peaks'].

    :param data: the phase data structure
    :param list Ind: list of selected peak indices
    :returns: augmented list of peaks including those related by symmetry to the
      ones in Ind
    '''
    def Duplicate(xyz,peaks,Amat):
        # True if the Cartesian position of xyz is (roughly, via allclose
        # atol=0.5) within 0.5A of any entry in peaks
        if True in [np.allclose(np.inner(Amat,xyz),np.inner(Amat,peak),atol=0.5) for peak in peaks]:
            return True
        return False
    generalData = data['General']
    Amat,Bmat = G2lat.cell2AB(generalData['Cell'][1:7])
    SGData = generalData['SGData']
    mapPeaks = data['Map Peaks']
    XYZ = np.array([xyz[1:4] for xyz in mapPeaks])
    Indx = {}
    for ind in Ind:
        xyz = np.array(mapPeaks[ind][1:4])
        xyzs = np.array([equiv[0] for equiv in G2spc.GenAtom(xyz,SGData,Move=True)])
        # NOTE(review): Indx[jnd] is overwritten on each pass of the outer loop,
        # so with several selected peaks only the last one determines the
        # result - confirm intent
        for jnd,xyz in enumerate(XYZ):
            Indx[jnd] = Duplicate(xyz,xyzs,Amat)
    Ind = []
    for ind in Indx:
        if Indx[ind]:
            Ind.append(ind)
    return Ind
def PeaksUnique(data,Ind):
    '''Finds the symmetry unique set of peaks from those selected. Works on the
    contents of data['Map Peaks'].

    :param data: the phase data structure
    :param list Ind: list of selected peak indices
    :returns: the list of symmetry unique peaks from among those given in Ind
    '''
#    XYZE = np.array([[equiv[0] for equiv in G2spc.GenAtom(xyz[1:4],SGData,Move=True)] for xyz in mapPeaks]) #keep this!!
    def noDuplicate(xyz,peaks,Amat):
        # False if the Cartesian position of xyz is (roughly, via allclose
        # atol=0.5) within 0.5A of any entry in peaks
        if True in [np.allclose(np.inner(Amat,xyz),np.inner(Amat,peak),atol=0.5) for peak in peaks]:
            return False
        return True
    generalData = data['General']
    Amat,Bmat = G2lat.cell2AB(generalData['Cell'][1:7])
    SGData = generalData['SGData']
    mapPeaks = data['Map Peaks']
    Indx = {}
    XYZ = {}
    for ind in Ind:
        XYZ[ind] = np.array(mapPeaks[ind][1:4])
        Indx[ind] = True    #assume unique until shown otherwise
    for ind in Ind:    #for each still-unique peak, mark its equivalents as duplicates
        if Indx[ind]:
            xyz = XYZ[ind]
            for jnd in Ind:
                if ind != jnd and Indx[jnd]:
                    Equiv = G2spc.GenAtom(XYZ[jnd],SGData,Move=True)
                    xyzs = np.array([equiv[0] for equiv in Equiv])
                    Indx[jnd] = noDuplicate(xyz,xyzs,Amat)
    Ind = []
    for ind in Indx:
        if Indx[ind]:
            Ind.append(ind)
    return Ind
################################################################################
##### single peak fitting profile fxn stuff
################################################################################
def getCWsig(ins,pos):
    '''get CW peak profile sigma^2 (U*tan^2 + V*tan + W form)
    :param dict ins: instrument parameters with at least 'U', 'V', & 'W'
      as values only
    :param float pos: 2-theta of peak
    :returns: float: peak sigma^2
    '''
    halfTan = tand(pos/2.0)
    return ins['U']*halfTan**2+ins['V']*halfTan+ins['W']
def getCWsigDeriv(pos):
    '''get derivatives of CW peak profile sigma^2 wrt U, V & W
    :param float pos: 2-theta of peak
    :returns: tuple: d(sig^2)/dU, d(sig^2)/dV & d(sig^2)/dW
    '''
    halfTan = tand(pos/2.0)
    return halfTan**2,halfTan,1.0
def getCWgam(ins,pos):
    '''get CW peak profile gamma
    :param dict ins: instrument parameters with at least 'X', 'Y' & 'Z'
      as values only
    :param float pos: 2-theta of peak
    :returns: float: peak gamma
    '''
    halfPos = pos/2.0
    return ins['X']/cosd(halfPos)+ins['Y']*tand(halfPos)+ins['Z']
def getCWgamDeriv(pos):
    '''get derivatives of CW peak profile gamma wrt X, Y & Z
    :param float pos: 2-theta of peak
    :returns: tuple: d(gam)/dX, d(gam)/dY & d(gam)/dZ
    '''
    halfPos = pos/2.0
    return 1./cosd(halfPos),tand(halfPos),1.0
def getTOFsig(ins,dsp):
    '''get TOF peak profile sigma^2
    :param dict ins: instrument parameters with at least 'sig-0', 'sig-1',
      'sig-2' & 'sig-q' as values only
    :param float dsp: d-spacing of peak
    :returns: float: peak sigma^2
    '''
    dsq = dsp**2
    dq4 = dsp**4
    return ins['sig-0']+ins['sig-1']*dsq+ins['sig-2']*dq4+ins['sig-q']*dsp
def getTOFsigDeriv(dsp):
    '''get derivatives of TOF peak profile sigma^2 wrt sig-0, sig-1, sig-2 & sig-q
    :param float dsp: d-spacing of peak
    :returns: tuple: d(sig^2)/d(sig-0), d(sig^2)/d(sig-1), d(sig^2)/d(sig-2) &
      d(sig^2)/d(sig-q)
    '''
    derivs = (1.0,dsp**2,dsp**4,dsp)
    return derivs
def getTOFgamma(ins,dsp):
    '''get TOF peak profile gamma
    :param dict ins: instrument parameters with at least 'X', 'Y' & 'Z'
      as values only
    :param float dsp: d-spacing of peak
    :returns: float: peak gamma
    '''
    gam = ins['Z']
    gam += ins['X']*dsp
    gam += ins['Y']*dsp**2
    return gam
def getTOFgammaDeriv(dsp):
    '''get derivatives of TOF peak profile gamma wrt X, Y & Z
    :param float dsp: d-spacing of peak
    :returns: tuple: d(gam)/dX, d(gam)/dY & d(gam)/dZ
    '''
    dGdX = dsp
    dGdY = dsp**2
    dGdZ = 1.0
    return dGdX,dGdY,dGdZ
def getTOFbeta(ins,dsp):
    '''get TOF peak profile beta
    :param dict ins: instrument parameters with at least 'beta-0', 'beta-1'
      & 'beta-q' as values only
    :param float dsp: d-spacing of peak
    :returns: float: peak beta
    '''
    d2 = dsp**2
    d4 = dsp**4
    return ins['beta-0']+ins['beta-1']/d4+ins['beta-q']/d2
def getTOFbetaDeriv(dsp):
    '''get derivatives of TOF peak profile beta wrt beta-0, beta-1 & beta-q
    :param float dsp: d-spacing of peak
    :returns: tuple: d(beta)/d(beta-0), d(beta)/d(beta-1) & d(beta)/d(beta-q)
    '''
    dBd0 = 1.0
    dBd1 = 1./dsp**4
    dBdq = 1./dsp**2
    return dBd0,dBd1,dBdq
def getTOFalpha(ins,dsp):
    '''get TOF peak profile alpha
    :param dict ins: instrument parameters with at least 'alpha'
      as values only
    :param float dsp: d-spacing of peak
    :returns: float: peak alpha
    '''
    alp = ins['alpha']/dsp
    return alp
def getTOFalphaDeriv(dsp):
    '''get derivative of TOF peak profile alpha wrt the alpha coefficient
    :param float dsp: d-spacing of peak
    :returns: float: d(alp)/d(alpha)
    '''
    dAdalp = 1./dsp
    return dAdalp
def setPeakparms(Parms,Parms2,pos,mag,ifQ=False,useFit=False):
    '''set starting peak parameters for single peak fits from plot selection or auto selection

    :param dict Parms: instrument parameters dictionary; each entry is a
      sequence [default value, fitted value, refine flag]
    :param dict Parms2: table lookup for TOF profile coefficients
    :param float pos: peak position in 2-theta, TOF or Q (ifQ=True)
    :param float mag: peak top magnitude from pick
    :param bool ifQ: True if pos in Q
    :param bool useFit: True if use fitted CW Parms values (not defaults)

    :returns: list XY: peak list entry:
        for CW: [pos,0,mag,1,sig,0,gam,0]
        for TOF: [pos,0,mag,1,alp,0,bet,0,sig,0,gam,0]
        NB: mag refinement set by default, all others off
    '''
    ind = 0
    if useFit:
        ind = 1                     # index 1 selects the fitted values
    ins = {}
    if 'C' in Parms['Type'][0]:                            #CW data - TOF later in an else
        for x in ['U','V','W','X','Y','Z']:
            ins[x] = Parms[x][ind]
        if ifQ:                              #qplot - convert back to 2-theta
            pos = 2.0*asind(pos*getWave(Parms)/(4*math.pi))
        sig = getCWsig(ins,pos)
        gam = getCWgam(ins,pos)
        XY = [pos,0, mag,1, sig,0, gam,0]       #default refine intensity 1st
    else:                                                  #TOF data
        if ifQ:
            dsp = 2.*np.pi/pos
            # BUGFIX: Parms['difC'] is the whole [default,value,refine] entry
            # (see the division by Parms['difC'][1] below); index the value.
            pos = Parms['difC'][1]*dsp
        else:
            dsp = pos/Parms['difC'][1]
        if 'Pdabc' in Parms2:
            # alpha/beta come from the instrument's Pdabc lookup table
            for x in ['sig-0','sig-1','sig-2','sig-q','X','Y','Z']:
                ins[x] = Parms[x][ind]
            Pdabc = Parms2['Pdabc'].T
            alp = np.interp(dsp,Pdabc[0],Pdabc[1])
            bet = np.interp(dsp,Pdabc[0],Pdabc[2])
        else:
            for x in ['alpha','beta-0','beta-1','beta-q','sig-0','sig-1','sig-2','sig-q','X','Y','Z']:
                ins[x] = Parms[x][ind]
            alp = getTOFalpha(ins,dsp)
            bet = getTOFbeta(ins,dsp)
        sig = getTOFsig(ins,dsp)
        gam = getTOFgamma(ins,dsp)
        XY = [pos,0,mag,1,alp,0,bet,0,sig,0,gam,0]      #default refine intensity 1st
    return XY
################################################################################
##### MC/SA stuff
################################################################################
#scipy/optimize/anneal.py code modified by R. Von Dreele 2013
# Original Author: Travis Oliphant 2002
# Bug-fixes in 2006 by Tim Leslie
import numpy
from numpy import asarray, exp, squeeze, sign, \
all, shape, array, where
from numpy import random
#__all__ = ['anneal']
_double_min = numpy.finfo(float).min
_double_max = numpy.finfo(float).max
class base_schedule(object):
    '''Base class for simulated annealing cooling schedules.

    Tracks the bookkeeping shared by all schedules: acceptance and test
    counts, number of function evaluations, the current temperature T and
    the parameter bounds.  Subclasses override update_guess/update_temp.
    '''
    def __init__(self):
        self.dwell = 20             # trial moves per temperature step
        self.lower = 0.
        self.upper = 1.
        self.Ninit = 50             # random probes used to estimate T0
        self.accepted = 0
        self.tests = 0
        self.feval = 0
        self.k = 0                  # cooling-step counter
        self.T = None               # current temperature

    def init(self, **options):
        '''Configure the schedule; keyword options become attributes.
        Infinite bounds are clipped to the largest finite float.'''
        self.__dict__.update(options)
        self.lower = asarray(self.lower)
        # numpy.NINF/numpy.PINF were removed in numpy 2.0; use +/-inf directly
        self.lower = where(self.lower == -numpy.inf, -numpy.finfo(float).max, self.lower)
        self.upper = asarray(self.upper)
        self.upper = where(self.upper == numpy.inf, numpy.finfo(float).max, self.upper)
        self.k = 0
        self.accepted = 0
        self.feval = 0
        self.tests = 0

    def getstart_temp(self, best_state):
        """ Find a matching starting temperature and starting parameters vector
        i.e. find x0 such that func(x0) = T0.

        :param best_state: _state
            A _state object to store the function value and x0 found.

        :returns: x0 : array
            The starting parameters vector.
        """
        assert self.dims is not None
        lrange = self.lower
        urange = self.upper
        fmax = numpy.finfo(float).min
        fmin = numpy.finfo(float).max
        for _ in range(self.Ninit):
            x0 = random.uniform(size=self.dims)*(urange-lrange) + lrange
            fval = self.func(x0, *self.args)
            self.feval += 1
            if fval > fmax:
                fmax = fval
            if fval < fmin:
                fmin = fval
                best_state.cost = fval
                best_state.x = array(x0)
        # spread of the probed costs sets the starting temperature
        self.T0 = (fmax-fmin)*1.5
        return best_state.x

    def set_range(self,x0,frac):
        '''Re-center the bounds to a band of half-width frac*(upper-lower) about x0.'''
        delrange = frac*(self.upper-self.lower)
        self.upper = x0+delrange
        self.lower = x0-delrange

    def accept_test(self, dE):
        '''Metropolis criterion: always accept downhill moves (dE < 0);
        accept uphill moves with probability exp(-dE/T).  Returns 1 or 0.'''
        T = self.T
        self.tests += 1
        if dE < 0:
            self.accepted += 1
            return 1
        p = exp(-dE*1.0/T)
        if (p > random.uniform(0.0, 1.0)):
            self.accepted += 1
            return 1
        return 0

    def update_guess(self, x0):
        '''Default move: a fresh uniform draw over [lower, upper]; ignores x0.'''
        return np.squeeze(np.random.uniform(0.,1.,size=self.dims))*(self.upper-self.lower)+self.lower

    def update_temp(self, x0):
        pass
class fast_sa(base_schedule):
    '''"Fast" annealing schedule: heavy-tailed proposal moves plus
    exponential cooling T = T0*exp(-c*k**quench).'''
    def init(self, **options):
        # all configuration arrives via keyword options
        self.__dict__.update(options)

    def update_guess(self, x0):
        '''Propose a new point from the fast-annealing visiting distribution.'''
        x0 = asarray(x0)
        u = squeeze(random.uniform(0.0, 1.0, size=self.dims))
        T = self.T
        y = sign(u-0.5)*T*((1+1.0/T)**abs(2*u-1)-1.0)
        xc = (y+1.0)/2.0
        return xc*(self.upper - self.lower)+self.lower

    def update_temp(self):
        '''Exponential cooling; advances the step counter k afterwards.'''
        self.T = self.T0*exp(-self.c * self.k**(self.quench))
        self.k += 1
        return
class log_sa(base_schedule): #OK
    '''Geometric ("log") cooling schedule: T = T0*slope**k.'''
    def init(self,**options):
        self.__dict__.update(options)

    def update_guess(self,x0): #same as default #TODO - is this a reasonable update procedure?
        '''Same heavy-tailed proposal as fast_sa.'''
        u = squeeze(random.uniform(0.0, 1.0, size=self.dims))
        T = self.T
        y = sign(u-0.5)*T*((1+1.0/T)**abs(2*u-1)-1.0)
        return ((y+1.0)/2.0)*(self.upper - self.lower)+self.lower

    def update_temp(self):
        '''Geometric cooling; the step counter k is advanced first.'''
        self.k += 1
        self.T = self.T0*self.slope**self.k
class _state(object):
    '''Value holder pairing a parameter vector (x) with its cost.'''
    def __init__(self):
        self.x = None       # parameter vector
        self.cost = None    # objective value at x
def makeTsched(data):
    '''Build the list of annealing temperatures implied by the MC/SA controls.

    :param dict data: MC/SA controls; uses 'Algorithm' ('fast' or 'log'),
      'fast parms' (quench & c) or 'log slope', and 'Annealing'
      (start & final temperatures; falsy entries default to 50. and 0.001)
    :returns: list of temperatures; the starting temperature is excluded
    :raises ValueError: if data['Algorithm'] is neither 'fast' nor 'log'
    '''
    if data['Algorithm'] == 'fast':
        sched = fast_sa()
        sched.quench = data['fast parms'][0]
        sched.c = data['fast parms'][1]
    elif data['Algorithm'] == 'log':
        sched = log_sa()
        sched.slope = data['log slope']
    else:
        # previously fell through to an UnboundLocalError; fail clearly instead
        raise ValueError('Unknown MC/SA algorithm: %s'%data['Algorithm'])
    sched.T0 = data['Annealing'][0]
    if not sched.T0:
        sched.T0 = 50.
    Tf = data['Annealing'][1]
    if not Tf:
        Tf = 0.001
    Tsched = [sched.T0,]
    while Tsched[-1] > Tf:
        sched.update_temp()
        Tsched.append(sched.T)
    return Tsched[1:]
def anneal(func, x0, args=(), schedule='fast',
           T0=None, Tf=1e-12, maxeval=None, maxaccept=None, maxiter=400,
           feps=1e-6, quench=1.0, c=1.0,
           lower=-100, upper=100, dwell=50, slope=0.9,ranStart=False,
           ranRange=0.10,autoRan=False,dlg=None):
    """Minimize a function using simulated annealing.

    Schedule is a schedule class implementing the annealing schedule.
    Available ones are 'fast', 'cauchy', 'boltzmann'

    :param callable func: f(x, \*args)
        Function to be optimized.
    :param ndarray x0:
        Initial guess.
    :param tuple args:
        Extra parameters to `func`.
    :param base_schedule schedule:
        Annealing schedule to use (a class).
    :param float T0:
        Initial Temperature (estimated as 1.2 times the largest
        cost-function deviation over random points in the range).
    :param float Tf:
        Final goal temperature.
    :param int maxeval:
        Maximum function evaluations.
    :param int maxaccept:
        Maximum changes to accept.
    :param int maxiter:
        Maximum cooling iterations.
    :param float feps:
        Stopping relative error tolerance for the function value in
        last four coolings.
    :param float quench,c:
        Parameters to alter fast_sa schedule.
    :param float/ndarray lower,upper:
        Lower and upper bounds on `x`.
    :param int dwell:
        The number of times to search the space at each temperature.
    :param float slope:
        Parameter for log schedule
    :param bool ranStart=False:
        True for set 10% of ranges about x

    :returns: (xmin, Jmin, T, feval, iters, accept, retval) where

     * xmin (ndarray): Point giving smallest value found.
     * Jmin (float): Minimum value of function found.
     * T (float): Final temperature.
     * feval (int): Number of function evaluations.
     * iters (int): Number of cooling iterations.
     * accept (int): Number of tests accepted.
     * retval (int): Flag indicating stopping condition:

       * 0: Points no longer changing
       * 1: Cooled to final temperature
       * 2: Maximum function evaluations
       * 3: Maximum cooling iterations reached
       * 4: Maximum accepted query locations reached
       * 5: Final point not the minimum amongst encountered points

    *Notes*:
    Simulated annealing is a random algorithm which uses no derivative
    information from the function being optimized. In practice it has
    been more useful in discrete optimization than continuous
    optimization, as there are usually better algorithms for continuous
    optimization problems.

    Some experimentation by trying the difference temperature
    schedules and altering their parameters is likely required to
    obtain good performance.

    The randomness in the algorithm comes from random sampling in numpy.
    To obtain the same results you can call numpy.random.seed with the
    same seed immediately before calling scipy.optimize.anneal.

    We give a brief description of how the three temperature schedules
    generate new points and vary their temperature. Temperatures are
    only updated with iterations in the outer loop. The inner loop is
    over range(dwell), and new points are generated for
    every iteration in the inner loop. (Though whether the proposed
    new points are accepted is probabilistic.)

    For readability, let d denote the dimension of the inputs to func.
    Also, let x_old denote the previous state, and k denote the
    iteration number of the outer loop. All other variables not
    defined below are input variables to scipy.optimize.anneal itself.

    In the 'fast' schedule the updates are ::

        u ~ Uniform(0, 1, size=d)
        y = sgn(u - 0.5) * T * ((1+ 1/T)**abs(2u-1) -1.0)
        xc = y * (upper - lower)
        x_new = x_old + xc

        T_new = T0 * exp(-c * k**quench)

    """
    ''' Scipy license:
        Copyright (c) 2001, 2002 Enthought, Inc.
    All rights reserved.

    Copyright (c) 2003-2016 SciPy Developers.
    All rights reserved.

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:

      a. Redistributions of source code must retain the above copyright notice,
         this list of conditions and the following disclaimer.
      b. Redistributions in binary form must reproduce the above copyright
         notice, this list of conditions and the following disclaimer in the
         documentation and/or other materials provided with the distribution.
      c. Neither the name of Enthought nor the names of the SciPy Developers
         may be used to endorse or promote products derived from this software
         without specific prior written permission.
    '''
    x0 = asarray(x0)
    lower = asarray(lower)
    upper = asarray(upper)

    # NOTE(review): eval() builds the schedule instance from its name.  Only
    # fast_sa and log_sa are defined in this module, so the 'cauchy' and
    # 'boltzmann' names mentioned in the docstring would raise NameError here.
    schedule = eval(schedule+'_sa()')
    #   initialize the schedule
    schedule.init(dims=shape(x0),func=func,args=args,T0=T0,lower=lower, upper=upper,
        c=c, quench=quench, dwell=dwell, slope=slope)

    current_state, last_state, best_state = _state(), _state(), _state()
    if ranStart:
        schedule.set_range(x0,ranRange)     # shrink the search window about x0
    if T0 is None:
        # probe random points to pick a starting temperature (sets best_state too)
        x0 = schedule.getstart_temp(best_state)
    else:
        x0 = random.uniform(size=len(x0))*(upper-lower) + lower
        best_state.x = None
        # NOTE(review): numpy.Inf was removed in numpy 2.0 (use numpy.inf)
        best_state.cost = numpy.Inf

    last_state.x = asarray(x0).copy()
    fval = func(x0,*args)
    schedule.feval += 1
    last_state.cost = fval
    if last_state.cost < best_state.cost:
        best_state.cost = fval
        best_state.x = asarray(x0).copy()
    schedule.T = schedule.T0
    # seed values force at least four coolings before the feps test can trigger
    fqueue = [100, 300, 500, 700]
    iters = 1
    keepGoing = True
    bestn = 0
    while keepGoing:
        retval = 0
        # inner loop: dwell trial moves at the current temperature
        for n in range(dwell):
            current_state.x = schedule.update_guess(last_state.x)
            current_state.cost = func(current_state.x,*args)
            schedule.feval += 1
            dE = current_state.cost - last_state.cost
            if schedule.accept_test(dE):
                last_state.x = current_state.x.copy()
                last_state.cost = current_state.cost
                if last_state.cost < best_state.cost:
                    best_state.x = last_state.x.copy()
                    best_state.cost = last_state.cost
                    bestn = n
                    if best_state.cost < 1.0 and autoRan:
                        # auto-shrink the search range as the residual improves
                        schedule.set_range(x0,best_state.cost/2.)
        if dlg:
            # progress-bar update; a cancel from the user sets retval 5
            GoOn = dlg.Update(min(100.,best_state.cost*100),
                newmsg='%s%8.5f, %s%d\n%s%8.4f%s'%('Temperature =',schedule.T, \
                    'Best trial:',bestn,  \
                    'MC/SA Residual:',best_state.cost*100,'%', \
                    ))[0]
            if not GoOn:
                best_state.x = last_state.x.copy()
                best_state.cost = last_state.cost
                retval = 5
        schedule.update_temp()
        iters += 1
        # Stopping conditions
        # 0) last saved values of f from each cooling step
        #     are all very similar (effectively cooled)
        # 1) Tf is set and we are below it
        # 2) maxeval is set and we are past it
        # 3) maxiter is set and we are past it
        # 4) maxaccept is set and we are past it
        # 5) user canceled run via progress bar
        fqueue.append(squeeze(last_state.cost))
        fqueue.pop(0)
        af = asarray(fqueue)*1.0
        if retval == 5:
            print (' User terminated run; incomplete MC/SA')
            keepGoing = False
            break
        # NOTE(review): divides by af[0]; a cost of exactly 0. would raise here
        if all(abs((af-af[0])/af[0]) < feps):
            retval = 0
            if abs(af[-1]-best_state.cost) > feps*10:
                retval = 5
                print (" Warning: Cooled to %.4f > selected Tmin %.4f in %d steps"%(squeeze(last_state.cost),Tf,iters-1))
            break
        if (Tf is not None) and (schedule.T < Tf):
#            print ' Minimum T reached in %d steps'%(iters-1)
            retval = 1
            break
        if (maxeval is not None) and (schedule.feval > maxeval):
            retval = 2
            break
        if (iters > maxiter):
            print  (" Warning: Maximum number of iterations exceeded.")
            retval = 3
            break
        if (maxaccept is not None) and (schedule.accepted > maxaccept):
            retval = 4
            break

    return best_state.x, best_state.cost, schedule.T, \
           schedule.feval, iters, schedule.accepted, retval
def worker(iCyc,data,RBdata,reflType,reflData,covData,out_q,out_t,out_n,nprocess=-1):
    '''Child-process entry point: run iCyc MC/SA searches and push the models,
    structure-factor times and SF-evaluation counts onto the three queues.'''
    outlist = []
    timelist = []
    nsflist = []
    # make sure each process has a different random start
    random.seed(int(time.time())%100000+nprocess)
    for _ in range(iCyc):
        model,sftime,nsf = mcsaSearch(data,RBdata,reflType,reflData,covData,None,False)[:3]
        outlist.append(model)
        timelist.append(sftime)
        nsflist.append(nsf)
        print (' MC/SA final fit: %.3f%% structure factor time: %.3f'%(100*model[2],sftime))
    out_q.put(outlist)
    out_t.put(timelist)
    out_n.put(nsflist)
def MPmcsaSearch(nCyc,data,RBdata,reflType,reflData,covData,nprocs):
    '''Distribute nCyc MC/SA search cycles over nprocs worker processes.

    :returns: (resultlist, total structure-factor time, total SF evaluations)
    '''
    import multiprocessing as mp
    out_q = mp.Queue()
    out_t = mp.Queue()
    out_n = mp.Queue()
    # deal the cycles round-robin over the processes
    iCyc = np.zeros(nprocs)
    for i in range(nCyc):
        iCyc[i%nprocs] += 1
    procs = []
    for i in range(nprocs):
        p = mp.Process(target=worker,args=(int(iCyc[i]),data,RBdata,reflType,reflData,covData,out_q,out_t,out_n,i))
        p.start()
        procs.append(p)
    resultlist = []
    totsftime = 0.
    totnsf = 0
    # drain one batch of results per process before joining
    for _ in range(nprocs):
        resultlist += out_q.get()
        totsftime += np.sum(out_t.get())
        totnsf += np.sum(out_n.get())
    for p in procs:
        p.join()
    return resultlist,totsftime,totnsf
def mcsaSearch(data,RBdata,reflType,reflData,covData,pgbar,start=True):
    '''Perform a Monte Carlo/simulated annealing (MC/SA) structure solution search.

    :param dict data: phase data tree ('General', 'Atoms', 'MCSA' entries used)
    :param dict RBdata: rigid body dictionary ('Vector' & 'Residue' entries)
    :param str reflType: reflection data type (not referenced here; the
        'Data source' name in the MCSA controls selects the branch)
    :param list reflData: reflection records (PWDR, Pawley or HKLF layout)
    :param dict covData: covariance data; used only for the Pawley branch
    :param pgbar: progress bar dialog or None
    :param bool start: True to print the run header information once

    :returns: (Result,tsum,nsum,rcov) where Result is
        [False,False,residual,0.0, refined parameter values..., varyList],
        tsum/nsum are accumulated structure-factor time & call count and
        rcov is the reflection covariance matrix used.
    '''
    class RandomDisplacementBounds(object):
        """random displacement with bounds"""
        def __init__(self, xmin, xmax, stepsize=0.5):
            self.xmin = xmin
            self.xmax = xmax
            self.stepsize = stepsize
        def __call__(self, x):
            """take a random step but ensure the new position is within the bounds"""
            while True:
                # this could be done in a much more clever way, but it will work for example purposes
                steps = self.xmax-self.xmin
                xnew = x + np.random.uniform(-self.stepsize*steps, self.stepsize*steps, np.shape(x))
                if np.all(xnew < self.xmax) and np.all(xnew > self.xmin):
                    break
            return xnew
    # module-level accumulators updated by mcsaCalc (also read by worker callers)
    global tsum,nsum
    tsum = 0.
    nsum = 0
    def getMDparms(item,pfx,parmDict,varyList):
        # March-Dollase preferred orientation model parameters
        parmDict[pfx+'MDaxis'] = item['axis']
        parmDict[pfx+'MDval'] = item['Coef'][0]
        if item['Coef'][1]:
            varyList += [pfx+'MDval',]
            limits = item['Coef'][2]
            lower.append(limits[0])
            upper.append(limits[1])
    def getAtomparms(item,pfx,aTypes,SGData,parmDict,varyList):
        # single-atom model: position (possibly varied) plus site multiplicity
        parmDict[pfx+'Atype'] = item['atType']
        aTypes |= set([item['atType'],])
        pstr = ['Ax','Ay','Az']
        XYZ = [0,0,0]
        for i in range(3):
            name = pfx+pstr[i]
            parmDict[name] = item['Pos'][0][i]
            XYZ[i] = parmDict[name]
            if item['Pos'][1][i]:
                varyList += [name,]
                limits = item['Pos'][2][i]
                lower.append(limits[0])
                upper.append(limits[1])
        parmDict[pfx+'Amul'] = len(G2spc.GenAtom(XYZ,SGData))
    def getRBparms(item,mfx,aTypes,RBdata,SGData,atNo,parmDict,varyList):
        # rigid-body model: position, orientation quaternion & optional torsions
        parmDict[mfx+'MolCent'] = item['MolCent']
        parmDict[mfx+'RBId'] = item['RBId']
        pstr = ['Px','Py','Pz']
        ostr = ['Qa','Qi','Qj','Qk']    #angle,vector not quaternion
        for i in range(3):
            name = mfx+pstr[i]
            parmDict[name] = item['Pos'][0][i]
            if item['Pos'][1][i]:
                varyList += [name,]
                limits = item['Pos'][2][i]
                lower.append(limits[0])
                upper.append(limits[1])
        AV = item['Ori'][0]
        A = AV[0]
        V = AV[1:]
        for i in range(4):
            name = mfx+ostr[i]
            if i:
                parmDict[name] = V[i-1]
            else:
                parmDict[name] = A
            if item['Ovar'] == 'AV':
                varyList += [name,]
                limits = item['Ori'][2][i]
                lower.append(limits[0])
                upper.append(limits[1])
            elif item['Ovar'] == 'A' and not i:
                varyList += [name,]
                limits = item['Ori'][2][i]
                lower.append(limits[0])
                upper.append(limits[1])
        if 'Tor' in item:      #'Tor' not there for 'Vector' RBs
            for i in range(len(item['Tor'][0])):
                name = mfx+'Tor'+str(i)
                parmDict[name] = item['Tor'][0][i]
                if item['Tor'][1][i]:
                    varyList += [name,]
                    limits = item['Tor'][2][i]
                    lower.append(limits[0])
                    upper.append(limits[1])
        atypes = RBdata[item['Type']][item['RBId']]['rbTypes']
        aTypes |= set(atypes)
        atNo += len(atypes)
        return atNo
    def GetAtomM(Xdata,SGData):
        # site multiplicities for each atom position
        Mdata = []
        for xyz in Xdata:
            Mdata.append(float(len(G2spc.GenAtom(xyz,SGData))))
        return np.array(Mdata)
    def GetAtomT(RBdata,parmDict):
        'Assemble the atom-type index list for all fixed & model atoms'
        atNo = parmDict['atNo']
        nfixAt = parmDict['nfixAt']
        Tdata = atNo*[' ',]
        for iatm in range(nfixAt):
            parm = ':'+str(iatm)+':Atype'
            if parm in parmDict:
                Tdata[iatm] = aTypes.index(parmDict[parm])
        iatm = nfixAt
        for iObj in range(parmDict['nObj']):
            pfx = str(iObj)+':'
            if parmDict[pfx+'Type'] in ['Vector','Residue']:
                if parmDict[pfx+'Type'] == 'Vector':
                    RBRes = RBdata['Vector'][parmDict[pfx+'RBId']]
                    nAtm = len(RBRes['rbVect'][0])
                else:       #Residue
                    RBRes = RBdata['Residue'][parmDict[pfx+'RBId']]
                    nAtm = len(RBRes['rbXYZ'])
                for i in range(nAtm):
                    Tdata[iatm] = aTypes.index(RBRes['rbTypes'][i])
                    iatm += 1
            elif parmDict[pfx+'Type'] == 'Atom':
                atNo = parmDict[pfx+'atNo']
                parm = pfx+'Atype'          #remove extra ':'
                if parm in parmDict:
                    Tdata[atNo] = aTypes.index(parmDict[parm])
                iatm += 1
            else:
                continue        #skips March Dollase
        return Tdata
    def GetAtomX(RBdata,parmDict):
        'Assemble fractional coordinates for all fixed & model atoms'
        Bmat = parmDict['Bmat']
        atNo = parmDict['atNo']
        nfixAt = parmDict['nfixAt']
        Xdata = np.zeros((3,atNo))
        keys = {':Ax':Xdata[0],':Ay':Xdata[1],':Az':Xdata[2]}
        for iatm in range(nfixAt):
            for key in keys:
                parm = ':'+str(iatm)+key
                if parm in parmDict:
                    keys[key][iatm] = parmDict[parm]
        iatm = nfixAt
        for iObj in range(parmDict['nObj']):
            pfx = str(iObj)+':'
            if parmDict[pfx+'Type'] in ['Vector','Residue']:
                if parmDict[pfx+'Type'] == 'Vector':
                    RBRes = RBdata['Vector'][parmDict[pfx+'RBId']]
                    vecs = RBRes['rbVect']
                    mags = RBRes['VectMag']
                    Cart = np.zeros_like(vecs[0])
                    for vec,mag in zip(vecs,mags):
                        Cart += vec*mag
                elif parmDict[pfx+'Type'] == 'Residue':
                    RBRes = RBdata['Residue'][parmDict[pfx+'RBId']]
                    Cart = np.array(RBRes['rbXYZ'])
                    # apply each torsion rotation to its dependent atoms
                    for itor,seq in enumerate(RBRes['rbSeq']):
                        QuatA = AVdeg2Q(parmDict[pfx+'Tor'+str(itor)],Cart[seq[0]]-Cart[seq[1]])
                        Cart[seq[3]] = prodQVQ(QuatA,Cart[seq[3]]-Cart[seq[1]])+Cart[seq[1]]
                if parmDict[pfx+'MolCent'][1]:
                    Cart -= parmDict[pfx+'MolCent'][0]
                # orient the body & convert Cartesian -> fractional via Bmat
                Qori = AVdeg2Q(parmDict[pfx+'Qa'],[parmDict[pfx+'Qi'],parmDict[pfx+'Qj'],parmDict[pfx+'Qk']])
                Pos = np.array([parmDict[pfx+'Px'],parmDict[pfx+'Py'],parmDict[pfx+'Pz']])
                Xdata.T[iatm:iatm+len(Cart)] = np.inner(Bmat,prodQVQ(Qori,Cart)).T+Pos
                iatm += len(Cart)
            elif parmDict[pfx+'Type'] == 'Atom':
                atNo = parmDict[pfx+'atNo']
                for key in keys:
                    parm = pfx+key[1:]              #remove extra ':'
                    if parm in parmDict:
                        keys[key][atNo] = parmDict[parm]
                iatm += 1
            else:
                continue        #skips March Dollase
        return Xdata.T
    def getAllTX(Tdata,Mdata,Xdata,SGM,SGT):
        # expand asymmetric-unit atoms by the space-group operators
        allX = np.inner(Xdata,SGM)+SGT
        allT = np.repeat(Tdata,allX.shape[1])
        allM = np.repeat(Mdata,allX.shape[1])
        allX = np.reshape(allX,(-1,3))
        return allT,allM,allX
    def getAllX(Xdata,SGM,SGT):
        allX = np.inner(Xdata,SGM)+SGT
        allX = np.reshape(allX,(-1,3))
        return allX
    def normQuaternions(RBdata,parmDict,varyList,values):
        # renormalize each rigid-body orientation after a refinement step
        for iObj in range(parmDict['nObj']):
            pfx = str(iObj)+':'
            if parmDict[pfx+'Type'] in ['Vector','Residue']:
                Qori = AVdeg2Q(parmDict[pfx+'Qa'],[parmDict[pfx+'Qi'],parmDict[pfx+'Qj'],parmDict[pfx+'Qk']])
                A,V = Q2AVdeg(Qori)
                for i,name in enumerate(['Qa','Qi','Qj','Qk']):
                    if i:
                        parmDict[pfx+name] = V[i-1]
                    else:
                        parmDict[pfx+name] = A
    def mcsaCalc(values,refList,rcov,cosTable,ifInv,allFF,RBdata,varyList,parmDict):
        ''' Compute structure factors for all h,k,l for phase
        input:
            refList: [ref] where each ref = h,k,l,m,d,...
            rcov: array[nref,nref] covariance terms between Fo^2 values
            ifInv: bool True if centrosymmetric
            allFF: array[nref,natoms] each value is mult*FF(H)/max(mult)
            RBdata: [dict] rigid body dictionary
            varyList: [list] names of varied parameters in MC/SA (not used here)
            ParmDict: [dict] problem parameters
        puts result F^2 in each ref[5] in refList
        returns:
            delt-F*rcov*delt-F/sum(Fo^2)
        '''
        global tsum,nsum
        t0 = time.time()
        parmDict.update(dict(zip(varyList,values)))             #update parameter tables
        Xdata = GetAtomX(RBdata,parmDict)                       #get new atom coords from RB
        allX = getAllX(Xdata,SGM,SGT)                           #fill unit cell - dups. OK
        # NOTE(review): '0:MDval' is only set when an MD model is present at
        # index 0 - confirm callers guarantee one exists
        MDval = parmDict['0:MDval']                             #get March-Dollase coeff
        HX2pi = 2.*np.pi*np.inner(allX,refList[:3].T)           #form 2piHX for every H,X pair
        Aterm = refList[3]*np.sum(allFF*np.cos(HX2pi),axis=0)**2    #compute real part for all H
        refList[5] = Aterm
        if not ifInv:
            refList[5] += refList[3]*np.sum(allFF*np.sin(HX2pi),axis=0)**2    #imaginary part for all H
        if len(cosTable):        #apply MD correction
            refList[5] *= np.sum(np.sqrt((MDval/(cosTable*(MDval**3-1.)+1.))**3),axis=1)/cosTable.shape[1]
        sumFcsq = np.sum(refList[5])
        scale = parmDict['sumFosq']/sumFcsq
        refList[5] *= scale
        refList[6] = refList[4]-refList[5]
        M = np.inner(refList[6],np.inner(rcov,refList[6]))
        tsum += (time.time()-t0)
        nsum += 1
        return np.sqrt(M/np.sum(refList[4]**2))
    def MCSAcallback(x, f,accept):
        # basinhopping callback; returning True stops the search (user cancel)
        return not pgbar.Update(min(100.,f*100),
            newmsg='%s%8.4f%s'%('MC/SA Residual:',f*100,'%'))[0]
    sq2pi = np.sqrt(2*np.pi)
    sq4pi = np.sqrt(4*np.pi)
    generalData = data['General']
    Amat,Bmat = G2lat.cell2AB(generalData['Cell'][1:7])
    Gmat,gmat = G2lat.cell2Gmat(generalData['Cell'][1:7])
    SGData = generalData['SGData']
    SGM = np.array([SGData['SGOps'][i][0] for i in range(len(SGData['SGOps']))])
    SGMT = np.array([SGData['SGOps'][i][0].T for i in range(len(SGData['SGOps']))])
    SGT = np.array([SGData['SGOps'][i][1] for i in range(len(SGData['SGOps']))])
    fixAtoms = data['Atoms']                       #if any
    cx,ct,cs = generalData['AtomPtrs'][:3]
    aTypes = set([])
    parmDict = {'Bmat':Bmat,'Gmat':Gmat}
    varyList = []
    atNo = 0
    # fixed atoms contribute parameters but are never varied
    for atm in fixAtoms:
        pfx = ':'+str(atNo)+':'
        parmDict[pfx+'Atype'] = atm[ct]
        aTypes |= set([atm[ct],])
        pstr = ['Ax','Ay','Az']
        parmDict[pfx+'Amul'] = atm[cs+1]
        for i in range(3):
            name = pfx+pstr[i]
            parmDict[name] = atm[cx+i]
        atNo += 1
    parmDict['nfixAt'] = len(fixAtoms)
    MCSA = generalData['MCSA controls']
    reflName = MCSA['Data source']
    MCSAObjs = data['MCSA']['Models']               #list of MCSA models
    upper = []
    lower = []
    MDvec = np.zeros(3)
    # build the parameter dictionary, vary list & bounds from the models
    for i,item in enumerate(MCSAObjs):
        mfx = str(i)+':'
        parmDict[mfx+'Type'] = item['Type']
        if item['Type'] == 'MD':
            getMDparms(item,mfx,parmDict,varyList)
            MDvec = np.array(item['axis'])
        elif item['Type'] == 'Atom':
            getAtomparms(item,mfx,aTypes,SGData,parmDict,varyList)
            parmDict[mfx+'atNo'] = atNo
            atNo += 1
        elif item['Type'] in ['Residue','Vector']:
            atNo = getRBparms(item,mfx,aTypes,RBdata,SGData,atNo,parmDict,varyList)
    parmDict['atNo'] = atNo                 #total no. of atoms
    parmDict['nObj'] = len(MCSAObjs)
    aTypes = list(aTypes)
    Tdata = GetAtomT(RBdata,parmDict)
    Xdata = GetAtomX(RBdata,parmDict)
    Mdata = GetAtomM(Xdata,SGData)
    allT,allM = getAllTX(Tdata,Mdata,Xdata,SGM,SGT)[:2]
    FFtables = G2el.GetFFtable(aTypes)
    refs = []
    allFF = []
    cosTable = []
    sumFosq = 0
    if 'PWDR' in reflName:
        # powder data: build reflection list plus a Gaussian-overlap covariance
        for ref in reflData:
            h,k,l,m,d,pos,sig,gam,f = ref[:9]
            if d >= MCSA['dmin']:
                sig = np.sqrt(sig)   #var -> sig in centideg
                sig = .01*G2pwd.getgamFW(gam,sig)        #sig,gam -> FWHM in deg
                SQ = 0.25/d**2
                allFF.append(allM*[G2el.getFFvalues(FFtables,SQ,True)[i] for i in allT]/np.max(allM))
                refs.append([h,k,l,m,f*m,pos,sig])
                sumFosq += f*m
                Heqv = np.inner(np.array([h,k,l]),SGMT)
                cosTable.append(G2lat.CosAngle(Heqv,MDvec,Gmat))
        nRef = len(refs)
        cosTable = np.array(cosTable)**2
        rcov = np.zeros((nRef,nRef))
        for iref,refI in enumerate(refs):
            rcov[iref][iref] = 1./(sq4pi*refI[6])
            for jref,refJ in enumerate(refs[:iref]):
                t1 = refI[6]**2+refJ[6]**2
                t2 = (refJ[5]-refI[5])**2/(2.*t1)
                if t2 > 10.:
                    rcov[iref][jref] = 0.
                else:
                    rcov[iref][jref] = 1./(sq2pi*np.sqrt(t1)*np.exp(t2))
        rcov += (rcov.T-np.diagflat(np.diagonal(rcov)))
        Rdiag = np.sqrt(np.diag(rcov))
        Rnorm = np.outer(Rdiag,Rdiag)
        rcov /= Rnorm
    elif 'Pawley' in reflName:  #need a bail out if Pawley cov matrix doesn't exist.
        vNames = []
        pfx = str(data['pId'])+'::PWLref:'
        for iref,refI in enumerate(reflData):           #Pawley reflection set
            h,k,l,m,d,v,f,s = refI
            if d >= MCSA['dmin'] and v:       #skip unrefined ones
                vNames.append(pfx+str(iref))
                SQ = 0.25/d**2
                allFF.append(allM*[G2el.getFFvalues(FFtables,SQ,True)[i] for i in allT]/np.max(allM))
                refs.append([h,k,l,m,f*m,iref,0.])
                sumFosq += f*m
                Heqv = np.inner(np.array([h,k,l]),SGMT)
                cosTable.append(G2lat.CosAngle(Heqv,MDvec,Gmat))
        cosTable = np.array(cosTable)**2
        nRef = len(refs)
#        if generalData['doPawley'] and (covData['freshCOV'] or MCSA['newDmin']):
        if covData['freshCOV'] or MCSA['newDmin']:
            # rebuild & cache the reflection covariance from the Pawley fit
            vList = covData['varyList']
            covMatrix = covData['covMatrix']
            rcov = getVCov(vNames,vList,covMatrix)
            rcov += (rcov.T-np.diagflat(np.diagonal(rcov)))
            Rdiag = np.sqrt(np.diag(rcov))
            Rdiag = np.where(Rdiag,Rdiag,1.0)
            Rnorm = np.outer(Rdiag,Rdiag)
            rcov /= Rnorm
            MCSA['rcov'] = rcov
            covData['freshCOV'] = False
            MCSA['newDmin'] = False
        else:
            rcov = MCSA['rcov']
    elif 'HKLF' in reflName:
        # single-crystal data: reflections are independent (identity covariance)
        for ref in reflData:
            [h,k,l,m,d],f = ref[:5],ref[6]
            if d >= MCSA['dmin']:
                SQ = 0.25/d**2
                allFF.append(allM*[G2el.getFFvalues(FFtables,SQ,True)[i] for i in allT]/np.max(allM))
                refs.append([h,k,l,m,f*m,0.,0.])
                sumFosq += f*m
        nRef = len(refs)
        rcov = np.identity(len(refs))
    allFF = np.array(allFF).T
    refs = np.array(refs).T
    if start:
        print (' Minimum d-spacing used: %.2f No. reflections used: %d'%(MCSA['dmin'],nRef))
        print (' Number of parameters varied: %d'%(len(varyList)))
        start = False
    parmDict['sumFosq'] = sumFosq
    x0 = [parmDict[val] for val in varyList]
    ifInv = SGData['SGInv']
    bounds = np.array(list(zip(lower,upper)))
    if MCSA['Algorithm'] == 'Basin Hopping':
#        import basinhopping as bs
        take_step = RandomDisplacementBounds(np.array(lower), np.array(upper))
        results = so.basinhopping(mcsaCalc,x0,take_step=take_step,disp=True,T=MCSA['Annealing'][0],
                interval=MCSA['Annealing'][2]/10,niter=MCSA['Annealing'][2],minimizer_kwargs={'method':'L-BFGS-B','bounds':bounds,
                'args':(refs,rcov,cosTable,ifInv,allFF,RBdata,varyList,parmDict)},callback=MCSAcallback)
    else:
        T0 = MCSA['Annealing'][0]
        if not T0:
            T0 = None
        results = anneal(mcsaCalc,x0,args=(refs,rcov,cosTable,ifInv,allFF,RBdata,varyList,parmDict),
            schedule=MCSA['Algorithm'], dwell=MCSA['Annealing'][2],maxiter=10000,
            T0=T0, Tf=MCSA['Annealing'][1],
            quench=MCSA['fast parms'][0], c=MCSA['fast parms'][1],
            lower=lower, upper=upper, slope=MCSA['log slope'],ranStart=MCSA.get('ranStart',False),
            ranRange=MCSA.get('ranRange',10.)/100.,autoRan=MCSA.get('autoRan',False),dlg=pgbar)
        print (' Acceptance rate: %.2f%% MCSA residual: %.2f%%'%(100.*results[5]/results[3],100.*results[1]))
        # polish the annealing result with a bounded local minimizer
        results = so.minimize(mcsaCalc,results[0],method='L-BFGS-B',args=(refs,rcov,cosTable,ifInv,allFF,RBdata,varyList,parmDict),
            bounds=bounds,)
    mcsaCalc(results['x'],refs,rcov,cosTable,ifInv,allFF,RBdata,varyList,parmDict)
    Result = [False,False,results['fun'],0.0,]+list(results['x'])
    Result.append(varyList)
    return Result,tsum,nsum,rcov
################################################################################
##### Quaternion stuff
################################################################################
def prodQQ(QA,QB):
    ''' Grassman quaternion product
        QA,QB quaternions; q=r+ai+bj+ck
    '''
    r1,a1,b1,c1 = QA[0],QA[1],QA[2],QA[3]
    r2,a2,b2,c2 = QB[0],QB[1],QB[2],QB[3]
    D = np.zeros(4)
    D[0] = r1*r2 - a1*a2 - b1*b2 - c1*c2
    D[1] = r1*a2 + a1*r2 + b1*c2 - c1*b2
    D[2] = r1*b2 - a1*c2 + b1*r2 + c1*a2
    D[3] = r1*c2 + a1*b2 - b1*a2 + c1*r2
    return D
def normQ(QA):
    ''' get length of quaternion & normalize it
        q=r+ai+bj+ck
    '''
    sq = np.array(QA)**2
    length = np.sqrt(np.sum(sq))
    return QA/length
def invQ(Q):
    '''
    get inverse of quaternion
    q=r+ai+bj+ck; q* = r-ai-bj-ck
    '''
    conj = np.array([1,-1,-1,-1])
    return Q*conj
def prodQVQ(Q,V):
    """
    compute the quaternion vector rotation qvq-1 = v'
    q=r+ai+bj+ck
    """
    r,a,b,c = Q[0],Q[1],Q[2],Q[3]
    T2 = r*a
    T3 = r*b
    T4 = r*c
    T5 = -a*a
    T6 = a*b
    T7 = a*c
    T8 = -b*b
    T9 = b*c
    T10 = -c*c
    M = np.array([[T8+T10,T6-T4,T3+T7],[T4+T6,T5+T10,T9-T2],[T7-T3,T2+T9,T5+T8]])
    return 2.*np.inner(V,M) + V
def Q2Mat(Q):
    ''' make rotation matrix from quaternion
        q=r+ai+bj+ck
    '''
    QN = normQ(Q)
    r,a,b,c = QN[0],QN[1],QN[2],QN[3]
    aa,bb,cc,dd = r**2,a**2,b**2,c**2
    ab,ac,ad = r*a,r*b,r*c
    bc,bd = a*b,a*c
    cd = b*c
    M = [[aa+bb-cc-dd, 2.*(bc-ad), 2.*(ac+bd)],
        [2*(ad+bc), aa-bb+cc-dd, 2.*(cd-ab)],
        [2*(bd-ac), 2.*(ab+cd), aa-bb-cc+dd]]
    return np.array(M)
def AV2Q(A,V):
    ''' convert angle (radians) & vector to quaternion
        q=r+ai+bj+ck
    '''
    Q = np.zeros(4)
    d = nl.norm(np.array(V))
    if d:
        V = V/d
        if not A:       #==0.
            A = 2.*np.pi
        half = A/2.
        Q[0] = np.cos(half)
        Q[1:4] = V*np.sin(half)
    else:
        # degenerate axis: return the convention-fixed quaternion
        Q[3] = 1.
    return Q
def AVdeg2Q(A,V):
    ''' convert angle (degrees) & vector to quaternion
        q=r+ai+bj+ck
    '''
    Q = np.zeros(4)
    d = nl.norm(np.array(V))
    if not A:       #== 0.!
        A = 360.
    if d:
        V = V/d
        half = A/2.
        Q[0] = cosd(half)
        Q[1:4] = V*sind(half)
    else:
        # degenerate axis: return the convention-fixed quaternion
        Q[3] = 1.
    return Q
def Q2AVdeg(Q):
    ''' convert quaternion to angle (degrees 0-360) & normalized vector
        q=r+ai+bj+ck

        NOTE(review): for the identity quaternion (Q[0] = +/-1) sind(A/2.)
        is zero and the vector is undefined (division by zero) - confirm
        callers never pass one.
    '''
    A = 2.*acosd(Q[0])
    V = np.array(Q[1:])/sind(A/2.)
    return A,V
def Q2AV(Q):
    ''' convert quaternion to angle (radians 0-2pi) & normalized vector
        q=r+ai+bj+ck
    '''
    A = 2.*np.arccos(Q[0])
    V = np.array(Q[1:])/np.sin(A/2.)
    return A,V
def randomQ(r0,r1,r2,r3):
    ''' create random quaternion from 4 random numbers in range (-1,1)

    :param float r0,r1,r2,r3: random numbers in (-1,1)
    :returns: np.array: unit quaternion q=r+ai+bj+ck; the sign of r3 only
      selects the sign of the k component
    '''
    Q = np.zeros(4)     # BUGFIX: was np.array(4), a 0-d array that cannot be indexed
    Q[0] = r0
    sumsq = Q[0]**2     # renamed from 'sum' to avoid shadowing the builtin
    Q[1] = np.sqrt(1.-sumsq)*r1
    sumsq += Q[1]**2
    Q[2] = np.sqrt(1.-sumsq)*r2
    sumsq += Q[2]**2
    Q[3] = np.sqrt(1.-sumsq)*np.where(r3<0.,-1.,1.)
    return Q
def randomAVdeg(r0,r1,r2,r3):
    ''' create random angle (deg),vector from 4 random number in range (-1,1)
    '''
    Q = randomQ(r0,r1,r2,r3)
    return Q2AVdeg(Q)
def makeQuat(A,B,C):
    '''Build the quaternion rotating vector A onto vector B about axis C.

    :param np.array A,B,C: Cartesian 3-vectors
    :returns: (quaternion q=r+ai+bj+ck, rotation angle in radians);
      identity quaternion & zero angle when the rotation is degenerate
    '''
    perpA = np.cross(A,C)
    perpB = np.cross(B,C)
    if nl.norm(perpA)*nl.norm(perpB):
        perpA = perpA/nl.norm(perpA)
        perpB = perpB/nl.norm(perpB)
        axis = np.cross(perpA,perpB)
    else:
        axis = np.zeros(3)
    Q = np.array([0.,0.,0.,1.])
    D = 0.
    if nl.norm(axis):
        axis = axis/nl.norm(axis)
        cosang = min(1.0,max(-1.0,np.vdot(perpA,perpB)))
        D = np.arccos(cosang)/2.0
        vecM = C-axis
        vecP = C+axis
        DM = nl.norm(vecM)
        DP = nl.norm(vecP)
        Q[0] = np.cos(D)
        Q[1:] = axis*np.sin(D)
        D *= 2.
        if DM > DP:
            # rotation axis points opposite C: report a negative angle
            D *= -1.
    return Q,D
def annealtests():
    '''Smoke-test :func:`anneal` on 1- and 2-dimensional test functions.

    NOTE: this previously passed ``full_output=1`` (not an ``anneal``
    keyword, so every call raised TypeError) and requested 'cauchy' /
    'boltzmann' schedules, which do not exist in this module (only
    'fast' and 'log' are implemented); both defects are fixed here.
    '''
    from numpy import cos
    # minimum expected at ~-0.195
    func = lambda x: cos(14.5*x-0.3) + (x+0.2)*x
    print (anneal(func,1.0,upper=3.0,lower=-3.0,feps=1e-4,maxiter=2000,schedule='fast'))
    print (anneal(func,1.0,upper=3.0,lower=-3.0,feps=1e-4,maxiter=2000,schedule='log'))
    # minimum expected at ~[-0.195, -0.1]
    func = lambda x: cos(14.5*x[0]-0.3) + (x[1]+0.2)*x[1] + (x[0]+0.2)*x[0]
    print (anneal(func,[1.0, 1.0],upper=[3.0, 3.0],lower=[-3.0, -3.0],feps=1e-4,maxiter=2000,schedule='fast'))
    print (anneal(func,[1.0, 1.0],upper=[3.0, 3.0],lower=[-3.0, -3.0],feps=1e-4,maxiter=2000,schedule='log'))

if __name__ == '__main__':
    annealtests()
|
AntonGagin/GSAS_USE
|
patchSystErrors/originalOld/GSASIImath.py
|
Python
|
gpl-3.0
| 199,608
|
[
"CRYSTAL"
] |
c20343d3cd265a0d88c6af6eff6004ef8310c604235c65359c7af0de38bd8a29
|
#!/usr/bin/env python
# -*- coding:utf-8 mode:python; tab-width:4; indent-tabs-mode:nil; py-indent-offset:4 -*-
##
"""
test_cpinterface
~~~~~~~~~~~~~~~~
Test chemical program interface code that is not tied to any one specific
back-end.
"""
import sys
import unittest
import cpinterface
import geoprep
from tests.common_testcode import runSuite
class CPITestCase(unittest.TestCase):
    """Exercise MolecularCalculator basis-set validation and retrieval."""

    def setUp(self):
        self.G = geoprep.Geotool()
        self.C = cpinterface.MolecularCalculator()

    def _expect_basis_error(self, system):
        """Verify that basis-data extraction rejects the given system."""
        self.assertRaises(ValueError, self.C.get_basis_data, system,
                          {"basis_format" : "gamess-us"})

    def _two_fragments(self):
        """Return a (methylium, methane) fragment pair."""
        return (self.G.make_fragment("[CH3+]"), self.G.make_fragment("C"))

    def test_missing_basis_name(self):
        #error given for atoms lacking a basis set assignment
        methylium, methane = self._two_fragments()
        methylium.set_basis_name("cc-pVDZ")
        self._expect_basis_error(geoprep.System([methylium, methane]))

    def test_bad_basis_name(self):
        #error given for atoms with unknown basis set name
        methylium, methane = self._two_fragments()
        methylium.set_basis_name("cc-pVDZ")
        methane.set_basis_name("bozo-basis")
        self._expect_basis_error(geoprep.System([methylium, methane]))

    def test_mixed_basis_representation(self):
        #error given for attempting to mix spherical and cartesian basis
        #functions in a single system
        methylium, methane = self._two_fragments()
        methylium.set_basis_name("cc-pVDZ")
        methane.set_basis_name("6-31G")
        self._expect_basis_error(geoprep.System([methylium, methane]))

    def test_missing_basis_elements(self):
        #error given for elements unparameterized by chosen basis set
        lewisite = self.G.make_fragment("Cl[As](Cl)\C=C\Cl")
        lewisite.set_basis_name("TZ (Dunning)")
        self._expect_basis_error(geoprep.System([lewisite]))

    def test_basis_retrieval(self):
        #basic error-free basis retrieval
        lewisite = self.G.make_fragment("Cl[As](Cl)\C=C\Cl")
        lewisite.set_basis_name("cc-pVDZ")
        chlorines = lewisite.select("[Cl]", hydrogen="exclude")
        lewisite.set_basis_name("cc-pVTZ", chlorines)
        s = geoprep.System([lewisite])
        bd = self.C.get_basis_data(s, {"basis_format" : "gamess-us"})
        self.assertEqual("spherical", bd["spherical_or_cartesian"])
        data = bd["data"]
        self.assertEqual(["cc-pVDZ", "cc-pVTZ"], sorted(data.keys()))
        tz_block = "".join(data["cc-pVTZ"].values())
        self.assertTrue("CHLORINE" in tz_block)
        dz_block = "".join(data["cc-pVDZ"].values())
        for name in ["HYDROGEN", "CARBON", "ARSENIC"]:
            self.assertTrue(name in dz_block)
def runTests():
    """Run CPITestCase, optionally restricted to a single test named on argv."""
    test_name = sys.argv[1] if len(sys.argv) > 1 else None

    if test_name:
        return runSuite(CPITestCase, name=test_name)
    return runSuite(CPITestCase)


if __name__ == '__main__':
    runTests()
|
mattbernst/polyhartree
|
tests/test_cpinterface.py
|
Python
|
gpl-3.0
| 3,415
|
[
"GAMESS"
] |
19f53815e9162dfcb489cd2449daf161e4d00cdef0e74150b7fbdbaa7d575419
|
"""
A Python interface for Discount, the C Markdown parser
This module contains ``libmarkdown``, a ctypes binding for Discount,
as well as ``Markdown``, a helper class built on top of this library.
Visit the Discount homepage:
http://www.pell.portland.or.us/~orc/Code/discount/
Basic usage examples:
>>> md = Markdown('`test`')
>>> md.get_html_content()
'<p><code>test</code></p>'
>>> md = Markdown(sys.stdin, autolink=True)
>>> md.write_html_content(sys.stdout)
See the ``Markdown`` docstrings for all keyword arguments, or the
docstrings for ``libmarkdown`` if you want to use the C functions
directly.
"""
import ctypes
import libmarkdown
# Maps Markdown() keyword arguments to the corresponding libmarkdown
# MKD_* bit flags; Markdown.__init__ ORs the selected flags together.
_KWARGS_TO_LIBMARKDOWN_FLAGS = {
    'toc': libmarkdown.MKD_TOC,
    'strict': libmarkdown.MKD_STRICT,
    'autolink': libmarkdown.MKD_AUTOLINK,
    'safelink': libmarkdown.MKD_SAFELINK,
    'ignore_header': libmarkdown.MKD_NOHEADER,
    'ignore_links': libmarkdown.MKD_NOLINKS,
    'ignore_images': libmarkdown.MKD_NOIMAGE,
    'ignore_tables': libmarkdown.MKD_NOTABLES,
    'ignore_smartypants': libmarkdown.MKD_NOPANTS,
    'ignore_embedded_html': libmarkdown.MKD_NOHTML,
    'ignore_pseudo_protocols': libmarkdown.MKD_NO_EXT,
}
def add_html5_tags():
    """Globally (and irreversibly) register Discount's extra HTML5 tags."""
    libmarkdown.mkd_with_html5_tags()
def define_tag(tag, selfclose=False):
    """Register an extra HTML tag with Discount.

    ``selfclose`` marks the tag as self-closing (like ``<br/>``).
    """
    flag = 1 if selfclose else 0
    tag_p = ctypes.c_char_p(tag)
    libmarkdown.mkd_define_tag(tag_p, flag)
class MarkdownError(Exception):
    """Raised when a Discount C function signals failure (returns ``-1``).

    The single argument is the name of the failing C function.
    """

    def __str__(self):
        return '{0} failure'.format(self.args[0])
class Markdown(object):
    """
    Markdown to HTML conversion.

    A single argument is required, ``input_file_or_string``, the
    Markdown formatted data. If this argument is a file-like object,
    the file must be a real OS file descriptor, i.e. ``sys.stdin``
    yes, a ``StringIO`` object, no. The argument is otherwise assumed
    to be a string-like object. The same is true for ``Markdown``
    methods that write HTML output to files.

    Optionally, you can specify two callback functions,
    ``rewrite_links_func`` and ``link_attrs_func``, which are hooks
    when links are processed in the markdown document (See the
    ``rewrite_links()`` and ``link_attrs()`` methods).

    Additional boolean keyword arguments are also accepted:

    ``toc`` : bool
        Generate table-of-contents headers (each generated <h1>,
        <h2>, etc will include a id="name" argument.) Use
        ``get_html_toc()`` or ``write_html_toc()`` to generate the
        table-of-contents itself.

    ``strict``
        Disable relaxed emphasis and superscripts.

    ``autolink``
        Greedily expand links; if a url is encountered, convert it to
        a hyperlink even if it isn't surrounded with ``<>s``.

    ``safelink``
        Be paranoid about how ``[][]`` is expanded into a link - if the
        url isn't a local reference, ``http://``, ``https://``, ``ftp://``,
        or ``news://``, it will not be converted into a hyperlink.

    ``ignore_header``
        Do not process the document header, but treat it like regular
        text. See http://johnmacfarlane.net/pandoc/README.html#title-blocks

    ``ignore_links``
        Do not allow ``<a`` or expand ``[][]`` into a link.

    ``ignore_images``
        Do not allow ``<img`` or expand ``![][]`` into a image.

    ``ignore_tables``
        Don't process PHP Markdown Extra tables. See
        http://michelf.com/projects/php-markdown/extra/.

    ``ignore_smartypants``
        Disable SmartyPants processing. See
        http://daringfireball.net/projects/smartypants/.

    ``ignore_embedded_html``
        Disable all embedded HTML by replacing all ``<``'s with ``&lt;``.

    ``ignore_pseudo_protocols``
        Do not process pseudo-protocols. See
        http://www.pell.portland.or.us/~orc/Code/discount/#pseudo

    Pandoc header elements can be retrieved with the methods
    ``get_pandoc_title()``, ``get_pandoc_author()`` and
    ``get_pandoc_date()``.

    The converted HTML document parts can be retrieved as a string
    with the ``get_html_css()``, ``get_html_toc()`` and
    ``get_html_content()`` methods, or written to a file with the
    ``write_html_css(fp)``, ``write_html_toc(fp)`` and
    ``write_html_content(fp)`` methods, where ``fp`` is the output
    file descriptor.
    """

    def __init__(
            self, input_file_or_string,
            rewrite_links_func=None, link_attrs_func=None,
            **kwargs):
        self.input = input_file_or_string

        # Convert a ``kwargs`` dict to a bitmask of libmarkdown flags.
        # All but one flag is exposed; MKD_1_COMPAT, which, according
        # to the original documentation, is not really useful other
        # than running MarkdownTest_1.0
        flags = 0
        for key in kwargs:
            # Unknown keywords are silently ignored (.get default of 0).
            flags |= _KWARGS_TO_LIBMARKDOWN_FLAGS.get(key, 0)
        self.flags = flags

        if rewrite_links_func is not None:
            self.rewrite_links(rewrite_links_func)
        if link_attrs_func is not None:
            self.link_attrs(link_attrs_func)

        # Holds ctypes buffers created by the callbacks so they stay
        # referenced (alive) while the C library may still read them.
        self._alloc = []

    def __del__(self):
        # Free the C-side document, if one was ever compiled.
        try:
            libmarkdown.mkd_cleanup(self._doc)
        except AttributeError:
            # ``_doc`` was never created; nothing to clean up.
            pass

    def _get_compiled_doc(self):
        """Lazily create and compile the libmarkdown document, caching it
        on ``self._doc``; register any link callbacks on it."""
        if not hasattr(self, '_doc'):
            if hasattr(self.input, 'read'):
                # If the input is file-like
                # NOTE(review): PyFile_AsFile exists only on Python 2 and
                # requires a real OS-level file object (not StringIO).
                input_ = ctypes.pythonapi.PyFile_AsFile(self.input)
                self._doc = libmarkdown.mkd_in(input_, self.flags)
            else:
                # Otherwise, treat it as a string
                input_ = ctypes.c_char_p(self.input)
                self._doc = libmarkdown.mkd_string(
                    input_, len(self.input), self.flags)
            ret = libmarkdown.mkd_compile(self._doc, self.flags)
            if ret == -1:
                raise MarkdownError('mkd_compile')
            if hasattr(self, '_rewrite_links_func'):
                libmarkdown.mkd_e_url(self._doc, self._rewrite_links_func)
            if hasattr(self, '_link_attrs_func'):
                libmarkdown.mkd_e_flags(self._doc, self._link_attrs_func)
        return self._doc

    def _generate_html_content(self, fp=None):
        """Write the HTML body to ``fp`` if given, else return it as a string."""
        if fp is not None:
            fp_ = ctypes.pythonapi.PyFile_AsFile(fp)
            ret = libmarkdown.mkd_generatehtml(self._get_compiled_doc(), fp_)
            if ret == -1:
                raise MarkdownError('mkd_generatehtml')
        else:
            sb = ctypes.c_char_p('')
            ln = libmarkdown.mkd_document(self._get_compiled_doc(), ctypes.byref(sb))
            if ln == -1:
                raise MarkdownError('mkd_document')
            else:
                return sb.value[:ln] if sb.value else ''
        # NOTE(review): only reached on the file-output path (the string
        # path returns above); drops references to callback buffers.
        self._alloc = []

    def _generate_html_toc(self, fp=None):
        """Write the table of contents to ``fp`` if given, else return it."""
        # NOTE(review): setting MKD_TOC here presumably has no effect if
        # the document was already compiled without it - confirm.
        self.flags |= libmarkdown.MKD_TOC
        if fp is not None:
            fp_ = ctypes.pythonapi.PyFile_AsFile(fp)
            ret = libmarkdown.mkd_generatetoc(self._get_compiled_doc(), fp_)
            if ret == -1:
                raise MarkdownError('mkd_generatetoc')
        else:
            sb = ctypes.c_char_p('')
            ln = libmarkdown.mkd_toc(self._get_compiled_doc(), ctypes.byref(sb))
            if ln == -1:
                raise MarkdownError('mkd_toc')
            else:
                return sb.value[:ln] if sb.value else ''
        # NOTE(review): only reached on the file-output path, as above.
        self._alloc = []

    def _generate_html_css(self, fp=None):
        """Write any <style> blocks to ``fp`` if given, else return them."""
        if fp is not None:
            fp_ = ctypes.pythonapi.PyFile_AsFile(fp)
            ret = libmarkdown.mkd_generatecss(self._get_compiled_doc(), fp_)
            # Returns -1 even on success
            # if ret == -1:
            #     raise MarkdownError('mkd_generatecss')
        else:
            sb = ctypes.c_char_p('')
            ln = libmarkdown.mkd_css(self._get_compiled_doc(), ctypes.byref(sb))
            if ln == -1:
                raise MarkdownError('mkd_css')
            else:
                return sb.value[:ln] if sb.value else ''
        # NOTE(review): only reached on the file-output path, as above.
        self._alloc = []

    def rewrite_links(self, func):
        """
        Add a callback for rewriting links.

        The callback should take a single argument, the url, and
        should return a replacement url. The callback function is
        called everytime a ``[]()`` or ``<link>`` is processed.

        You can use this method as a decorator on the function you
        want to set as the callback.
        """
        @libmarkdown.e_url_callback
        def _rewrite_links_func(string, size, context):
            ret = func(string[:size])
            if ret is not None:
                # Keep the buffer referenced on self so the C library can
                # still read it after this callback returns.
                buf = ctypes.create_string_buffer(ret)
                self._alloc.append(buf)
                return ctypes.addressof(buf)
        self._rewrite_links_func = _rewrite_links_func
        return func

    def link_attrs(self, func):
        """
        Add a callback for adding attributes to links.

        The callback should take a single argument, the url, and
        should return additional text to be inserted in the link tag,
        i.e. ``"target="_blank"``.

        You can use this method as a decorator on the function you
        want to set as the callback.
        """
        @libmarkdown.e_flags_callback
        def _link_attrs_func(string, size, context):
            ret = func(string[:size])
            if ret is not None:
                # Same lifetime trick as in rewrite_links().
                buf = ctypes.create_string_buffer(ret)
                self._alloc.append(buf)
                return ctypes.addressof(buf)
        self._link_attrs_func = _link_attrs_func
        return func

    def get_pandoc_title(self):
        """
        Get the document title from the pandoc header.
        """
        return libmarkdown.mkd_doc_title(self._get_compiled_doc())

    def get_pandoc_author(self):
        """
        Get the document author(s) from the pandoc header.
        """
        return libmarkdown.mkd_doc_author(self._get_compiled_doc())

    def get_pandoc_date(self):
        """
        Get the document date from the pandoc header.
        """
        return libmarkdown.mkd_doc_date(self._get_compiled_doc())

    def get_html_content(self):
        """
        Get the document content as HTML.
        """
        return self._generate_html_content()

    def get_html_toc(self):
        """
        Get the document's table of contents as HTML.
        """
        return self._generate_html_toc()

    def get_html_css(self):
        """
        Get any style blocks in the document as HTML.
        """
        return self._generate_html_css()

    def write_html_content(self, fp):
        """
        Write the document content to the file, ``fp``.
        """
        self._generate_html_content(fp)

    def write_html_toc(self, fp):
        """
        Write the document's table of contents to the file, ``fp``.
        """
        self._generate_html_toc(fp)

    def write_html_css(self, fp):
        """
        Write any style blocks in the document to the file, ``fp``.
        """
        self._generate_html_css(fp)
|
trapeze/python-discount
|
discount/__init__.py
|
Python
|
bsd-3-clause
| 11,262
|
[
"VisIt"
] |
f2c7fe24470fa1fb4df816d995e8d02065fbcac5afa61a8f3964781ce8a08917
|
# Copyright (c) Charl P. Botha, TU Delft
# All rights reserved.
# See COPYRIGHT for details.
import itk
import module_kits.itk_kit as itk_kit
from module_base import ModuleBase
from module_mixins import ScriptedConfigModuleMixin
class gaussianConvolve(ScriptedConfigModuleMixin, ModuleBase):
    """DeVIDE module that convolves a 3D float ITK image along one axis
    with a Gaussian kernel (or its first/second derivative), wrapping
    itk.RecursiveGaussianImageFilter.
    """

    # Valid values for the 'order' option: plain Gaussian, or its
    # first/second derivative.
    _orders = ['Zero', 'First', 'Second']

    def __init__(self, module_manager):
        ModuleBase.__init__(self, module_manager)

        # Default configuration values.
        self._config.direction = 0
        self._config.sigma = 1.0
        self._config.order = 'Zero'
        self._config.normaliseAcrossScale = False

        # (label, config attribute, type, widget, tooltip[, choices])
        configList = [
            ('Direction:', 'direction', 'base:int', 'choice',
             'Direction in which the filter has to be applied.',
             ['0', '1', '2']),
            ('Sigma:', 'sigma', 'base:float', 'text',
             'Sigma of Gaussian kernel in world coordinates.'),
            ('Order of Gaussian', 'order', 'base:str', 'choice',
             'Convolve with Gaussian, or first or second derivative.',
             tuple(self._orders)),
            ('Normalise across scale', 'normaliseAcrossScale', 'base:bool',
             'checkbox', 'Determine and use normalisation factor.')]

        # setup the pipeline
        if3 = itk.Image[itk.F, 3]
        self._gaussian = itk.RecursiveGaussianImageFilter[if3,if3].New()

        # Hook filter progress events into DeVIDE's progress reporting.
        itk_kit.utils.setupITKObjectProgress(
            self, self._gaussian, 'itkRecursiveGaussianImageFilter',
            'Convolving with Gaussian')

        # Must run after the pipeline exists: the mixin introspects the
        # objects passed in this dict.
        ScriptedConfigModuleMixin.__init__(
            self, configList,
            {'Module (self)' : self,
             'itkRecursiveGaussianImageFilter' : self._gaussian})

        self.sync_module_logic_with_config()

    def close(self):
        # we play it safe... (the graph_editor/module_manager should have
        # disconnected us by now)
        for input_idx in range(len(self.get_input_descriptions())):
            self.set_input(input_idx, None)
        # this will take care of all display thingies
        ScriptedConfigModuleMixin.close(self)
        # and the baseclass close
        ModuleBase.close(self)
        # remove all bindings
        del self._gaussian

    def execute_module(self):
        # Run the ITK pipeline.
        self._gaussian.Update()

    def get_input_descriptions(self):
        return ('ITK Image (3D, float)',)

    def set_input(self, idx, inputStream):
        # Single-input module: idx is ignored.
        self._gaussian.SetInput(inputStream)

    def get_output_descriptions(self):
        return ('Blurred ITK Image (3D, float)',)

    def get_output(self, idx):
        return self._gaussian.GetOutput()

    def config_to_logic(self):
        """Push the current config values into the ITK filter."""
        self._gaussian.SetDirection(self._config.direction)
        # SIGMA
        self._gaussian.SetSigma(self._config.sigma)
        # ORDER
        if self._config.order == 'Zero':
            self._gaussian.SetZeroOrder()
        elif self._config.order == 'First':
            self._gaussian.SetFirstOrder()
        elif self._config.order == 'Second':
            self._gaussian.SetSecondOrder()
        else:
            # Unknown value: fall back to plain Gaussian convolution.
            self._config.order = 'Zero'
            self._gaussian.SetZeroOrder()
        # NORMALISEACROSSSCALE
        self._gaussian.SetNormalizeAcrossScale(
            self._config.normaliseAcrossScale)

    def logic_to_config(self):
        """Read the filter state back into the config (where possible)."""
        self._config.direction = self._gaussian.GetDirection()
        # SIGMA
        self._config.sigma = self._gaussian.GetSigma()
        # ORDER
        # FIXME: dammit, we can't get the order.
        # NORMALISEACROSSSCALE
        self._config.normaliseAcrossScale = self._gaussian.\
            GetNormalizeAcrossScale()
|
nagyistoce/devide
|
modules/insight/gaussianConvolve.py
|
Python
|
bsd-3-clause
| 3,720
|
[
"Gaussian"
] |
42f04cabf3c278281fb629e6475954dba41152f0dd48be6f63baf58acf05c42a
|
#!/usr/bin/python
# coding=utf-8
import logging as log
import argparse
import os
import datetime
import time
import urllib
import mimetypes
import BaseHTTPServer
import socket
from operator import attrgetter
VERBOSITY = (
log.ERROR,
log.WARNING,
log.INFO,
log.DEBUG,
)
class Config:
    """Process-wide settings; class attributes are used as mutable globals."""
    __version__ = '0.1'
    __website__ = 'http://github.com/tbienko/catfeed'
    # Size of each file chunk streamed to HTTP clients (1 MiB).
    chunk_size = 1024*1024
    # configurable via commandline
    verbosity = 2            # index into VERBOSITY (0=ERROR ... 3=DEBUG)
    catalog = '.'            # directory served as a feed
    after_download = 'move'  # 'move' or 'delete' after a file is fetched
    moveto = 'Downloaded'    # target catalog for the 'move' policy
    host = 'auto'            # 'auto' -> detect outbound IP at startup
    # NOTE(review): kept as a string; argparse applies type=int to string
    # defaults, so the server still gets an int port.
    port = '8888'
    title = ''               # feed title; defaults to the catalog's name
class FileDescriptor(object):
    """Metadata for a single file found in the served catalog."""

    path = ""    # absolute path on disk
    name = ""    # file name without extension
    date = None  # creation time (st_ctime)
    size = 0     # size in bytes

    def __repr__(self):
        return self.name

    @classmethod
    def scan_catalog(cls):
        """Walk Config.catalog and return a descriptor for every file.

        When the after-download policy is 'move', files already moved to
        Config.moveto are skipped so downloaded items leave the feed.
        """
        items = []
        for subdir, dirs, files in os.walk(Config.catalog):
            for file in files:
                if Config.after_download == 'move' and \
                        subdir.startswith(Config.moveto):
                    continue
                path = os.path.join(subdir, file)
                # Named stat attributes instead of the 10-tuple unpack.
                st = os.stat(path)
                desc = cls()
                desc.path = path
                desc.name = os.path.splitext(file)[0]
                desc.date = st.st_ctime
                desc.size = st.st_size
                items.append(desc)
        return items

    @classmethod
    def get_for_path(cls, urlpath):
        """Return the descriptor whose URL path matches, or None."""
        for item in cls.scan_catalog():
            if item.urlpath == urlpath:
                return item
        return None

    def delete(self):
        """Remove the file from disk (after-download policy 'delete')."""
        log.debug("Deleting: %s", self.path)
        os.remove(self.path)

    def move(self):
        """Move the file into Config.moveto, preserving its relative path."""
        target = os.path.join(Config.moveto, self.relativepath)
        targetdir = os.path.dirname(target)
        log.debug("Moving file from: %s to: %s", self.path, target)
        if not os.path.isdir(targetdir):
            os.makedirs(targetdir)
        os.rename(self.path, target)

    @property
    def relativepath(self):
        """Path relative to the served catalog root."""
        return self.path[len(Config.catalog)+1:]

    @property
    def urlpath(self):
        """URL-safe path: spaces become dashes, then percent-encoding."""
        name = self.relativepath
        name = name.replace(' ', '-')
        name = urllib.quote(name)
        return name

    @property
    def mime(self):
        """Best-guess MIME type, defaulting to text/plain."""
        mime = mimetypes.guess_type(self.path)[0]
        return mime if mime is not None else "text/plain"
class CatFeed:
def setup_argparser(self):
parser = argparse.ArgumentParser(
description='Simple feed server for your files. \
Generate feed for local catalog and download files directly \
in your favourite podcast player!',
epilog='Visit ' + Config.__website__ + ' to get the newest \
script. You can also contribute to this project there.'
)
parser.add_argument(
'-H', '--host', type=str, default=Config.host,
help='host to start server on (default: ' + Config.host + ')'
)
parser.add_argument(
'-p', '--port', type=int, default=Config.port,
help='port to start server on (default: ' + Config.port + ')'
)
parser.add_argument(
'-m', '--moveto', type=str, default=Config.moveto, metavar='PATH',
help='catalog to move downloaded files (absolute or relative path,\
default: ' + Config.moveto + ')'
)
parser.add_argument(
'-d', '--delete', action='store_true',
help='remove downloaded files instead of moving to other catalog'
)
parser.add_argument(
'-t', '--title', type=str,
help='title of feed (default: name of catalog)'
)
parser.add_argument(
'-v', '--verbosity', type=int,
choices=range(len(VERBOSITY)), default=Config.verbosity,
help='level of logging (0-ERROR ... 3-DEBUG)'
)
parser.add_argument(
'catalog', type=str,
help='catalog to generate feed from (absolute or relative path)'
)
return parser
def args_to_config(self, args):
log.debug("Raw args from argparse: %s", args)
# loading arguments to class object
Config.verbosity = args.verbosity
log.basicConfig(level=VERBOSITY[Config.verbosity])
Config.catalog = os.path.abspath(args.catalog)
log.info("Files will be served from: %s", Config.catalog)
if args.delete:
Config.after_download = 'delete'
log.info("Downloaded files will be deleted")
else:
Config.after_download = 'move'
Config.moveto = self.generate_move_path(args.moveto)
log.info("Downloaded files will be moved to: %s", Config.moveto)
if args.host == "auto":
Config.host = self.find_ip()
log.info("Host automatically resolved to: %s", Config.host)
else:
Config.host = args.host
log.info("Host set to: %s", Config.host)
Config.port = args.port
log.info("Port set to: %s", Config.port)
if args.title is None:
Config.title = os.path.basename(Config.catalog)
log.info("Feed title generated from name of catalog: %s",
Config.title)
else:
Config.title = args.title
log.info("Feed title set to: %s", Config.title)
def find_ip(self):
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("google.com", 80))
return s.getsockname()[0]
s.close()
except:
return "127.0.0.1"
def generate_move_path(self, move):
path = os.path.realpath(move)
if os.path.isdir(path):
return path
return os.path.normpath("%s/%s" % (Config.catalog, move))
def start_server(self):
print "Your feed is served on %s" % Feed().feed_url()
httpd = BaseHTTPServer.HTTPServer((Config.host, Config.port),
RequestHandler)
try:
httpd.serve_forever()
except (KeyboardInterrupt, SystemExit):
log.info('Stopping...')
httpd.server_close()
def __init__(self):
log.basicConfig(level=VERBOSITY[Config.verbosity],
format='[%(levelname)s] %(message)s')
log.info("Starting CatFeed version %s", Config.__version__)
parser = self.setup_argparser()
args = parser.parse_args()
self.args_to_config(args)
self.start_server()
log.info('Stopped.')
class Feed:
    """Renders the catalog's files as an Atom feed document."""

    def base_url(self):
        """Root URL of the running server."""
        return "http://%s:%s/" % (Config.host, Config.port)

    def feed_url(self):
        """URL under which the feed itself is served (the root)."""
        return self.base_url()

    def item_url(self, item):
        """Absolute download URL for a FileDescriptor."""
        return self.base_url() + item.urlpath

    def atom_date(self, timestamp):
        """Format a Unix timestamp as an Atom (RFC 3339) UTC date string.

        Bug fix: the original formatted the timestamp in *local* time via
        ``fromtimestamp`` yet still appended the UTC designator ``Z``;
        ``utcfromtimestamp`` makes the ``Z`` suffix truthful.
        """
        return datetime.datetime.utcfromtimestamp(timestamp).isoformat("T") + "Z"

    def generate_feed(self, items):
        """Build the Atom XML for the given FileDescriptors, newest first."""
        items = sorted(items, key=attrgetter('date'), reverse=True)
        feed = []
        params = {
            'feed': self.feed_url(),
            'base': self.base_url(),
            'title': Config.title,
            'updated': self.atom_date(time.time())
        }
        feed.append("""<feed xmlns="http://www.w3.org/2005/Atom">
    <id>%(feed)s</id>
    <title>%(title)s</title>
    <updated>%(updated)s</updated>
    <link href="%(base)s" />
    <link rel="self" href="%(feed)s" />
    <author>
        <name>CatFeed</name>
    </author>""" % params)
        for item in items:
            params = {
                'name': item.name,
                'link': self.item_url(item),
                'date': self.atom_date(item.date),
                'mime': item.mime,
                'size': item.size
            }
            feed.append("""
    <entry>
        <id>%(link)s</id>
        <title>%(name)s</title>
        <updated>%(date)s</updated>
        <link href="%(link)s" />
        <summary></summary>
        <link rel="enclosure"
              type="%(mime)s"
              title="%(name)s"
              href="%(link)s"
              length="%(size)d" />
    </entry>\n""" % params)
        feed.append("</feed>")
        return ''.join(feed)
class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """Serves the Atom feed at '/' and the catalog's files at their URL paths."""

    def do_HEAD(self):
        self.serve(self.path, "HEAD")

    def do_GET(self):
        self.serve(self.path, "GET")

    def serve(self, path, request_type):
        """Dispatch the request to the feed, a catalog file, or a 404."""
        log.debug("New request %s: %s", request_type, self.path)
        if self.path == '/':
            self.serve_feed(request_type)
            return
        item = FileDescriptor.get_for_path(self.path.lstrip('/'))
        if item is not None:
            log.debug("Requested path found")
            self.serve_file(request_type, item)
        else:
            self.serve_404(request_type)

    def serve_404(self, request_type):
        log.debug("Response to %s: %s - 404", request_type, self.path)
        self.send_response(404)
        self.send_header("Content-type", "text/html")
        self.end_headers()
        self.wfile.write("404 Not Found")

    def serve_feed(self, request_type):
        """Send the generated Atom feed; HEAD gets headers only."""
        log.debug("Response to %s: %s - Feed", request_type, self.path)
        self.send_response(200)
        self.send_header("Content-type", "text/html")
        self.end_headers()
        if request_type == 'HEAD':
            return
        items = FileDescriptor.scan_catalog()
        feed = Feed().generate_feed(items)
        self.wfile.write(feed)

    def serve_file(self, request_type, item):
        """Stream the file in chunks, then apply the after-download policy."""
        log.debug("Response to %s: %s - Serving file %s",
                  request_type, self.path, item.path)
        f = open(item.path, 'rb')
        self.send_response(200)
        self.send_header('Content-type', item.mime)
        self.end_headers()
        if request_type == 'HEAD':
            # Fix: the original returned without closing f, leaking one
            # file handle per HEAD request.
            f.close()
            return
        try:
            while True:
                chunk = f.read(Config.chunk_size)
                if not chunk:
                    break
                self.wfile.write(chunk)
        finally:
            # Fix: close the handle even if the client disconnects mid-stream.
            f.close()
        log.debug("File downloaded: %s", item.path)
        # Only a completed GET counts as "downloaded" for move/delete.
        callback = getattr(item, Config.after_download)
        callback()
def main():
    """Entry point: constructing CatFeed parses argv and runs the server."""
    CatFeed()


if __name__ == "__main__":
    main()
|
tbienko/catfeed
|
catfeed.py
|
Python
|
mit
| 10,641
|
[
"VisIt"
] |
5653a1e08ca647d617f9aa2c9c19f55351d557097d42631f2b4a76c466da5a95
|
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import types
import logging
import numbers
import torch
import numpy as np
from bigdl.orca.learn.pytorch.training_operator import TrainingOperator
from bigdl.orca.learn.pytorch.pytorch_pyspark_worker import PytorchPysparkWorker
from bigdl.orca.learn.utils import maybe_dataframe_to_xshards, dataframe_to_xshards, \
convert_predict_xshards_to_dataframe, make_data_creator, update_predict_xshards, \
process_xshards_of_pandas_dataframe
from bigdl.orca.data import SparkXShards
from bigdl.orca import OrcaContext
from bigdl.orca.learn.base_estimator import BaseEstimator
from bigdl.dllib.utils.file_utils import enable_multi_fs_load, enable_multi_fs_save, \
get_remote_file_to_local
from bigdl.dllib.utils.common import get_node_and_core_number
from bigdl.orca.learn.log_monitor import start_log_server
from bigdl.orca.learn.utils import find_free_port, find_ip_and_free_port
from bigdl.dllib.utils.utils import get_node_ip
def partition_to_creator(partition):
    """Wrap a Spark partition's data in a PyTorch DataLoader factory.

    Returns a ``data_creator(config, batch_size)`` function suitable for
    the worker runners; heavy imports happen lazily inside it so the
    closure stays cheap to serialize.
    """

    def data_creator(config, batch_size):
        from bigdl.orca.data.utils import ray_partition_get_data_label, index_data, get_size
        from torch.utils.data import Dataset, DataLoader

        class NDArrayDataset(Dataset):
            def __init__(self, x, y):
                self.x = x  # features
                self.y = y  # labels

            def __len__(self):
                return get_size(self.y)

            def __getitem__(self, i):
                return index_data(self.x, i), index_data(self.y, i)

        loader_kwargs = {"batch_size": batch_size, "shuffle": True}
        passthrough = ("shuffle", "sampler", "batch_sampler", "num_workers",
                       "collate_fn", "pin_memory", "drop_last", "timeout",
                       "worker_init_fn", "multiprocessing_context")
        for key in passthrough:
            if key in config:
                loader_kwargs[key] = config[key]
        data, label = ray_partition_get_data_label(partition,
                                                   allow_tuple=False,
                                                   allow_list=False)
        print("Data size on worker: ", len(label))
        return DataLoader(NDArrayDataset(data, label), **loader_kwargs)

    return data_creator
class PyTorchPySparkEstimator(BaseEstimator):
    def __init__(
            self,
            *,
            model_creator,
            optimizer_creator,
            loss_creator=None,
            metrics=None,
            scheduler_creator=None,
            training_operator_cls=TrainingOperator,
            initialization_hook=None,
            config=None,
            scheduler_step_freq="batch",
            use_tqdm=False,
            workers_per_node=1,
            sync_stats=True,
            log_level=logging.INFO,
            model_dir=None,
            log_to_driver=True):
        """Distributed PyTorch estimator on the pyspark backend.

        ``model_creator`` and ``optimizer_creator`` must be plain
        functions (checked below) so they can be serialized to workers.
        ``model_dir`` is required: workers persist their state there and
        the driver reloads it after fit().
        """
        logging.basicConfig(level=log_level,
                            format='[%(asctime)s] %(levelname)-8s %(message)s',
                            datefmt='%Y-%m-%d %H:%M:%S'
                            )
        self.logger = logging.getLogger(__name__)
        # batch_size is a per-call knob of fit/evaluate/predict, not config.
        if config is not None and "batch_size" in config:
            raise Exception("Please do not specify batch_size in config. Input batch_size in the"
                            " fit/evaluate/predict function of the estimator instead.")
        self.config = {} if config is None else config

        sc = OrcaContext.get_spark_context()
        if not (isinstance(model_creator, types.FunctionType) and
                isinstance(optimizer_creator, types.FunctionType)):  # Torch model is also callable.
            raise ValueError(
                "Must provide a function for both model_creator and optimizer_creator")

        if not training_operator_cls and not loss_creator:
            raise ValueError("If a loss_creator is not provided, you must "
                             "provide a custom training operator.")

        if not model_dir:
            raise ValueError("Please specify model directory when using spark backend")
        self.model_dir = model_dir

        self.model_creator = model_creator
        self.initialization_hook = initialization_hook

        num_nodes, cores_per_node = get_node_and_core_number()
        self.num_workers = num_nodes * workers_per_node
        self.total_cores = num_nodes * cores_per_node
        self.cores_per_worker = cores_per_node // workers_per_node

        # over partition to cover tasks all over the cluster
        self.workerRDD = sc.parallelize(list(range(self.total_cores * 4)),
                                        self.total_cores * 4).repartition(self.num_workers)

        self.ip = get_node_ip()
        self.port = find_free_port()
        is_local = sc.master.startswith("local")
        # Workers forward their logs to the driver only in cluster mode.
        self.need_to_log_to_driver = (not is_local) and log_to_driver
        if self.need_to_log_to_driver:
            start_log_server(self.ip, self.port)

        # Keyword arguments replayed into every PytorchPysparkWorker.
        self.worker_init_params = dict(
            model_creator=self.model_creator,
            optimizer_creator=optimizer_creator,
            loss_creator=loss_creator,
            scheduler_creator=scheduler_creator,
            training_operator_cls=training_operator_cls,
            scheduler_step_freq=scheduler_step_freq,
            use_tqdm=use_tqdm,
            config=self.config.copy(),
            metrics=metrics,
            size=self.num_workers,
            cores_per_worker=self.cores_per_worker,
            sync_stats=sync_stats,
            log_level=log_level,
            model_dir=self.model_dir,
            log_to_driver=self.need_to_log_to_driver,
            driver_ip=self.ip,
            driver_port=self.port)

        # A local runner on the driver produces the initial state dict.
        local_init_params = self.worker_init_params.copy()
        local_init_params["log_to_driver"] = False
        self.driver_runner = PytorchPysparkWorker(
            mode='predict',
            cluster_info=self._get_cluster_info(sc),
            **local_init_params)

        self.state_dict = self.driver_runner.get_state_dict()
def _get_cluster_info(self, sc):
cluster_info = self.workerRDD.barrier().mapPartitions(find_ip_and_free_port).collect()
return cluster_info
def fit(self,
data,
epochs=1,
batch_size=32,
profile=False,
reduce_results=True,
info=None,
feature_cols=None,
label_cols=None,
callbacks=[]):
"""
Trains a PyTorch model given training data for several epochs.
Calls `TrainingOperator.train_epoch()` on N parallel workers simultaneously
underneath the hood.
:param data: An instance of SparkXShards, a Spark DataFrame or a function that
takes config and batch_size as argument and returns a PyTorch DataLoader for
training.
:param epochs: The number of epochs to train the model. Default is 1.
:param batch_size: The number of samples per batch for each worker. Default is 32.
The total batch size would be workers_per_node*num_nodes.
If your training data is a function, you can set batch_size to be the input
batch_size of the function for the PyTorch DataLoader.
:param profile: Boolean. Whether to return time stats for the training procedure.
Default is False.
:param reduce_results: Boolean. Whether to average all metrics across all workers into
one dict. If a metric is a non-numerical value (or nested dictionaries), one value
will be randomly selected among the workers. If False, returns a list of dicts for
all workers.
Default is True.
:param info: An optional dictionary that can be passed to the TrainingOperator for
train_epoch and train_batch.
:param feature_cols: feature column names if data is Spark DataFrame.
:param label_cols: label column names if data is Spark DataFrame.
:param callbacks: A list for all callbacks.
:return: A list of dictionary of metrics for every training epoch. If reduce_results is
False, this will return a nested list of metric dictionaries whose length will be
equal to the total number of workers.
You can also provide custom metrics by passing in a custom training_operator_cls
when creating the Estimator.
"""
data, _ = maybe_dataframe_to_xshards(data,
validation_data=None,
feature_cols=feature_cols,
label_cols=label_cols,
mode="fit",
num_workers=self.num_workers)
sc = OrcaContext.get_spark_context()
cluster_info = self._get_cluster_info(sc)
state_dict = self._get_broadcasted_state_dict(sc)
init_params = dict(
mode="fit",
state_dict=state_dict,
cluster_info=cluster_info)
init_params.update(self.worker_init_params)
params = dict(
epochs=epochs,
batch_size=batch_size,
profile=profile,
info=info,
callbacks=callbacks,
)
if isinstance(data, SparkXShards):
# set train/validation
params["wrap_dataloader"] = False
def transform_func(iter, init_params, param):
partition_data = list(iter)
param["data_creator"] = partition_to_creator(partition_data)
runner = PytorchPysparkWorker(**init_params)
result = runner.train_epochs(**param)
runner.shutdown()
return result
res = data.rdd.repartition(self.num_workers).barrier() \
.mapPartitions(
lambda iter: transform_func(iter, init_params, params)).collect()
else:
assert isinstance(data, types.FunctionType), \
"data should be either an instance of SparkXShards or a callable function, but " \
"got type: {}".format(type(data))
params["data_creator"] = data
def transform_func(iter, init_param, param):
return PytorchPysparkWorker(**init_param).train_epochs(**param)
res = self.workerRDD.barrier().mapPartitions(
lambda iter: transform_func(iter, init_params, params)).collect()
self.state_dict = PyTorchPySparkEstimator._get_state_dict_from_remote(self.model_dir)
worker_stats = res
epoch_stats = list(map(list, zip(*worker_stats)))
if reduce_results:
for i in range(len(epoch_stats)):
epoch_stats[i] = self._process_stats(epoch_stats[i])
return epoch_stats
else:
return epoch_stats
@staticmethod
def _get_state_dict_from_remote(remote_dir):
import tempfile
import shutil
import os
try:
temp_dir = tempfile.mkdtemp()
get_remote_file_to_local(os.path.join(remote_dir, "state.pkl"),
os.path.join(temp_dir, "state.pkl"),
over_write=True)
import pickle
with open(os.path.join(temp_dir, "state.pkl"), 'rb') as f:
state_dicts = pickle.load(f)
finally:
shutil.rmtree(temp_dir)
return state_dicts
def _get_broadcasted_state_dict(self, sc):
if self.state_dict:
state_dict_b = sc.broadcast(self.state_dict)
else:
state_dict_b = None
return state_dict_b
def _get_broadcasted_state_dict(self, sc):
if self.state_dict:
state_dict_b = sc.broadcast(self.state_dict)
else:
state_dict_b = None
return state_dict_b
def _predict_spark_xshards(self, xshards, init_params, params):
def transform_func(iter, init_param, param):
partition_data = list(iter)
# res = combine_in_partition(partition_data)
param["data_creator"] = make_data_creator(partition_data)
return PytorchPysparkWorker(**init_param).predict(**params)
pred_shards = SparkXShards(xshards.rdd.mapPartitions(
lambda iter: transform_func(iter, init_params, params)))
return pred_shards
def predict(self,
data,
batch_size=32,
feature_cols=None,
profile=False):
"""
Using this PyTorch model to make predictions on the data.
:param data: An instance of SparkXShards or a Spark DataFrame
:param batch_size: The number of samples per batch for each worker. Default is 32.
:param profile: Boolean. Whether to return time stats for the training procedure.
Default is False.
:param feature_cols: feature column names if data is a Spark DataFrame.
:return: A SparkXShards that contains the predictions with key "prediction" in each shard
"""
from bigdl.orca.data import SparkXShards
from pyspark.sql import DataFrame
sc = OrcaContext.get_spark_context()
cluster_info = self._get_cluster_info(sc)
state_dict = self._get_broadcasted_state_dict(sc)
init_params = dict(
mode="predict",
state_dict=state_dict,
cluster_info=cluster_info,
)
init_params.update(self.worker_init_params)
params = dict(
batch_size=batch_size,
profile=profile
)
if isinstance(data, DataFrame):
xshards, _ = dataframe_to_xshards(data,
validation_data=None,
feature_cols=feature_cols,
label_cols=None,
mode="predict")
pred_shards = self._predict_spark_xshards(xshards, init_params, params)
result = convert_predict_xshards_to_dataframe(data, pred_shards)
elif isinstance(data, SparkXShards):
pred_shards = self._predict_spark_xshards(data, init_params, params)
result = update_predict_xshards(data, pred_shards)
else:
raise ValueError("Only xshards or Spark DataFrame is supported for predict")
return result
    def evaluate(self,
                 data,
                 batch_size=32,
                 num_steps=None,
                 profile=False,
                 info=None,
                 feature_cols=None,
                 label_cols=None):
        """
        Evaluates a PyTorch model given validation data.
        Note that only accuracy for classification with zero-based label is supported by
        default. You can override validate_batch in TrainingOperator for other metrics.
        Calls `TrainingOperator.validate()` on N parallel workers simultaneously
        underneath the hood.
        :param data: An instance of SparkXShards, a Spark DataFrame or a function that
               takes config and batch_size as argument and returns a PyTorch DataLoader for
               validation.
        :param batch_size: The number of samples per batch for each worker. Default is 32.
               The total batch size would be workers_per_node*num_nodes.
               If your validation data is a function, you can set batch_size to be the input
               batch_size of the function for the PyTorch DataLoader.
        :param num_steps: The number of batches to compute the validation results on. This
               corresponds to the number of times `TrainingOperator.validate_batch` is called.
        :param profile: Boolean. Whether to return time stats for the training procedure.
               Default is False.
        :param info: An optional dictionary that can be passed to the TrainingOperator
               for validate.
        :param feature_cols: feature column names if train data is Spark DataFrame.
        :param label_cols: label column names if train data is Spark DataFrame.
        :return: A dictionary of metrics for the given data, including validation accuracy and loss.
                You can also provide custom metrics by passing in a custom training_operator_cls
                when creating the Estimator.
        """
        sc = OrcaContext.get_spark_context()
        # Rank/cluster layout and (optionally broadcast) model state that is
        # handed to every barrier task when constructing the workers.
        cluster_info = self._get_cluster_info(sc)
        state_dict = self._get_broadcasted_state_dict(sc)
        init_params = dict(
            mode="evaluate",
            state_dict=state_dict,
            cluster_info=cluster_info)
        init_params.update(self.worker_init_params)
        # Arguments forwarded to each worker's validate() call.
        params = dict(
            batch_size=batch_size,
            num_steps=num_steps,
            profile=profile,
            info=info)
        from bigdl.orca.data import SparkXShards
        # DataFrames are converted to SparkXShards here; callables pass
        # through unchanged and are handled in the else branch below.
        data, _ = maybe_dataframe_to_xshards(data,
                                             validation_data=None,
                                             feature_cols=feature_cols,
                                             label_cols=label_cols,
                                             mode="evaluate",
                                             num_workers=self.num_workers)
        if isinstance(data, SparkXShards):
            # set train/validation data
            def transform_func(iter, init_param, param):
                # Materialize the partition and expose it to the worker as a
                # data-creator function.
                partition_data = list(iter)
                param["data_creator"] = partition_to_creator(partition_data)
                return PytorchPysparkWorker(**init_param).validate(**param)
            # Barrier mode launches all workers simultaneously, which the
            # distributed validation requires.
            res = data.rdd.repartition(self.num_workers).barrier() \
                .mapPartitions(lambda iter: transform_func(iter, init_params, params)).collect()
        else:
            # ``data`` is a user-supplied data_creator callable.
            params["data_creator"] = data
            def transform_func(iter, init_param, param):
                return PytorchPysparkWorker(**init_param).validate(**param)
            res = self.workerRDD.barrier().mapPartitions(
                lambda iter: transform_func(iter, init_params, params)).collect()
        # Aggregate the per-worker metric dicts into one result.
        return self._process_stats(res)
def get_model(self):
"""
Returns the learned PyTorch model.
:return: The learned PyTorch model.
"""
state = self.state_dict
model = self.model_creator(self.config)
model_state = state["models"][0]
model.load_state_dict(model_state)
return model.module if hasattr(model, "module") else model
    def get_state_dict(self):
        """Return the estimator's current state dict (model and optimizer states)."""
        return self.state_dict
@enable_multi_fs_save
def save(self, model_path):
"""
Saves the Estimator state (including model and optimizer) to the provided model_path.
:param model_path: (str) Path to save the model.
:return:
"""
state_dict = self.state_dict
torch.save(state_dict, model_path)
return model_path
@enable_multi_fs_load
def load(self, model_path):
"""
Loads the Estimator state (including model and optimizer) from the provided model_path.
:param model_path: (str) Path to the existing model.
"""
state_dict = torch.load(model_path)
self.state_dict = state_dict
def save_checkpoint(self, model_path):
"""
Manually saves the Estimator state (including model and optimizer) to the provided
model_path.
:param model_path: (str) Path to save the model. Both local and remote path are supported.
e.g. "/tmp/estimator.ckpt" or "hdfs:///tmp/estimator.ckpt"
:return: None
"""
from bigdl.dllib.utils.file_utils import is_local_path
if is_local_path(model_path):
self.save(model_path)
else:
self.driver_runner.load_state_dict(self.state_dict)
self.driver_runner.save_checkpoint(filepath=model_path)
def load_checkpoint(self, model_path):
"""
Loads the Estimator state (including model and optimizer) from the provided model_path.
:param model_path: (str) Path to the existing model. Both local and remote path are
supported. e.g. "/tmp/estimator.ckpt" or "hdfs:///tmp/estimator.ckpt"
:return: None
"""
from bigdl.dllib.utils.file_utils import is_local_path
if is_local_path(model_path):
self.load(model_path)
else:
self.driver_runner.load_checkpoint(filepath=model_path)
self.state_dict = self.driver_runner.get_state_dict()
def _process_stats(self, worker_stats):
stats = {
"num_samples": sum(
stats.pop("num_samples", np.nan) for stats in worker_stats)
}
for stat_key in worker_stats[0]:
if isinstance(worker_stats[0], numbers.Number):
stats[stat_key] = np.nanmean(
[s.get(stat_key, np.nan) for s in worker_stats])
else:
stats[stat_key] = worker_stats[0][stat_key]
return stats
    def shutdown(self):
        """Release estimator resources.

        No-op: workers are constructed and shut down inside each Spark
        barrier job, so there is no persistent worker group to tear down here.
        """
        pass
|
intel-analytics/BigDL
|
python/orca/src/bigdl/orca/learn/pytorch/pytorch_pyspark_estimator.py
|
Python
|
apache-2.0
| 21,919
|
[
"ORCA"
] |
b7bff9988fb58aa0ca64a4e38fc3b0d789514994046b249369712dcd93331f73
|
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
#######################################################################
#
# MetrixWeather for Enigma2
# Coded by Sinthex IT-Solutions (c) 2017
# www.sinthex.de
#
#
# This plugin is licensed under the Creative Commons
# Attribution-NonCommercial-ShareAlike 3.0 Unported License.
# To view a copy of this license, visit http://creativecommons.org/licenses/by-nc-sa/3.0/
# or send a letter to Creative Commons, 559 Nathan Abbott Way, Stanford, California 94305, USA.
#
# This plugin is NOT free software. It is open source, you are allowed to
# modify it (if you keep the license), but it may not be commercially
# distributed other than under the conditions noted above.
#
#
#######################################################################
from Components.Converter.Converter import Converter
from Components.config import config, ConfigText, ConfigNumber, ConfigDateTime
from Components.Element import cached
class OMMetrixWeather(Converter, object):
    """Skin converter exposing MetrixWeather plugin values to Enigma2 skins.

    The converter ``type`` string selects which saved plugin value is
    rendered; temperature fields get the degree suffix from getCF() appended.
    """

    # Converter types whose config value is returned verbatim. Each name is
    # also the attribute name under config.plugins.MetrixWeather.
    _PLAIN_FIELDS = (
        "currentLocation", "currentWeatherText", "currentWeatherCode",
        "forecastTodayCode", "forecastTodayText", "forecastTodayDay",
        "forecastTomorrowCode", "forecastTomorrowText", "forecastTomorrowDay",
        "forecast2daysCode", "forecast2daysText", "forecast2daysDay",
        "forecast3daysCode", "forecast3daysText", "forecast3daysDay",
        "forecast4daysCode", "forecast4daysText", "forecast4daysDay",
    )
    # Converter types whose config value gets the degree suffix appended.
    _TEMP_FIELDS = (
        "currentWeatherTemp",
        "forecastTodayTempMin", "forecastTodayTempMax",
        "forecastTomorrowTempMin", "forecastTomorrowTempMax",
        "forecast2daysTempMin", "forecast2daysTempMax",
        "forecast3daysTempMin", "forecast3daysTempMax",
        "forecast4daysTempMin", "forecast4daysTempMax",
    )

    def __init__(self, type):
        Converter.__init__(self, type)
        self.type = type

    @cached
    def getText(self):
        try:
            if not config.plugins.MetrixWeather.enabled.saved_value:
                return ""
            cfg = config.plugins.MetrixWeather
            if self.type in self._PLAIN_FIELDS:
                return getattr(cfg, self.type).saved_value
            if self.type in self._TEMP_FIELDS:
                return getattr(cfg, self.type).saved_value + self.getCF()
            if self.type == "title":
                return self.getCF() + " | " + cfg.currentLocation.saved_value
            if self.type == "CF":
                return self.getCF()
            return ""
        except:
            # Any missing/unset plugin value simply renders as empty text.
            return ""

    text = property(getText)

    def getCF(self):
        # DEACTIVATED FOR GIGABLUE UNIVERSE HD: both unit branches
        # intentionally yield the same degree sign.
        if config.plugins.MetrixWeather.tempUnit.saved_value == "Fahrenheit":
            return "°"
        else:
            return "°"
|
openmips/stbgui
|
lib/python/Components/Converter/OMMetrixWeather.py
|
Python
|
gpl-2.0
| 5,193
|
[
"VisIt"
] |
f15b4df04f10b53d68147047e1109187d5bccbd299e732bdc8b4d8798e20fb0d
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# This software is licensed as described in the README.rst and LICENSE files,
# which you should have received as part of this distribution.
import argparse
from raspi_sensor.main import setup_default_mqtt_args
from raspi_relay.relay import Relay
def setup_args():
    """Build the raspi-relay argument parser and parse the command line.

    The common MQTT options are attached by raspi_sensor's helper.

    :return: the parsed argparse Namespace.
    """
    parser = argparse.ArgumentParser(
        prog='raspi-relay',
        description='RPi.Relay can change relay state, via Raspberry Pi GPIO.',
        epilog='For more info visit: https://github.com/ricco386/RPi/tree/master/RPi.Relay')
    setup_default_mqtt_args(parser)
    return parser.parse_args()
def main():
    """Entry point: configure a Relay from CLI arguments and run it."""
    params = setup_args()
    # Fall back to the default name when --name is absent or empty.
    relay_name = getattr(params, 'name', None) or 'Relay'
    relay = Relay(name=relay_name)
    relay.setup_args(params)
    if getattr(params, 'status', False):
        # Status mode: read once and print the current relay state.
        relay.sensor_read()
        print(relay.get_relay_state())
    else:
        relay.sense()


if __name__ == "__main__":
    # Execute only if run as a script.
    main()
|
ricco386/broadcaster
|
RPi.Relay/raspi_relay/main.py
|
Python
|
bsd-3-clause
| 1,053
|
[
"VisIt"
] |
52b36d3518d5676a7632764702e293c165a93d489ea1958e92b67f4452432742
|
'''Multiple Testing and P-Value Correction
Author: Josef Perktold
License: BSD-3
'''
import numpy as np
from statsmodels.stats._knockoff import RegressionFDR
__all__ = ['fdrcorrection', 'fdrcorrection_twostage', 'local_fdr',
'multipletests', 'NullDistribution', 'RegressionFDR']
# ==============================================
#
# Part 1: Multiple Tests and P-Value Correction
#
# ==============================================
def _ecdf(x):
'''no frills empirical cdf used in fdrcorrection
'''
nobs = len(x)
return np.arange(1,nobs+1)/float(nobs)
# Mapping from canonical (short) method code to the human-readable name.
multitest_methods_names = {'b': 'Bonferroni',
                           's': 'Sidak',
                           'h': 'Holm',
                           'hs': 'Holm-Sidak',
                           'sh': 'Simes-Hochberg',
                           'ho': 'Hommel',
                           'fdr_bh': 'FDR Benjamini-Hochberg',
                           'fdr_by': 'FDR Benjamini-Yekutieli',
                           'fdr_tsbh': 'FDR 2-stage Benjamini-Hochberg',
                           'fdr_tsbky': 'FDR 2-stage Benjamini-Krieger-Yekutieli',
                           'fdr_gbs': 'FDR adaptive Gavrilov-Benjamini-Sarkar'
                           }
# Each sub-list holds the canonical method code followed by its accepted
# aliases (spelled-out names and historical abbreviations).
_alias_list = [['b', 'bonf', 'bonferroni'],
               ['s', 'sidak'],
               ['h', 'holm'],
               ['hs', 'holm-sidak'],
               ['sh', 'simes-hochberg'],
               ['ho', 'hommel'],
               ['fdr_bh', 'fdr_i', 'fdr_p', 'fdri', 'fdrp'],
               ['fdr_by', 'fdr_n', 'fdr_c', 'fdrn', 'fdrcorr'],
               ['fdr_tsbh', 'fdr_2sbh'],
               ['fdr_tsbky', 'fdr_2sbky', 'fdr_twostage'],
               ['fdr_gbs']
               ]
# Flatten the alias lists into an alias -> canonical-code lookup table.
multitest_alias = {}
for m in _alias_list:
    multitest_alias[m[0]] = m[0]
    for a in m[1:]:
        multitest_alias[a] = m[0]
def multipletests(pvals, alpha=0.05, method='hs', is_sorted=False,
                  returnsorted=False):
    """
    Test results and p-value correction for multiple tests
    Parameters
    ----------
    pvals : array_like, 1-d
        uncorrected p-values.   Must be 1-dimensional.
    alpha : float
        FWER, family-wise error rate, e.g. 0.1
    method : str
        Method used for testing and adjustment of pvalues. Can be either the
        full name or initial letters. Available methods are:
        - `bonferroni` : one-step correction
        - `sidak` : one-step correction
        - `holm-sidak` : step down method using Sidak adjustments
        - `holm` : step-down method using Bonferroni adjustments
        - `simes-hochberg` : step-up method  (independent)
        - `hommel` : closed method based on Simes tests (non-negative)
        - `fdr_bh` : Benjamini/Hochberg  (non-negative)
        - `fdr_by` : Benjamini/Yekutieli (negative)
        - `fdr_tsbh` : two stage fdr correction (non-negative)
        - `fdr_tsbky` : two stage fdr correction (non-negative)
    is_sorted : bool
        If False (default), the p_values will be sorted, but the corrected
        pvalues are in the original order. If True, then it assumed that the
        pvalues are already sorted in ascending order.
    returnsorted : bool
        not tested, return sorted p-values instead of original sequence
    Returns
    -------
    reject : ndarray, boolean
        true for hypothesis that can be rejected for given alpha
    pvals_corrected : ndarray
        p-values corrected for multiple tests
    alphacSidak : float
        corrected alpha for Sidak method
    alphacBonf : float
        corrected alpha for Bonferroni method
    Notes
    -----
    There may be API changes for this function in the future.
    Except for 'fdr_twostage', the p-value correction is independent of the
    alpha specified as argument. In these cases the corrected p-values
    can also be compared with a different alpha. In the case of 'fdr_twostage',
    the corrected p-values are specific to the given alpha, see
    ``fdrcorrection_twostage``.
    The 'fdr_gbs' procedure is not verified against another package, p-values
    are derived from scratch and are not derived in the reference. In Monte
    Carlo experiments the method worked correctly and maintained the false
    discovery rate.
    All procedures that are included, control FWER or FDR in the independent
    case, and most are robust in the positively correlated case.
    `fdr_gbs`: high power, fdr control for independent case and only small
    violation in positively correlated case
    **Timing**:
    Most of the time with large arrays is spent in `argsort`. When
    we want to calculate the p-value for several methods, then it is more
    efficient to presort the pvalues, and put the results back into the
    original order outside of the function.
    Method='hommel' is very slow for large arrays, since it requires the
    evaluation of n partitions, where n is the number of p-values.
    """
    import gc
    pvals = np.asarray(pvals)
    alphaf = alpha  # keep the family-wise alpha under its own name
    if not is_sorted:
        # Sort once; results are mapped back to the input order at the end.
        sortind = np.argsort(pvals)
        pvals = np.take(pvals, sortind)
    ntests = len(pvals)
    # One-step corrected alpha levels (also returned to the caller).
    alphacSidak = 1 - np.power((1. - alphaf), 1./ntests)
    alphacBonf = alphaf / float(ntests)
    if method.lower() in ['b', 'bonf', 'bonferroni']:
        # One-step Bonferroni: scale all p-values by the number of tests.
        reject = pvals <= alphacBonf
        pvals_corrected = pvals * float(ntests)
    elif method.lower() in ['s', 'sidak']:
        # One-step Sidak correction.
        reject = pvals <= alphacSidak
        pvals_corrected = 1 - np.power((1. - pvals), ntests)
    elif method.lower() in ['hs', 'holm-sidak']:
        # Step-down Holm-Sidak: per-rank Sidak alphas, then enforce
        # monotonicity of the corrected p-values.
        alphacSidak_all = 1 - np.power((1. - alphaf),
                                       1./np.arange(ntests, 0, -1))
        notreject = pvals > alphacSidak_all
        del alphacSidak_all
        nr_index = np.nonzero(notreject)[0]
        if nr_index.size == 0:
            # nonreject is empty, all rejected
            notrejectmin = len(pvals)
        else:
            notrejectmin = np.min(nr_index)
        # Step-down: once one hypothesis is not rejected, none after it is.
        notreject[notrejectmin:] = True
        reject = ~notreject
        del notreject
        pvals_corrected_raw = 1 - np.power((1. - pvals),
                                           np.arange(ntests, 0, -1))
        pvals_corrected = np.maximum.accumulate(pvals_corrected_raw)
        del pvals_corrected_raw
    elif method.lower() in ['h', 'holm']:
        # Step-down Holm with Bonferroni-style per-rank alphas.
        notreject = pvals > alphaf / np.arange(ntests, 0, -1)
        nr_index = np.nonzero(notreject)[0]
        if nr_index.size == 0:
            # nonreject is empty, all rejected
            notrejectmin = len(pvals)
        else:
            notrejectmin = np.min(nr_index)
        notreject[notrejectmin:] = True
        reject = ~notreject
        pvals_corrected_raw = pvals * np.arange(ntests, 0, -1)
        pvals_corrected = np.maximum.accumulate(pvals_corrected_raw)
        del pvals_corrected_raw
        gc.collect()
    elif method.lower() in ['sh', 'simes-hochberg']:
        # Step-up Simes-Hochberg: extend the rejection down from the
        # largest rejected p-value.
        alphash = alphaf / np.arange(ntests, 0, -1)
        reject = pvals <= alphash
        rejind = np.nonzero(reject)
        if rejind[0].size > 0:
            rejectmax = np.max(np.nonzero(reject))
            reject[:rejectmax] = True
        pvals_corrected_raw = np.arange(ntests, 0, -1) * pvals
        pvals_corrected = np.minimum.accumulate(pvals_corrected_raw[::-1])[::-1]
        del pvals_corrected_raw
    elif method.lower() in ['ho', 'hommel']:
        # Hommel's closed testing procedure based on Simes tests.
        # we need a copy because we overwrite it in a loop
        a = pvals.copy()
        for m in range(ntests, 1, -1):
            cim = np.min(m * pvals[-m:] / np.arange(1,m+1.))
            a[-m:] = np.maximum(a[-m:], cim)
            a[:-m] = np.maximum(a[:-m], np.minimum(m * pvals[:-m], cim))
        pvals_corrected = a
        reject = a <= alphaf
    elif method.lower() in ['fdr_bh', 'fdr_i', 'fdr_p', 'fdri', 'fdrp']:
        # delegate, call with sorted pvals
        reject, pvals_corrected = fdrcorrection(pvals, alpha=alpha,
                                                method='indep',
                                                is_sorted=True)
    elif method.lower() in ['fdr_by', 'fdr_n', 'fdr_c', 'fdrn', 'fdrcorr']:
        # delegate, call with sorted pvals
        reject, pvals_corrected = fdrcorrection(pvals, alpha=alpha,
                                                method='n',
                                                is_sorted=True)
    elif method.lower() in ['fdr_tsbky', 'fdr_2sbky', 'fdr_twostage']:
        # delegate, call with sorted pvals
        reject, pvals_corrected = fdrcorrection_twostage(pvals, alpha=alpha,
                                                         method='bky',
                                                         is_sorted=True)[:2]
    elif method.lower() in ['fdr_tsbh', 'fdr_2sbh']:
        # delegate, call with sorted pvals
        reject, pvals_corrected = fdrcorrection_twostage(pvals, alpha=alpha,
                                                         method='bh',
                                                         is_sorted=True)[:2]
    elif method.lower() in ['fdr_gbs']:
        #adaptive stepdown in Gavrilov, Benjamini, Sarkar, Annals of Statistics 2009
        ## notreject = pvals > alphaf / np.arange(ntests, 0, -1) #alphacSidak
        ## notrejectmin = np.min(np.nonzero(notreject))
        ## notreject[notrejectmin:] = True
        ## reject = ~notreject
        ii = np.arange(1, ntests + 1)
        q = (ntests + 1. - ii)/ii * pvals / (1. - pvals)
        pvals_corrected_raw = np.maximum.accumulate(q) #up requirementd
        pvals_corrected = np.minimum.accumulate(pvals_corrected_raw[::-1])[::-1]
        del pvals_corrected_raw
        reject = pvals_corrected <= alpha
    else:
        raise ValueError('method not recognized')
    # Clip corrected p-values at 1 (every branch above sets pvals_corrected).
    if pvals_corrected is not None:
        pvals_corrected[pvals_corrected>1] = 1
    if is_sorted or returnsorted:
        return reject, pvals_corrected, alphacSidak, alphacBonf
    else:
        # Scatter results back to the caller's original ordering.
        pvals_corrected_ = np.empty_like(pvals_corrected)
        pvals_corrected_[sortind] = pvals_corrected
        del pvals_corrected
        reject_ = np.empty_like(reject)
        reject_[sortind] = reject
        return reject_, pvals_corrected_, alphacSidak, alphacBonf
def fdrcorrection(pvals, alpha=0.05, method='indep', is_sorted=False):
    '''pvalue correction for false discovery rate
    This covers Benjamini/Hochberg for independent or positively correlated and
    Benjamini/Yekutieli for general or negatively correlated tests. Both are
    available in the function multipletests, as method=`fdr_bh`, resp. `fdr_by`.
    Parameters
    ----------
    pvals : array_like
        set of p-values of the individual tests.
    alpha : float
        error rate
    method : {'indep', 'negcorr'}
    is_sorted : bool
        If False (default), the p_values will be sorted, but the corrected
        pvalues are in the original order. If True, then it assumed that the
        pvalues are already sorted in ascending order.
    Returns
    -------
    rejected : ndarray, bool
        True if a hypothesis is rejected, False if not
    pvalue-corrected : ndarray
        pvalues adjusted for multiple hypothesis testing to limit FDR
    Notes
    -----
    If there is prior information on the fraction of true hypothesis, then alpha
    should be set to alpha * m/m_0 where m is the number of tests,
    given by the p-values, and m_0 is an estimate of the true hypothesis.
    (see Benjamini, Krieger and Yekuteli)
    Method names can be abbreviated to first letter, 'i' or 'p' for fdr_bh and
    'n' for fdr_by.
    '''
    pvals = np.asarray(pvals)

    # Work on ascending p-values; remember the permutation to undo it later.
    if is_sorted:
        pvals_sorted = pvals
    else:
        pvals_sortind = np.argsort(pvals)
        pvals_sorted = np.take(pvals, pvals_sortind)

    # Per-rank scaling factor: plain ECDF for independent/positively
    # correlated tests, ECDF divided by the harmonic-number constant for the
    # Benjamini/Yekutieli variant.
    if method in ['i', 'indep', 'p', 'poscorr']:
        ecdffactor = _ecdf(pvals_sorted)
    elif method in ['n', 'negcorr']:
        cm = np.sum(1./np.arange(1, len(pvals_sorted)+1))
        ecdffactor = _ecdf(pvals_sorted) / cm
    else:
        raise ValueError('only indep and negcorr implemented')

    # Step-up rejection: everything up to the largest rejected rank rejects.
    reject = pvals_sorted <= ecdffactor*alpha
    if reject.any():
        rejectmax = max(np.nonzero(reject)[0])
        reject[:rejectmax] = True

    # Adjusted p-values: scale by rank, then enforce monotonicity from the
    # largest p-value down, and clip at 1.
    pvals_corrected = np.minimum.accumulate(
        (pvals_sorted / ecdffactor)[::-1])[::-1]
    pvals_corrected[pvals_corrected > 1] = 1

    if is_sorted:
        return reject, pvals_corrected
    # Scatter results back into the caller's original order.
    out_pvals = np.empty_like(pvals_corrected)
    out_pvals[pvals_sortind] = pvals_corrected
    out_reject = np.empty_like(reject)
    out_reject[pvals_sortind] = reject
    return out_reject, out_pvals
def fdrcorrection_twostage(pvals, alpha=0.05, method='bky', iter=False,
                           is_sorted=False):
    '''(iterated) two stage linear step-up procedure with estimation of number of true
    hypotheses
    Benjamini, Krieger and Yekuteli, procedure in Definition 6
    Parameters
    ----------
    pvals : array_like
        set of p-values of the individual tests.
    alpha : float
        error rate
    method : {'bky', 'bh'}
        see Notes for details
        * 'bky' - implements the procedure in Definition 6 of Benjamini, Krieger
           and Yekuteli 2006
        * 'bh' - the two stage method of Benjamini and Hochberg
    iter : bool
        If True, iterate the procedure until the number of rejections
        stabilizes; if False (default), perform only the two stages.
    Returns
    -------
    rejected : ndarray, bool
        True if a hypothesis is rejected, False if not
    pvalue-corrected : ndarray
        pvalues adjusted for multiple hypotheses testing to limit FDR
    m0 : int
        ntest - rej, estimated number of true hypotheses
    alpha_stages : list of floats
        A list of alphas that have been used at each stage
    Notes
    -----
    The returned corrected p-values are specific to the given alpha, they
    cannot be used for a different alpha.
    The returned corrected p-values are from the last stage of the fdr_bh
    linear step-up procedure (fdrcorrection0 with method='indep') corrected
    for the estimated fraction of true hypotheses.
    This means that the rejection decision can be obtained with
    ``pval_corrected <= alpha``, where ``alpha`` is the original significance
    level.
    (Note: This has changed from earlier versions (<0.5.0) of statsmodels.)
    BKY described several other multi-stage methods, which would be easy to implement.
    However, in their simulation the simple two-stage method (with iter=False) was the
    most robust to the presence of positive correlation
    TODO: What should be returned?
    '''
    pvals = np.asarray(pvals)
    if not is_sorted:
        # Sort once; results are scattered back to the input order below.
        pvals_sortind = np.argsort(pvals)
        pvals = np.take(pvals, pvals_sortind)
    ntests = len(pvals)
    # Stage-1 level: BKY shrinks alpha by 1/(1+alpha); plain BH does not.
    if method == 'bky':
        fact = (1.+alpha)
        alpha_prime = alpha / fact
    elif method == 'bh':
        fact = 1.
        alpha_prime = alpha
    else:
        raise ValueError("only 'bky' and 'bh' are available as method")
    alpha_stages = [alpha_prime]
    # Stage 1: ordinary BH step-up at the (possibly shrunk) level.
    rej, pvalscorr = fdrcorrection(pvals, alpha=alpha_prime, method='indep',
                                   is_sorted=True)
    r1 = rej.sum()
    if (r1 == 0) or (r1 == ntests):
        # Degenerate cases: nothing (or everything) rejected at stage 1.
        return rej, pvalscorr * fact, ntests - r1, alpha_stages
    ri_old = r1
    while True:
        # ntests0 estimates the number of true null hypotheses from the
        # previous stage's rejection count.
        ntests0 = 1.0 * ntests - ri_old
        alpha_star = alpha_prime * ntests / ntests0
        alpha_stages.append(alpha_star)
        rej, pvalscorr = fdrcorrection(pvals, alpha=alpha_star, method='indep',
                                       is_sorted=True)
        ri = rej.sum()
        if (not iter) or ri == ri_old:
            break
        elif ri < ri_old:
            # prevent cycles and endless loops
            raise RuntimeError(" oops - should not be here")
        ri_old = ri
    # make adjustment to pvalscorr to reflect estimated number of Non-Null cases
    # decision is then pvalscorr < alpha (or <=)
    # NOTE(review): the scaling uses ntests0 from the *last* stage — looks
    # intentional per the docstring, but verify against the BKY reference.
    pvalscorr *= ntests0 * 1.0 / ntests
    if method == 'bky':
        pvalscorr *= (1. + alpha)
    if not is_sorted:
        # Undo the initial sort for both outputs.
        pvalscorr_ = np.empty_like(pvalscorr)
        pvalscorr_[pvals_sortind] = pvalscorr
        del pvalscorr
        reject = np.empty_like(rej)
        reject[pvals_sortind] = rej
        return reject, pvalscorr_, ntests - ri, alpha_stages
    else:
        return rej, pvalscorr, ntests - ri, alpha_stages
def local_fdr(zscores, null_proportion=1.0, null_pdf=None, deg=7,
              nbins=30, alpha=0):
    """
    Calculate local FDR values for a list of Z-scores.
    Parameters
    ----------
    zscores : array_like
        A vector of Z-scores
    null_proportion : float
        The assumed proportion of true null hypotheses
    null_pdf : function mapping reals to positive reals
        The density of null Z-scores; if None, use standard normal
    deg : int
        The maximum exponent in the polynomial expansion of the
        density of non-null Z-scores
    nbins : int
        The number of bins for estimating the marginal density
        of Z-scores.
    alpha : float
        Use Poisson ridge regression with parameter alpha to estimate
        the density of non-null Z-scores.
    Returns
    -------
    fdr : array_like
        A vector of FDR values
    References
    ----------
    B Efron (2008). Microarrays, Empirical Bayes, and the Two-Groups
    Model. Statistical Science 23:1, 1-22.
    Examples
    --------
    Basic use (the null Z-scores are taken to be standard normal):
    >>> from statsmodels.stats.multitest import local_fdr
    >>> import numpy as np
    >>> zscores = np.random.randn(30)
    >>> fdr = local_fdr(zscores)
    Use a Gaussian null distribution estimated from the data:
    >>> null = NullDistribution(zscores)
    >>> fdr = local_fdr(zscores, null_pdf=null.pdf)
    """
    from statsmodels.genmod.generalized_linear_model import GLM
    from statsmodels.genmod.generalized_linear_model import families
    from statsmodels.regression.linear_model import OLS
    # Bins for Poisson modeling of the marginal Z-score density
    minz = min(zscores)
    maxz = max(zscores)
    bins = np.linspace(minz, maxz, nbins)
    # Bin counts
    zhist = np.histogram(zscores, bins)[0]
    # Bin centers
    zbins = (bins[:-1] + bins[1:]) / 2
    # The design matrix at bin centers
    dmat = np.vander(zbins, deg + 1)
    # Rescale the design matrix (skip near-constant columns to avoid
    # dividing by ~0)
    sd = dmat.std(0)
    ii = sd >1e-8
    dmat[:, ii] /= sd[ii]
    # OLS on log-counts gives starting values for the Poisson fit.
    start = OLS(np.log(1 + zhist), dmat).fit().params
    # Poisson regression
    if alpha > 0:
        md = GLM(zhist, dmat, family=families.Poisson()).fit_regularized(L1_wt=0, alpha=alpha, start_params=start)
    else:
        md = GLM(zhist, dmat, family=families.Poisson()).fit(start_params=start)
    # The design matrix for all Z-scores
    dmat_full = np.vander(zscores, deg + 1)
    dmat_full[:, ii] /= sd[ii]
    # The height of the estimated marginal density of Z-scores,
    # evaluated at every observed Z-score.
    fz = md.predict(dmat_full) / (len(zscores) * (bins[1] - bins[0]))
    # The null density.
    if null_pdf is None:
        f0 = np.exp(-0.5 * zscores**2) / np.sqrt(2 * np.pi)
    else:
        f0 = null_pdf(zscores)
    # The local FDR values (Efron's two-groups ratio), clipped to [0, 1].
    fdr = null_proportion * f0 / fz
    fdr = np.clip(fdr, 0, 1)
    return fdr
class NullDistribution(object):
"""
Estimate a Gaussian distribution for the null Z-scores.
The observed Z-scores consist of both null and non-null values.
The fitted distribution of null Z-scores is Gaussian, but may have
non-zero mean and/or non-unit scale.
Parameters
----------
zscores : array_like
The observed Z-scores.
null_lb : float
Z-scores between `null_lb` and `null_ub` are all considered to be
true null hypotheses.
null_ub : float
See `null_lb`.
estimate_mean : bool
If True, estimate the mean of the distribution. If False, the
mean is fixed at zero.
estimate_scale : bool
If True, estimate the scale of the distribution. If False, the
scale parameter is fixed at 1.
estimate_null_proportion : bool
If True, estimate the proportion of true null hypotheses (i.e.
the proportion of z-scores with expected value zero). If False,
this parameter is fixed at 1.
Attributes
----------
mean : float
The estimated mean of the empirical null distribution
sd : float
The estimated standard deviation of the empirical null distribution
null_proportion : float
The estimated proportion of true null hypotheses among all hypotheses
References
----------
B Efron (2008). Microarrays, Empirical Bayes, and the Two-Groups
Model. Statistical Science 23:1, 1-22.
Notes
-----
See also:
http://nipy.org/nipy/labs/enn.html#nipy.algorithms.statistics.empirical_pvalue.NormalEmpiricalNull.fdr
"""
def __init__(self, zscores, null_lb=-1, null_ub=1, estimate_mean=True,
estimate_scale=True, estimate_null_proportion=False):
# Extract the null z-scores
ii = np.flatnonzero((zscores >= null_lb) & (zscores <= null_ub))
if len(ii) == 0:
raise RuntimeError("No Z-scores fall between null_lb and null_ub")
zscores0 = zscores[ii]
# Number of Z-scores, and null Z-scores
n_zs, n_zs0 = len(zscores), len(zscores0)
# Unpack and transform the parameters to the natural scale, hold
# parameters fixed as specified.
def xform(params):
mean = 0.
sd = 1.
prob = 1.
ii = 0
if estimate_mean:
mean = params[ii]
ii += 1
if estimate_scale:
sd = np.exp(params[ii])
ii += 1
if estimate_null_proportion:
prob = 1 / (1 + np.exp(-params[ii]))
return mean, sd, prob
from scipy.stats.distributions import norm
def fun(params):
"""
Negative log-likelihood of z-scores.
The function has three arguments, packed into a vector:
mean : location parameter
logscale : log of the scale parameter
logitprop : logit of the proportion of true nulls
The implementation follows section 4 from Efron 2008.
"""
d, s, p = xform(params)
# Mass within the central region
central_mass = (norm.cdf((null_ub - d) / s) -
norm.cdf((null_lb - d) / s))
# Probability that a Z-score is null and is in the central region
cp = p * central_mass
# Binomial term
rval = n_zs0 * np.log(cp) + (n_zs - n_zs0) * np.log(1 - cp)
# Truncated Gaussian term for null Z-scores
zv = (zscores0 - d) / s
rval += np.sum(-zv**2 / 2) - n_zs0 * np.log(s)
rval -= n_zs0 * np.log(central_mass)
return -rval
# Estimate the parameters
from scipy.optimize import minimize
# starting values are mean = 0, scale = 1, p0 ~ 1
mz = minimize(fun, np.r_[0., 0, 3], method="Nelder-Mead")
mean, sd, prob = xform(mz['x'])
self.mean = mean
self.sd = sd
self.null_proportion = prob
# The fitted null density function
def pdf(self, zscores):
    """
    Evaluate the fitted empirical null Z-score density.

    Parameters
    ----------
    zscores : scalar or array_like
        Point(s) at which to evaluate the density.

    Returns
    -------
    The fitted Gaussian null density (with the estimated mean and
    standard deviation) evaluated at `zscores`.
    """
    standardized = (zscores - self.mean) / self.sd
    log_density = -0.5 * standardized**2 - np.log(self.sd) - 0.5 * np.log(2 * np.pi)
    return np.exp(log_density)
|
jseabold/statsmodels
|
statsmodels/stats/multitest.py
|
Python
|
bsd-3-clause
| 24,374
|
[
"Gaussian"
] |
50ae762692d3b20708a34beeb81d87a3dc99417d24223bbfd76cb6757b3d0cb6
|
#!/usr/bin/env python
########################################################################
# $HeadURL$
# File :   dirac-install-mysql
# Author : Ricardo Graciani
########################################################################
"""
Do the initial installation and configuration of the DIRAC MySQL server
"""
__RCSID__ = "$Id$"
from DIRAC.Core.Base import Script
# The usage message is the one-line summary above (second line of __doc__).
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1] ] ) )
Script.parseCommandLine()
# NOTE(review): imported only after parseCommandLine(), presumably so the
# DIRAC configuration is initialized first — confirm before reordering.
from DIRAC.FrameworkSystem.Client.ComponentInstaller import gComponentInstaller
# Abort immediately if any installation step fails.
gComponentInstaller.exitOnError = True
# Obtain the MySQL passwords needed for the installation.
gComponentInstaller.getMySQLPasswords()
# Perform the MySQL server installation itself.
gComponentInstaller.installMySQL()
# Record the MySQL settings in the local DIRAC configuration.
gComponentInstaller._addMySQLToDiracCfg()
|
Andrew-McNab-UK/DIRAC
|
Core/scripts/dirac-install-mysql.py
|
Python
|
gpl-3.0
| 720
|
[
"DIRAC"
] |
f7e766af2097423f7145a981d9d33566dfe0cb105b45d8176162c13aca859f00
|
# Copyright 2010 Google Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import base64
import binascii
import os
import re
import StringIO
from boto.exception import BotoClientError
from boto.s3.key import Key as S3Key
from boto.s3.keyfile import KeyFile
from boto.utils import compute_hash
from boto.utils import get_utf8_value
class Key(S3Key):
"""
Represents a key (object) in a GS bucket.
:ivar bucket: The parent :class:`boto.gs.bucket.Bucket`.
:ivar name: The name of this Key object.
:ivar metadata: A dictionary containing user metadata that you
wish to store with the object or that has been retrieved from
an existing object.
:ivar cache_control: The value of the `Cache-Control` HTTP header.
:ivar content_type: The value of the `Content-Type` HTTP header.
:ivar content_encoding: The value of the `Content-Encoding` HTTP header.
:ivar content_disposition: The value of the `Content-Disposition` HTTP
header.
:ivar content_language: The value of the `Content-Language` HTTP header.
:ivar etag: The `etag` associated with this object.
:ivar last_modified: The string timestamp representing the last
time this object was modified in GS.
:ivar owner: The ID of the owner of this object.
:ivar storage_class: The storage class of the object. Currently, one of:
STANDARD | DURABLE_REDUCED_AVAILABILITY.
:ivar md5: The MD5 hash of the contents of the object.
:ivar size: The size, in bytes, of the object.
:ivar generation: The generation number of the object.
:ivar metageneration: The generation number of the object metadata.
:ivar encrypted: Whether the object is encrypted while at rest on
the server.
:ivar cloud_hashes: Dictionary of checksums as supplied by the storage
provider.
"""
def __init__(self, bucket=None, name=None, generation=None):
    """Initialize a GS Key.

    :param bucket: parent :class:`boto.gs.bucket.Bucket`, if any.
    :param name: the object's name within the bucket.
    :param generation: the object's generation number, if known.
    """
    super(Key, self).__init__(bucket=bucket, name=name)
    self.generation = generation
    # Bug fix: __repr__ and delete() read ``self.metageneration``, but
    # only the unused ``meta_generation`` was initialized here, so e.g.
    # repr() of a fresh key constructed with a generation raised
    # AttributeError.  Initialize both; ``meta_generation`` is retained
    # for backward compatibility with any external readers.
    self.metageneration = None
    self.meta_generation = None
    self.cloud_hashes = {}
    self.component_count = None
def __repr__(self):
    """Debug representation: bucket, name, and generation/metageneration
    version suffix when both version numbers are known."""
    version_suffix = ''
    if self.generation and self.metageneration:
        version_suffix = '#%s.%s' % (self.generation, self.metageneration)
    bucket_part = self.bucket.name if self.bucket else 'None'
    return '<Key: %s,%s%s>' % (bucket_part, self.name, version_suffix)
def endElement(self, name, value, connection):
    """Populate key attributes from a SAX end-element event.

    Known element names map onto specific attributes (with type
    conversion where needed); unknown names are stored verbatim.
    """
    if name == 'Owner':
        # Owner has its own nested handler; nothing to record here.
        return
    straight_copy = {
        'Key': 'name',
        'ETag': 'etag',
        'LastModified': 'last_modified',
        'StorageClass': 'storage_class',
        'VersionId': 'version_id',
        'Generation': 'generation',
        'MetaGeneration': 'metageneration',
    }
    if name in straight_copy:
        setattr(self, straight_copy[name], value)
    elif name == 'IsLatest':
        self.is_latest = (value == 'true')
    elif name == 'Size':
        self.size = int(value)
    else:
        # Fall back to storing the raw value under the element name.
        setattr(self, name, value)
def handle_version_headers(self, resp, force=False):
    """Record the object's generation and metageneration from the
    GCS-specific response headers (None when absent)."""
    for attr, header in (('metageneration', 'x-goog-metageneration'),
                         ('generation', 'x-goog-generation')):
        setattr(self, attr, resp.getheader(header, None))
def handle_restore_headers(self, response):
    """No-op: GCS responses carry no S3-style restore headers."""
    pass
def handle_addl_headers(self, headers):
    """Record GCS-specific response headers on this key.

    Captures provider-supplied hashes, the composite-object component
    count, the generation number, and the stored (transcoding-invariant)
    content encoding and length.
    """
    for header, header_value in headers:
        if header == 'x-goog-hash':
            # Value is a comma-separated list of "<alg>=<b64 digest>" pairs.
            for hash_pair in header_value.split(','):
                alg, b64_digest = hash_pair.strip().split('=', 1)
                self.cloud_hashes[alg] = binascii.a2b_base64(b64_digest)
        elif header == 'x-goog-component-count':
            self.component_count = int(header_value)
        elif header == 'x-goog-generation':
            self.generation = header_value
        elif header == 'x-goog-stored-content-encoding':
            # Preferred over content-encoding/size, which can change when
            # the object is transcoded in transit.
            self.content_encoding = header_value
        elif header == 'x-goog-stored-content-length':
            self.size = int(header_value)
def open_read(self, headers=None, query_args='',
              override_num_retries=None, response_headers=None):
    """Open this key for reading.

    For GCS the object generation, when known, must travel in the query
    string; all remaining processing is delegated to the parent class.

    :param headers: dict of headers to pass with the web request.
    :param query_args: query-string arguments (e.g. 'torrent').
    :param override_num_retries: if not None, overrides the configured
        num_retries for the underlying GET.
    :param response_headers: dict of headers/values overriding the
        stored object's headers in the response (see
        http://goo.gl/EWOPb).
    """
    if self.generation:
        separator = '&' if query_args else ''
        query_args = '%s%sgeneration=%s' % (query_args, separator,
                                            self.generation)
    super(Key, self).open_read(headers=headers, query_args=query_args,
                               override_num_retries=override_num_retries,
                               response_headers=response_headers)
def get_file(self, fp, headers=None, cb=None, num_cb=10,
             torrent=False, version_id=None, override_num_retries=None,
             response_headers=None, hash_algs=None):
    """Download the object's contents into *fp*, pinning the request to
    this key's generation when one is known."""
    generation_args = None
    if self.generation:
        generation_args = ['generation=%s' % self.generation]
    self._get_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb,
                            override_num_retries=override_num_retries,
                            response_headers=response_headers,
                            hash_algs=hash_algs,
                            query_args=generation_args)
def get_contents_to_file(self, fp, headers=None,
                         cb=None, num_cb=10,
                         torrent=False,
                         version_id=None,
                         res_download_handler=None,
                         response_headers=None,
                         hash_algs=None):
    """Retrieve this key's object from GCS and write its contents to *fp*.

    :param fp: file-like object to write the object's contents to.
    :param headers: additional HTTP headers sent with the GET request.
    :param cb: progress callback accepting (bytes_transmitted, total_size).
    :param num_cb: maximum number of times ``cb`` will be invoked.
    :param torrent: if True, fetch the torrent-file contents as a string.
    :param res_download_handler: ResumableDownloadHandler; when provided,
        it performs the download instead of ``get_file``.
    :param response_headers: dict of headers/values overriding the stored
        object's headers in the response (see http://goo.gl/sMkcC).
    :param hash_algs: hash algorithms to use for integrity checking.
    """
    if self.bucket is None:
        # Without a parent bucket there is nothing to download.
        return
    if res_download_handler:
        res_download_handler.get_file(self, fp, headers, cb, num_cb,
                                      torrent=torrent,
                                      version_id=version_id,
                                      hash_algs=hash_algs)
    else:
        self.get_file(fp, headers, cb, num_cb, torrent=torrent,
                      version_id=version_id,
                      response_headers=response_headers,
                      hash_algs=hash_algs)
def compute_hash(self, fp, algorithm, size=None):
    """Hash the contents of *fp* with *algorithm* and return the digests.

    :param fp: file pointer to hash; restored to its starting position
        before returning.
    :param algorithm: zero-argument constructor for hash objects
        implementing update() and digest() (e.g. hashlib.md5).
    :param size: (optional) maximum number of bytes to read from *fp*;
        useful when a file is split into parts for upload.
    :returns: ``(hex_digest, base64_digest)`` tuple.
    """
    hex_digest, b64_digest, byte_count = compute_hash(
        fp, size=size, hash_algorithm=algorithm)
    # The helper also reports how many bytes it consumed.  Keep that on
    # the key but do not return it, so the public interface of this
    # method stays unchanged for existing callers.
    self.size = byte_count
    return (hex_digest, b64_digest)
def send_file(self, fp, headers=None, cb=None, num_cb=10,
              query_args=None, chunked_transfer=False, size=None,
              hash_algs=None):
    """Upload the data at *fp* to GCS.

    :param fp: file pointer positioned at the offset to upload from
        (the start of the file for a full upload).
    :param headers: dict of headers to pass with the PUT request.
    :param cb: progress callback accepting (bytes_sent, total_bytes).
    :param num_cb: (optional) maximum number of callback invocations; a
        negative value means one call per buffer read.
    :param query_args: query-string arguments.
    :param chunked_transfer: (optional) use chunked Transfer-Encoding
        when True.
    :param size: (optional) maximum number of bytes to read from *fp*;
        by default everything up to EOF is read.
    :param hash_algs: (optional) dict mapping hash names to hashing
        classes implementing update() and digest(); defaults to
        ``{'md5': hashlib.md5}``.
    """
    # All of the heavy lifting lives in the shared internal helper.
    self._send_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb,
                             query_args=query_args,
                             chunked_transfer=chunked_transfer, size=size,
                             hash_algs=hash_algs)
def delete(self, headers=None):
    """Delete this key from its bucket, targeting this key's version
    and generation, and return the bucket's result."""
    return self.bucket.delete_key(self.name,
                                  version_id=self.version_id,
                                  generation=self.generation,
                                  headers=headers)
def add_email_grant(self, permission, email_address):
    """Grant *permission* to the Google account behind *email_address*.

    Fetches the current ACL, appends the new grant, and PUTs the
    updated ACL back to GS.

    :param permission: permission to grant: READ or FULL_CONTROL (see
        http://code.google.com/apis/storage/docs/developer-guide.html#authorization).
    :param email_address: email address of the Google account receiving
        the permission.
    """
    policy = self.get_acl()
    policy.add_email_grant(permission, email_address)
    self.set_acl(policy)
def add_user_grant(self, permission, user_id):
    """Grant *permission* to the canonical user *user_id*.

    Fetches the current ACL, appends the new grant, and PUTs the
    updated ACL back to GS.

    :param permission: permission to grant: READ or FULL_CONTROL (see
        http://code.google.com/apis/storage/docs/developer-guide.html#authorization).
    :param user_id: canonical user id of the GS account receiving the
        permission.
    """
    policy = self.get_acl()
    policy.add_user_grant(permission, user_id)
    self.set_acl(policy)
def add_group_email_grant(self, permission, email_address, headers=None):
    """Grant *permission* to the Google Group behind *email_address*.

    Fetches the current ACL, appends the new grant, and PUTs the
    updated ACL back to GS.

    :param permission: permission to grant: READ or FULL_CONTROL (see
        http://code.google.com/apis/storage/docs/developer-guide.html#authorization).
    :param email_address: email address of the Google Group receiving
        the permission.
    :param headers: extra headers to send with both the GET and PUT.
    """
    policy = self.get_acl(headers=headers)
    policy.add_group_email_grant(permission, email_address)
    self.set_acl(policy, headers=headers)
def add_group_grant(self, permission, group_id):
    """Grant *permission* to the canonical group *group_id*.

    Fetches the current ACL, appends the new grant, and PUTs the
    updated ACL back to GS.

    :param permission: permission to grant: READ or FULL_CONTROL (see
        http://code.google.com/apis/storage/docs/developer-guide.html#authorization).
    :param group_id: canonical group id of the Google Groups account
        receiving the permission.
    """
    policy = self.get_acl()
    policy.add_group_grant(permission, group_id)
    self.set_acl(policy)
def set_contents_from_file(self, fp, headers=None, replace=True,
                           cb=None, num_cb=10, policy=None, md5=None,
                           res_upload_handler=None, size=None, rewind=False,
                           if_generation=None):
    """
    Store an object in GS using the name of the Key object as the
    key in GS and the contents of the file pointed to by 'fp' as the
    contents.

    :type fp: file
    :param fp: the file whose contents are to be uploaded

    :type headers: dict
    :param headers: additional HTTP headers to be sent with the PUT request.

    :type replace: bool
    :param replace: If this parameter is False, the method will first check
        to see if an object exists in the bucket with the same key. If it
        does, it won't overwrite it. The default value is True which will
        overwrite the object.

    :type cb: function
    :param cb: a callback function that will be called to report
        progress on the upload. The callback should accept two integer
        parameters, the first representing the number of bytes that have
        been successfully transmitted to GS and the second representing the
        total number of bytes that need to be transmitted.

    :type num_cb: int
    :param num_cb: (optional) If a callback is specified with the cb
        parameter, this parameter determines the granularity of the callback
        by defining the maximum number of times the callback will be called
        during the file transfer.

    :type policy: :class:`boto.gs.acl.CannedACLStrings`
    :param policy: A canned ACL policy that will be applied to the new key
        in GS.

    :type md5: tuple
    :param md5: A tuple containing the hexdigest version of the MD5 checksum
        of the file as the first element and the Base64-encoded version of
        the plain checksum as the second element. This is the same format
        returned by the compute_md5 method. If present, this value is used
        as the MD5 of the file (avoiding a second pass); otherwise the
        checksum will be computed.

    :type res_upload_handler: ResumableUploadHandler
    :param res_upload_handler: If provided, this handler will perform the
        upload.

    :type size: int
    :param size: (optional) The Maximum number of bytes to read from the
        file pointer (fp). This is useful when uploading a file in multiple
        parts where you are splitting the file up into different ranges to
        be uploaded. If not specified, the default behaviour is to read all
        bytes from the file pointer. Less bytes may be available.
        Notes:

            1. The "size" parameter currently cannot be used when a
               resumable upload handler is given but is still useful for
               uploading part of a file as implemented by the parent class.
            2. At present Google Cloud Storage does not support multipart
               uploads.

    :type rewind: bool
    :param rewind: (optional) If True, the file pointer (fp) will be
        rewound to the start before any bytes are read from it. The default
        behaviour is False which reads from the current position of the
        file pointer (fp).

    :type if_generation: int
    :param if_generation: (optional) If set to a generation number, the
        object will only be written to if its current generation number is
        this value. If set to the value 0, the object will only be written
        if it doesn't already exist.

    :rtype: int
    :return: The number of bytes written to the key.

    TODO: At some point we should refactor the Bucket and Key classes,
    to move functionality common to all providers into a parent class,
    and provider-specific functionality into subclasses (rather than
    just overriding/sharing code the way it currently works).
    """
    provider = self.bucket.connection.provider
    if res_upload_handler and size:
        # could use size instead of file_length if provided but...
        raise BotoClientError(
            '"size" param not supported for resumable uploads.')
    headers = headers or {}
    if policy:
        # Canned ACL travels as a provider-specific request header.
        headers[provider.acl_header] = policy
    if rewind:
        # caller requests reading from beginning of fp.
        fp.seek(0, os.SEEK_SET)
    else:
        # The following seek/tell/seek logic is intended
        # to detect applications using the older interface to
        # set_contents_from_file(), which automatically rewound the
        # file each time the Key was reused. This changed with commit
        # 14ee2d03f4665fe20d19a85286f78d39d924237e, to support uploads
        # split into multiple parts and uploaded in parallel, and at
        # the time of that commit this check was added because otherwise
        # older programs would get a success status and upload an empty
        # object. Unfortuantely, it's very inefficient for fp's implemented
        # by KeyFile (used, for example, by gsutil when copying between
        # providers). So, we skip the check for the KeyFile case.
        # TODO: At some point consider removing this seek/tell/seek
        # logic, after enough time has passed that it's unlikely any
        # programs remain that assume the older auto-rewind interface.
        if not isinstance(fp, KeyFile):
            spos = fp.tell()
            fp.seek(0, os.SEEK_END)
            if fp.tell() == spos:
                # fp was already at EOF; legacy callers expect an
                # implicit rewind, so try reading from the start.
                fp.seek(0, os.SEEK_SET)
                if fp.tell() != spos:
                    # Raise an exception as this is likely a programming
                    # error whereby there is data before the fp but nothing
                    # after it.
                    fp.seek(spos)
                    raise AttributeError('fp is at EOF. Use rewind option '
                                         'or seek() to data start.')
            # seek back to the correct position.
            fp.seek(spos)
    if hasattr(fp, 'name'):
        self.path = fp.name
    if self.bucket is not None:
        if isinstance(fp, KeyFile):
            # Avoid EOF seek for KeyFile case as it's very inefficient.
            key = fp.getkey()
            size = key.size - fp.tell()
            self.size = size
            # At present both GCS and S3 use MD5 for the etag for
            # non-multipart-uploaded objects. If the etag is 32 hex
            # chars use it as an MD5, to avoid having to read the file
            # twice while transferring.
            if (re.match('^"[a-fA-F0-9]{32}"$', key.etag)):
                etag = key.etag.strip('"')
                md5 = (etag, base64.b64encode(binascii.unhexlify(etag)))
        if size:
            self.size = size
        else:
            # If md5 is provided, still need to size so
            # calculate based on bytes to end of content
            spos = fp.tell()
            fp.seek(0, os.SEEK_END)
            self.size = fp.tell() - spos
            fp.seek(spos)
            size = self.size
        if md5 is None:
            md5 = self.compute_md5(fp, size)
        self.md5 = md5[0]
        self.base64md5 = md5[1]
        if self.name is None:
            # No explicit key name: fall back to the content's MD5 hex.
            self.name = self.md5
        if not replace:
            # Respect replace=False: silently skip if the key exists.
            if self.bucket.lookup(self.name):
                return
        if if_generation is not None:
            headers['x-goog-if-generation-match'] = str(if_generation)
        if res_upload_handler:
            res_upload_handler.send_file(self, fp, headers, cb, num_cb)
        else:
            # Not a resumable transfer so use basic send_file mechanism.
            self.send_file(fp, headers, cb, num_cb, size=size)
def set_contents_from_filename(self, filename, headers=None, replace=True,
                               cb=None, num_cb=10, policy=None, md5=None,
                               reduced_redundancy=None,
                               res_upload_handler=None,
                               if_generation=None):
    """Upload the contents of the file named *filename* to this key.

    See set_contents_from_file for details on the shared parameters.

    :param filename: name of the file to upload to GS.
    :param headers: additional headers to pass along with the request.
    :param replace: if True, replaces existing contents of the key.
    :param cb: progress callback accepting (bytes_transmitted, total_size).
    :param num_cb: (optional) maximum number of callback invocations.
    :param policy: canned ACL policy applied to the new key in GS.
    :param md5: (hexdigest, base64digest) tuple as returned by
        compute_md5; computed when omitted.
    :param reduced_redundancy: accepted for interface compatibility only;
        not forwarded to GS.
    :param res_upload_handler: ResumableUploadHandler that performs the
        upload when provided.
    :param if_generation: (optional) write only if the object's current
        generation equals this value (0 means only if it doesn't exist).
    """
    # New content invalidates any hashes computed for earlier content.
    self.local_hashes = {}
    with open(filename, 'rb') as fp:
        self.set_contents_from_file(fp, headers, replace, cb, num_cb,
                                    policy, md5, res_upload_handler,
                                    if_generation=if_generation)
def set_contents_from_string(self, s, headers=None, replace=True,
                             cb=None, num_cb=10, policy=None, md5=None,
                             if_generation=None):
    """Store the string *s* as this key's contents in GCS.

    See set_contents_from_file for details on the shared parameters.

    :param s: the data to upload.
    :param headers: additional headers to pass along with the request.
    :param replace: if True, replaces existing contents of the key.
    :param cb: progress callback accepting (bytes_transmitted, total_size).
    :param num_cb: (optional) maximum number of callback invocations.
    :param policy: canned ACL policy applied to the new key in GCS.
    :param md5: (hexdigest, base64digest) tuple as returned by
        compute_md5; computed when omitted.
    :param if_generation: (optional) write only if the object's current
        generation equals this value (0 means only if it doesn't exist).
    """
    # New content invalidates any previously recorded MD5 values.
    self.md5 = None
    self.base64md5 = None
    buffer_fp = StringIO.StringIO(get_utf8_value(s))
    result = self.set_contents_from_file(buffer_fp, headers, replace, cb,
                                         num_cb, policy, md5,
                                         if_generation=if_generation)
    buffer_fp.close()
    return result
def set_contents_from_stream(self, *args, **kwargs):
    """Upload this key's contents from a non-seekable data stream.

    Because the stream's total size is unknown, Content-Size and
    Content-MD5 cannot be sent; the MD5 computation delay is avoided at
    the cost of not being able to verify the uploaded data's integrity.

    Accepts the same arguments as the parent implementation, plus an
    extra keyword:

    :param if_generation: (optional) write only if the object's current
        generation equals this value (0 means only if it doesn't exist);
        translated into the x-goog-if-generation-match request header.
    """
    precondition = kwargs.pop('if_generation', None)
    if precondition is not None:
        request_headers = kwargs.get('headers', {})
        request_headers['x-goog-if-generation-match'] = str(precondition)
        kwargs['headers'] = request_headers
    super(Key, self).set_contents_from_stream(*args, **kwargs)
def set_acl(self, acl_or_str, headers=None, generation=None,
            if_generation=None, if_metageneration=None):
    """Set the ACL for this object via its bucket.

    :param acl_or_str: canned ACL string (see
        :data:`~.gs.acl.CannedACLStrings`) or an ACL object.
    :param headers: additional headers to set during the request.
    :param generation: target a specific generation of a versioned
        object; the current version is modified when omitted.
    :param if_generation: (optional) only update if the ACL's current
        generation number equals this value.
    :param if_metageneration: (optional) only update if the ACL's
        current metageneration number equals this value.
    """
    if self.bucket is None:
        # No parent bucket, nothing to update.
        return
    self.bucket.set_acl(acl_or_str, self.name, headers=headers,
                        generation=generation,
                        if_generation=if_generation,
                        if_metageneration=if_metageneration)
def get_acl(self, headers=None, generation=None):
    """Return this object's ACL (None when the key has no bucket).

    :param dict headers: additional headers to set during the request.
    :param int generation: specific generation of a versioned object to
        query; the current version is used when omitted.
    :rtype: :class:`.gs.acl.ACL`
    """
    if self.bucket is None:
        return None
    return self.bucket.get_acl(self.name, headers=headers,
                               generation=generation)
def get_xml_acl(self, headers=None, generation=None):
    """Return this object's ACL as an XML string (None when the key has
    no bucket).

    :param dict headers: additional headers to set during the request.
    :param int generation: specific generation of a versioned object to
        query; the current version is used when omitted.
    :rtype: str
    """
    if self.bucket is None:
        return None
    return self.bucket.get_xml_acl(self.name, headers=headers,
                                   generation=generation)
def set_xml_acl(self, acl_str, headers=None, generation=None,
if_generation=None, if_metageneration=None):
"""Sets this objects's ACL to an XML string.
:type acl_str: string
:param acl_str: A string containing the ACL XML.
:type headers: dict
:param headers: Additional headers to set during the request.
:type generation: int
:param generation: If specified, sets the ACL for a specific generation
of a versioned object. If not specified, the current version is
modified.
:type if_generation: int
:param if_generation: (optional) If set to a generation number, the acl
will only be updated if its current generation number is this value.
:type if_metageneration: int
:param if_metageneration: (optional) If set to a metageneration number,
the acl will only be updated if its current metageneration number is
this value.
"""
if self.bucket is not None:
return self.bucket.set_xml_acl(acl_str, self.name, headers=headers,
generation=generation,
if_generation=if_generation,
if_metageneration=if_metageneration)
def set_canned_acl(self, acl_str, headers=None, generation=None,
if_generation=None, if_metageneration=None):
"""Sets this objects's ACL using a predefined (canned) value.
:type acl_str: string
:param acl_str: A canned ACL string. See
:data:`~.gs.acl.CannedACLStrings`.
:type headers: dict
:param headers: Additional headers to set during the request.
:type generation: int
:param generation: If specified, sets the ACL for a specific generation
of a versioned object. If not specified, the current version is
modified.
:type if_generation: int
:param if_generation: (optional) If set to a generation number, the acl
will only be updated if its current generation number is this value.
:type if_metageneration: int
:param if_metageneration: (optional) If set to a metageneration number,
the acl will only be updated if its current metageneration number is
this value.
"""
if self.bucket is not None:
return self.bucket.set_canned_acl(
acl_str,
self.name,
headers=headers,
generation=generation,
if_generation=if_generation,
if_metageneration=if_metageneration
)
    def compose(self, components, content_type=None, headers=None):
        """Create a new object from a sequence of existing objects.

        The content of the object representing this Key will be the
        concatenation of the given object sequence. For more detail, visit

            https://developers.google.com/storage/docs/composite-objects

        :type components: list of Keys
        :param components: List of gs.Keys representing the component objects.

        :type content_type: string
        :param content_type: (optional) Content type for the new composite
            object.

        :type headers: dict
        :param headers: (optional) Additional headers for the PUT request.

        :returns: the 'x-goog-generation' header of the composed object
            (useful to build the result URI for parallel uploads).
        :raises BotoClientError: if any component lives in a different bucket.
        """
        # Build the <ComposeRequest> XML body, one <Component> per source key.
        compose_req = []
        for key in components:
            if key.bucket.name != self.bucket.name:
                # GCS compose only works within a single bucket.
                raise BotoClientError(
                    'GCS does not support inter-bucket composing')

            generation_tag = ''
            if key.generation:
                # Pin the component to a specific generation when known.
                generation_tag = ('<Generation>%s</Generation>'
                                  % str(key.generation))
            compose_req.append('<Component><Name>%s</Name>%s</Component>' %
                               (key.name, generation_tag))
        compose_req_xml = ('<ComposeRequest>%s</ComposeRequest>' %
                           ''.join(compose_req))
        headers = headers or {}
        if content_type:
            headers['Content-Type'] = content_type
        # Issue the compose request against this key's own name.
        resp = self.bucket.connection.make_request(
            'PUT', get_utf8_value(self.bucket.name), get_utf8_value(self.name),
            headers=headers, query_args='compose',
            data=get_utf8_value(compose_req_xml))
        if resp.status < 200 or resp.status > 299:
            # Non-2xx: surface the provider-specific storage error.
            raise self.bucket.connection.provider.storage_response_error(
                resp.status, resp.reason, resp.read())

        # Return the generation so that the result URI can be built with this
        # for automatic parallel uploads.
        return resp.getheader('x-goog-generation')
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/eggs/boto-2.27.0-py2.7.egg/boto/gs/key.py
|
Python
|
gpl-3.0
| 42,479
|
[
"VisIt"
] |
6e035313eeef9af3346381fdc732ba9174967db01837712d5bcc577b908545a4
|
"""
Methods for dealing with the Group hierarchy.
"""
import ncobj
from ncobj import Group, Variable, Dimension, Attribute
from collections import namedtuple
def walk_group_objects(group, of_types=None):
    """
    Iterate over all contained components, recursively.

    Args:

    * of_types (type or iterable of types):
        If used, filter results by "isinstance(<element>, of_types)".

    Returns:
        an iterator

    """
    def _wanted(obj):
        # With no filter given, everything matches.
        return of_types is None or isinstance(obj, of_types)

    if _wanted(group):
        yield group
    # Visit dimensions, then variables, then attributes (original order).
    for container in (group.dimensions, group.variables, group.attributes):
        for element in container:
            if _wanted(element):
                yield element
    # Depth-first recursion into each subgroup.
    for subgroup in group.groups:
        for obj in walk_group_objects(subgroup, of_types):
            yield obj
def all_variables(group):
    """Return a list of all enclosed :class:`~ncobj.Variable` definitions."""
    return list(walk_group_objects(group, of_types=Variable))
def all_dimensions(group):
    """Return a list of all enclosed :class:`~ncobj.Dimension` definitions."""
    return list(walk_group_objects(group, of_types=Dimension))
def all_groups(group):
    """Return a list of all sub-groups (including 'group' itself)."""
    return list(walk_group_objects(group, of_types=Group))
def group_path(ncobj):
    """
    Return a string representing the absolute location of the element relative
    to the root group.

    Args:

    * ncobj (:class:`~ncobj.NcObj`)
        The element to locate.

    For example:
        group_path(<var>) --> "/group_A/var_X"

    """
    parent = ncobj.container.in_element if ncobj.container else None
    if isinstance(parent, Group):
        # Prefix with the enclosing group's path, built recursively.
        return group_path(parent) + '/' + ncobj.name
    return ncobj.name
def _find_definition(group, name, container_prop_name):
    """
    Search groups upward for a definition by name and container property name.

    Args:

    * group (:class:`~ncobj.Group`):
        The group to start searching at.

    * name (:class:`~ncobj.NcObj`):
        The name the element should have.

    * container_prop_name:
        The Group container property to look in.

    Returns:
        An existing definition object, or None.

    """
    # Walk up the group hierarchy iteratively instead of recursing.
    grp = group
    while True:
        for element in getattr(grp, container_prop_name):
            if element.name == name:
                return element
        # Not found here; stop at the root, else try the parent.
        if not grp.parent_group:
            return None
        grp = grp.parent_group
def find_named_definition(group, name, element_type):
    """
    Search groups upward for a definition by name and element type.

    Args:

    * group (:class:`~ncobj.Group`):
        The group to start searching at.

    * name (:class:`~ncobj.NcObj`):
        The name the element should have.

    * element_type (type):
        The element type (class) to search for (defines the relevant Group
        container attribute).

    Returns:
        An existing definition object, or None.

    """
    # Map the element type onto the Group container property holding it.
    for base_type, prop_name in ((Dimension, 'dimensions'),
                                 (Variable, 'variables')):
        if issubclass(element_type, base_type):
            return _find_definition(group, name, prop_name)
    raise ValueError('type "{}" is not recognised, or not supported for '
                     'definition lookup'.format(element_type))
class DimensionConflictError(Exception):
    """Exception raised when dimension information is invalid (e.g. two
    references to one dimension require different lengths)."""
    pass
class NameConflictError(Exception):
    """Exception raised when names of components coincide (variables and
    subgroups share one namespace within each group)."""
    pass
class IncompleteStructureError(Exception):
    """Exception raised when a required dimension definition is missing."""
    def __init__(self, var, dim):
        # Name the variable by its full path and the dimension by name.
        msg = ('Variable "{}" needs a dimension "{}", for which no definition '
               'exists in the group structure.'.format(
                   group_path(var), dim.name))
        super(IncompleteStructureError, self).__init__(msg)
# Check that all names within a group are compatible.
def check_group_name_clashes(group):
    """
    Check this group and subgroups for any name clashes between components.

    If found, raise a :class:`NameConflictError` describing the first clash.

    .. note::

        Name collisions can occur between variables, subgroups and user-types:
        In NetCDF, these components share a namespace within each group.

    """
    for grp in all_groups(group):
        # BUG FIX: this previously read 'group.variables' / 'group.groups'
        # (the outermost group) on every iteration, so clashes inside
        # subgroups were never detected.  Use the group being inspected.
        var_names = set(grp.variables.names())
        group_names = set(grp.groups.names())
        clashes = var_names & group_names
        if clashes:
            badname = list(clashes)[0]
            raise NameConflictError('group "{}" contains both a variable and '
                                    'a subgroup named {}.'.format(
                                        group_path(grp), badname))
def add_missing_dims(group):
    """
    Create new definitions for any missing dimensions in the group.

    A missing dimension is one referred to by a variable in 'group' or its
    subgroups, for which no definition can be located by
    :func:`find_named_definition`.  The new ones are created in 'group'.

    Returns:
        A list of the definitions created for missing dimensions.

    .. note::

        If 'group' is not itself the root, then a matching definition may be
        found in a parent group.  In these cases, no new definition is created
        (even though the required definition is outside 'group').

    """
    # Find or create definitions for all dimensions used by all variables.
    new_created_dims = []
    for var in all_variables(group):
        var_group = var.definitions_group()
        for dim in var.dimensions:
            # Locate existing dimension in structure, if any.
            dim_def = find_named_definition(var_group, dim.name, Dimension)
            if dim_def is None:
                # Create a new top-level dimension definition.
                group.dimensions.add(dim)
                # NOTE(review): re-fetched from the container so the recorded
                # object is the stored definition -- presumably 'add' copies
                # its argument; confirm against the container implementation.
                dim_def = group.dimensions[dim.name]
                # Keep a list, so we can remove again on error.
                new_created_dims.append(dim_def)
    return new_created_dims
def has_no_missing_dims(group, fail_if_not=False):
    """
    Check that matching definitions exist for all dimensions used in the
    variables of this group (and its sub-groups).

    Kwargs:

    * fail_if_not (bool):
        If set, then if and when a missing dimension is found, raise an
        :class:`IncompleteStructureError`, instead of just returning False.

    .. note::

        If 'group' is not itself the root, then a matching definition may be
        found in a parent group.  Such a dimension is not counted as 'missing'
        (even though the required definition is outside 'group').

    """
    for var in all_variables(group):
        var_group = var.definitions_group()
        # Collect this variable's dimensions that have no visible definition.
        missing = [dim for dim in var.dimensions
                   if not find_named_definition(var_group, dim.name,
                                                Dimension)]
        if missing:
            if fail_if_not:
                # Report the first missing dimension encountered.
                raise IncompleteStructureError(var, missing[0])
            return False
    return True
_DimVarData = namedtuple('DimVarsData', 'var dim')
def _add_dims_varsdata(group):
    """Attach temporary usage records ('_varsdata') to every dimension
    definition, listing the variables that reference it (and with what
    effective length).  Idempotent: a marker flag on the group prevents
    double-annotation."""
    # NOTE: only on completed structures (i.e. dim definitions all exist).
    has_no_missing_dims(group, fail_if_not=True)
    if not _has_varsdata(group):
        group._with_varsdata = True
        # Add blank data to every dimension definition.
        for dim in all_dimensions(group):
            dim._varsdata = []
        # Scan all variables and record usage against dimensions referenced.
        for var in all_variables(group):
            var_group = var.definitions_group()
            if not hasattr(var.data, 'shape'):
                # Take dims as given (lengths etc. may be unspecified)
                var_dims = var.dimensions
            else:
                # Construct dims modified by var shape where needed.
                shape = var.data.shape
                if len(shape) != len(var.dimensions):
                    raise DimensionConflictError(
                        'Variable {} has {} dimensions, but its data has {} '
                        'dimensions.'.format(group_path(var),
                                             len(var.dimensions),
                                             len(var.data.shape)))
                # Where the data shape disagrees with a dim's length, record a
                # substitute Dimension carrying the data-derived length.
                var_dims = [dim if shape[i_dim] == dim.length
                            else Dimension(dim.name, length=shape[i_dim],
                                           unlimited=dim.unlimited)
                            for i_dim, dim in enumerate(var.dimensions)]
            for dim in var_dims:
                # Locate the matching dimension definition in the structure.
                dim_def = find_named_definition(var_group, dim.name, Dimension)
                assert dim_def is not None
                # Add the variable with its dimension usage.
                dim_def._varsdata.append(_DimVarData(var, dim))
def _remove_dims_varsdata(group):
    """Strip the temporary per-dimension usage records added by
    :func:`_add_dims_varsdata`, plus the group's marker flag."""
    for dim in all_dimensions(group):
        delattr(dim, '_varsdata')
    delattr(group, '_with_varsdata')
def _has_varsdata(group):
    """Return True if :func:`_add_dims_varsdata` has annotated this group
    (the marker attribute exists; its value is irrelevant)."""
    return hasattr(group, '_with_varsdata')
def check_consistent_dims_usage(group):
    """
    Check that the requirements for all dimensions are consistent, and if not
    raise a :class:`DimensionConflictError`.

    This means that all references to each dimension must have the same length.
    Where variables have attached data, the length is taken from the data shape
    instead of the attached :class:`Dimension` object, and the number of
    dimensions must also match.  Each dimension must also have a known length,
    meaning that at least one reference must define the length, or have
    attached data.

    .. note::

        Can only be used on groups with no missing dimensions, as described for
        :func:`has_no_missing_dimensions`.
        Otherwise a :class:`IncompleteStructureError` will be raised.

    """
    # Annotate with usage records only if the caller has not already done so;
    # in that case the caller owns them and we must not remove them.
    has_existing_varsdata = _has_varsdata(group)
    if not has_existing_varsdata:
        _add_dims_varsdata(group)
    try:
        for dim in all_dimensions(group):
            # Look for conflicting requirements, which means defined (non-None)
            # lengths that don't match.
            # Different "unlimited" vals is not an error, so ignore those here.
            vars_dims = [var_dim for var_dim in dim._varsdata
                         if var_dim.dim.length is not None]
            if not vars_dims and dim.length is None:
                # Nothing (neither references nor the dim itself) defines a
                # length for this dimension.
                raise DimensionConflictError(
                    'No length can be deduced for dimension "{}".'.format(
                        group_path(dim)))
            # Complain if referencing variables disagree about the length.
            # NOTE: the dimension _itself_ may have a different length, this is
            # overridden by any length in the variables
            if len(vars_dims) > 1:
                var1, dim1 = vars_dims[0]
                for (varx, dimx) in vars_dims[1:]:
                    if dimx.length != dim1.length:
                        raise DimensionConflictError(
                            'Variable "{}" requires dimension "{}" = {}, but '
                            'variable "{}" requires "{}" = {}".'.format(
                                group_path(var1), dim1.name, dim1.length,
                                group_path(varx), dimx.name, dimx.length))
    finally:
        # Tidy the temporary records, even on error, unless pre-existing.
        if not has_existing_varsdata:
            _remove_dims_varsdata(group)
def complete(group):
    """
    Make this group internally consistent, by adding any missing dimension
    definitions and linking all variable dimensions to their definitions.

    This makes the structure fully compliant with NetCDF constraints.  This
    ensures it is suitable to be written to a file.

    Dimension definitions are made consistent with the data and dimension
    information of all variables that reference them.  If this is not possible,
    as decribed for :func:`check_consistent_dims_usage`, a
    :class:`DimensionConflictError` is raised.

    A dimension definition will also be made 'unlimited' if any of the
    references requires it.

    .. note::

        A :class:`NameConflictError` can also result if components have
        conflicting names, as described for
        :func:`check_group_name_clashes`.

    """
    # NOTE: make dimensions unlimited when required, and also allow these to be
    # used where the variable information implies a fixed dimension.
    new_dim_defs = add_missing_dims(group)
    _add_dims_varsdata(group)
    try:
        check_consistent_dims_usage(group)
        check_group_name_clashes(group)
    except Exception:
        # Restore original argument before sending caller an exception.
        # (Undo both the usage annotations and any dimensions we created.)
        _remove_dims_varsdata(group)
        group.dimensions.remove_allof(new_dim_defs)
        raise

    # Fix properties of all dimension definitions from variables using them.
    for dim in all_dimensions(group):
        if dim._varsdata:
            # NOTE: do nothing to any unused dimensions here.
            # Can easily prune these if wanted.
            dims = [vardata.dim for vardata in dim._varsdata]
            lengths = [dimx.length for dimx in dims if dimx.length is not None]
            # Set length from variables, if any (else what it says in the dim).
            if lengths:
                # N.B. we already checked that all these lengths are the same.
                dim.length = lengths[0]
            # Likewise for 'unlimited'
            if any(dimx.unlimited for dimx in dims):
                dim.unlimited = True

    # Connect all variables' dims directly to dimension definitions.
    # (N.B. effectively the opposite of the 'detached' concept).
    for var in all_variables(group):
        var_group = var.definitions_group()
        var.dimensions = [find_named_definition(var_group, dim.name, Dimension)
                          for dim in var.dimensions]

    # Tidy up after.
    _remove_dims_varsdata(group)
|
pp-mo/ncobj
|
lib/ncobj/grouping.py
|
Python
|
gpl-3.0
| 14,331
|
[
"NetCDF"
] |
e878fad4f912c51fbbe5da4d79c8ad759e5f37e0239aace09a1219da01ef0941
|
#! /usr/bin/env python3
#
# Self-test: approximate the phi functions with several REXI variants and
# verify the maximum approximation error over sampled points on the real
# and/or imaginary axis stays below 'error_eps'.
#

import sys
import os
import numpy as np

d = os.path.dirname(os.path.realpath(__file__))
sys.path.append(d+"/..")

from Functions import *
from trexi.TREXI import *
from cirexi.CIREXI import *
from brexi.BREXI import *
from elrexi.ELREXI import *

sys.path.pop()


for rexi_method in [
    #"trexi",
    #"cirexi",
    #"elrexi",
    #"brexi_tanhsinh",
    "brexi_gauss",
    "brexi_chebyshev",
    "brexi_jacobi",
]:
    function = Functions(
        function_name = "phi3",
        efloat_mode = "mpfloat"
    )

    """
    x = 1e-16j
    x = 0.6j+0.5
    for i in range(6):
        val = function.phiNRec(i, x)
        print(str(float(val.real))+"\t"+str(float(val.imag)))

        val = function.phiNSeries(i, x)
        print(str(float(val.real))+"\t"+str(float(val.imag)))
        print("")
    sys.exit(1)
    """

    for function_name in ["phi0", "phi1", "phi2", "phi3"]: #, "ups1", "ups2", "ups3"]:
    #for function_name in ["phi0"]:

        # T-REXI: Number of Gaussian basis functions
        M = 256
        # T-REXI: Spacing between Gaussian basis functions
        h = 0.2

        # CI-REXI: Number of quadrature poles
        #N = M
        N = 256

        # CI-REXI: Radius
        #R = M*h

        # CI-REXI: Value on imaginary axis to be included
        #same used for ELREXI as max imag of ellipse
        lambda_include_imag = 20

        # CI-REXI: Maximum value of quadrature pole
        #same used for ELREXI as max real for ellipse
        lambda_max_real = 10

        # Butcher-REXI
        butcherOrder = 10

        # Testing: number of samples
        num_test_samples = 12345

        # Testing: Range (start, end)
        test_range_real = [None, None]
        test_range_imag = [None, None]

        # Error to test for
        error_eps = 1e-8

        # verbose
        verbosity = 10
        verbosity = 0

        # efloat_mode
        efloat_mode = "float"
        #efloat_mode = "mpfloat"

        coeffs = None

        print("REXI method: "+rexi_method)

        if rexi_method == "trexi":
            # T-REXI only approximates phi0.
            if function_name != "phi0":
                continue

            trexi = TREXI(efloat_mode=efloat_mode)
            coeffs = trexi.setup(
                function_name = function_name,
                M = N,
                h = h
            )

            # Convert to floating point
            coeffs = coeffs.toFloat()
            unique_id_string = trexi.getUniqueId()

            test_range_real = None
            test_range_imag = [-N*h*0.95, N*h*0.95]

        elif rexi_method == "cirexi":
            cirexi = CIREXI(efloat_mode=efloat_mode)
            coeffs = cirexi.setup(
                function_name = function_name,
                N = N,
                lambda_max_real = lambda_max_real,
                lambda_include_imag = lambda_include_imag
            )

            # Convert to floating point
            coeffs = coeffs.toFloat()
            unique_id_string = cirexi.getUniqueId()

            #test_range = [-lambda_include_imag*0.5, lambda_include_imag*0.5]
            test_range_real = [-1.0, 1.0]
            test_range_imag = [-1.0, 1.0]

        elif rexi_method == "elrexi":
            elrexi = ELREXI(efloat_mode=efloat_mode)
            coeffs = elrexi.setup(
                function_name = function_name,
                N = N,
                lambda_max_real = lambda_max_real,
                lambda_max_imag = lambda_include_imag
            )

            # Convert to floating point
            coeffs = coeffs.toFloat()
            unique_id_string = elrexi.getUniqueId()

            #test_range = [-lambda_include_imag*0.5, lambda_include_imag*0.5]
            test_range_real = [-1.0, 1.0]
            test_range_imag = [-1.0, 1.0]

        elif rexi_method == "brexi_gauss":
            if function_name != "phi0":
                continue

            brexi = BREXI(efloat_mode=efloat_mode)
            coeffs = brexi.setup(N=butcherOrder, quadrature_method="gauss_legendre")

            # Convert to floating point
            coeffs = coeffs.toFloat()
            unique_id_string = brexi.getUniqueId()

            test_range_real = [butcherOrder*0.25, butcherOrder*0.25]
            test_range_imag = [butcherOrder*0.5, butcherOrder*0.5]

        elif rexi_method == "brexi_jacobi":
            if function_name != "phi0":
                continue

            brexi = BREXI(efloat_mode=efloat_mode)
            coeffs = brexi.setup(N=butcherOrder, quadrature_method="gauss_jacobi")

            # Convert to floating point
            coeffs = coeffs.toFloat()
            unique_id_string = brexi.getUniqueId()

            test_range_real = [butcherOrder*0.25, butcherOrder*0.25]
            test_range_imag = [butcherOrder*0.5, butcherOrder*0.5]

        elif rexi_method == "brexi_chebyshev":
            if function_name != "phi0":
                continue

            brexi = BREXI(efloat_mode=efloat_mode)
            coeffs = brexi.setup(N=butcherOrder, quadrature_method="gauss_chebyshev_u")

            # Convert to floating point
            coeffs = coeffs.toFloat()
            unique_id_string = brexi.getUniqueId()

            test_range_real = [butcherOrder*0.15, butcherOrder*0.15]
            test_range_imag = [butcherOrder*0.25, butcherOrder*0.25]

        else:
            raise Exception("Unsupported REXI method")

        # Reference evaluator for the exact function values.
        function = Functions(
            function_name = function_name,
            efloat_mode = "float"
        )

        print("")
        print(unique_id_string)
        print(" + function_name: "+function_name)

        # IDIOM FIX: compare with None via 'is not None', not '!='.
        if test_range_real is not None:
            max_error = 0
            for x in np.linspace(test_range_real[0], test_range_real[1], num_test_samples):
                lam = x
                y = function.eval(lam)
                yn = coeffs.eval(lam)
                err = np.abs(y-yn)

                if verbosity > 0:
                    #if True:
                    if False:
                        print("x="+str(lam)+"\t\terror="+str(err))
                    else:
                        print("Lambda: "+str(lam))
                        print(" + exact: "+str(y))
                        print(" + approx: "+str(yn))
                        print(" + Error: "+str(err))
                        print("")

                max_error = max(max_error, err)

            if verbosity == 0:
                print(" + test_range_real: ["+str(test_range_real[0])+", "+str(test_range_real[1])+"]")
                print(" + Error: "+str(max_error))

            if max_error > error_eps:
                raise Exception("Error threshold "+str(error_eps)+" exceeded")

        if test_range_imag is not None:
            max_error = 0
            for x in np.linspace(test_range_imag[0], test_range_imag[1], num_test_samples):
                lam = 1j*x
                y = function.eval(lam)
                yn = coeffs.eval(lam)
                err = np.abs(y-yn)

                if verbosity > 0:
                    #if True:
                    if False:
                        print("x="+str(lam)+"\t\terror="+str(err))
                    else:
                        print("Lambda: "+str(lam))
                        print(" + exact: "+str(y))
                        print(" + approx: "+str(yn))
                        print(" + Error: "+str(err))
                        print("")

                max_error = max(max_error, err)

            if verbosity == 0:
                print(" + test_range_imag: ["+str(test_range_imag[0])+", "+str(test_range_imag[1])+"]")
                print(" + Error: "+str(max_error))

            if max_error > error_eps:
                raise Exception("Error threshold "+str(error_eps)+" exceeded")

        #coeffs.write_file("/tmp/REXI_"+rexi_method+"_"+unique_id_string+"_txt.txt", False)
        #coeffs.write_file("/tmp/REXI_"+rexi_method+"_"+unique_id_string+"_bin.txt", True)
|
schreiberx/sweet
|
mule_local/python/mule_local/rexi/tests/test_rexi_approximations.py
|
Python
|
mit
| 7,999
|
[
"Gaussian"
] |
b3b655288868c7b8e644f46e304e02047d79ffe98a2692c84424df8b3a6907e8
|
###########################################################################
#
# This program is part of Zenoss Core, an open source monitoring platform.
# Copyright (C) 2007, Zenoss Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 as published by
# the Free Software Foundation.
#
# For complete information please visit: http://www.zenoss.com/oss/
#
# Created By : Wouter D'Haeseleer
# Created On : 05-11-2007
# Company : Imas NV
#
#
# Re-Edited By: Randy Schneiderman
# Re-Edited On: 07/21/2008
# Comapany: Stroz Friedberg
#
# New Comments: Based on the VMwareESX ZenPack, this plugin uses the same code
# with the exception of the class name and the command use.
# This has been successfully tested with EMC's Celerra Network
# Server v5.5.
#
###########################################################################
import re
from Products.DataCollector.plugins.CollectorPlugin import CommandPlugin
class CelerraDf(CommandPlugin):
    """
    Run server_df to model filesystem information.
    """
    # Zenoss modeler-plugin configuration.
    maptype = "FilesystemMap"
    command = 'export NAS_DB=/nas && /nas/bin/server_df server_2'
    compname = "os"
    relname = "filesystems"
    modname = "Products.ZenModel.FileSystem"

    def process(self, device, results, log):
        """Parse 'server_df' output into a FileSystem relationship map.

        Honours the device's zFileSystemMapIgnoreNames regex (if set) to
        skip unwanted mounts.  Returns a relationship map of object maps,
        one per filesystem line.
        """
        log.info('Collecting filesystems for device %s' % device.id)
        skipfsnames = getattr(device, 'zFileSystemMapIgnoreNames', None)
        rm = self.relMap()
        rlines = results.split("\n")
        bline = ""
        for line in rlines:
            # Skip the column-header line.
            if line.startswith("Filesystem"): continue
            om = self.objectMap()
            spline = line.split()
            # A single-token line is a long filesystem name that df wrapped
            # onto its own line; remember it and prepend to the next line.
            if len(spline) == 1:
                bline = spline[0]
                continue
            if bline:
                spline.insert(0,bline)
                bline = None
            # Expect exactly: device, total-blocks, used, avail, pct, mount.
            if len(spline) != 6: continue
            (om.storageDevice, tblocks, u, a, p, om.mount) = spline
            if skipfsnames and re.search(skipfsnames,om.mount): continue
            om.totalBlocks = long(tblocks)
            # server_df reports 1K blocks.
            om.blockSize = 1024
            om.id = self.prepId(om.mount)
            rm.append(om)
        return rm
|
zenoss/ZenPacks.community.CelerraFileSystem
|
ZenPacks/community/CelerraFileSystem/modeler/plugins/CelerraDf.py
|
Python
|
gpl-2.0
| 2,254
|
[
"VisIt"
] |
22c63ed1fa7e507c495b3a98a986a27b340561571304fd1c25750a41b6648e79
|
# -*- coding: utf-8 -*-
"""
Created on Wed May 17 09:51:50 2017
@author: banggui
"""
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from glob import glob
import SimpleITK as sitk
import os
import scipy.ndimage
import matplotlib.pyplot as plt
from skimage import measure, morphology
def load_scan(path):
    """Load a scan (e.g. .mhd) via SimpleITK.

    Returns (voxels, origin, spacing); origin and spacing are reversed into
    (z, y, x) order so they match the voxel-array axes.
    """
    img_info = sitk.ReadImage(path)
    voxels = sitk.GetArrayFromImage(img_info)
    origin_zyx = np.array(img_info.GetOrigin())[::-1]
    spacing_zyx = np.array(img_info.GetSpacing())[::-1]
    return voxels, origin_zyx, spacing_zyx
def get_pixels_hu(slices):
    """Convert a stack of DICOM-style slice objects to Hounsfield units.

    NOTE(review): expects objects exposing RescaleIntercept / RescaleSlope /
    SliceThickness / PixelSpacing (pydicom-style), NOT the ndarray returned
    by load_scan(); all callers here are commented out, so this looks like a
    legacy path -- confirm before reuse.
    """
    image = np.stack([s for s in slices])
    # Convert to int16 (from sometimes int16),
    # should be possible as values should always be low enough (<32k)
    image = image.astype(np.int16)

    # Convert to Hounsfield units (HU)
    for slice_number in range(len(slices)):
        intercept = slices[slice_number].RescaleIntercept
        slope = slices[slice_number].RescaleSlope
        if slope != 1:
            # Apply slope in float, then truncate back to int16.
            image[slice_number] = slope * image[slice_number].astype(np.float64)
            image[slice_number] = image[slice_number].astype(np.int16)
        image[slice_number] += np.int16(intercept)
    return np.array(image, dtype=np.int16), np.array([slices[0].SliceThickness] + slices[0].PixelSpacing, dtype=np.float32)
def binarize_per_slice(image, spacing, intensity_th=-600, sigma=1, area_th=30, eccen_th=0.99, bg_patch_size=10):
    """Threshold each axial slice into a candidate-lung binary mask.

    Smooths each slice with a Gaussian filter, thresholds at intensity_th
    (HU), then keeps only connected components that are large enough
    (area_th, in mm^2 via spacing) and not too elongated (eccen_th).
    Returns a boolean array the same shape as 'image'.
    """
    bw = np.zeros(image.shape, dtype=bool)

    # prepare a mask, with all corner values set to nan
    image_size = image.shape[1]
    grid_axis = np.linspace(-image_size/2+0.5, image_size/2-0.5, image_size)
    x, y = np.meshgrid(grid_axis, grid_axis)
    d = (x**2+y**2)**0.5
    nan_mask = (d<image_size/2).astype(float)
    nan_mask[nan_mask == 0] = np.nan
    for i in range(image.shape[0]):
        # Check if corner pixels are identical, if so the slice before Gaussian filtering
        if len(np.unique(image[i, 0:bg_patch_size, 0:bg_patch_size])) == 1:
            # Uniform corner patch: blank out the out-of-scan circle first.
            current_bw = scipy.ndimage.filters.gaussian_filter(np.multiply(image[i].astype('float32'), nan_mask), sigma, truncate=2.0) < intensity_th
        else:
            current_bw = scipy.ndimage.filters.gaussian_filter(image[i].astype('float32'), sigma, truncate=2.0) < intensity_th

        # select proper components
        label = measure.label(current_bw)
        properties = measure.regionprops(label)
        valid_label = set()
        for prop in properties:
            if prop.area * spacing[1] * spacing[2] > area_th and prop.eccentricity < eccen_th:
                valid_label.add(prop.label)
        current_bw = np.in1d(label, list(valid_label)).reshape(label.shape)
        bw[i] = current_bw

    return bw
def all_slice_analysis(bw, spacing, cut_num=0, vol_limit=[0.68, 8.2], area_th=6e3, dist_th=62):
    """Keep only 3-D components of the mask that plausibly are lungs.

    Removes background components touching the volume corners, filters
    components by physical volume (vol_limit, in litres via *1e6 mm^3) and
    by mean distance-to-center-axis on large slices, optionally after
    blanking the top 'cut_num' slices (restored afterwards).

    Returns (bw, n_valid): the filtered mask and the number of components
    kept.  NOTE(review): 'vol_limit' is a mutable default argument; it is
    never mutated here, but confirm before relying on that.
    """
    # in some cases, several top layers need to be removed first
    if cut_num > 0:
        bw0 = np.copy(bw)
        bw[-cut_num:] = False
    label = measure.label(bw, connectivity=1)
    # remove components access to corners
    mid = int(label.shape[2] / 2)
    bg_label = set([label[0, 0, 0], label[0, 0, -1], label[0, -1, 0], label[0, -1, -1], \
                    label[-1-cut_num, 0, 0], label[-1-cut_num, 0, -1], label[-1-cut_num, -1, 0], label[-1-cut_num, -1, -1], \
                    label[0, 0, mid], label[0, -1, mid], label[-1-cut_num, 0, mid], label[-1-cut_num, -1, mid]])
    for l in bg_label:
        label[label == l] = 0

    # select components based on volume
    properties = measure.regionprops(label)
    for prop in properties:
        if prop.area * spacing.prod() < vol_limit[0] * 1e6 or prop.area * spacing.prod() > vol_limit[1] * 1e6:
            label[label == prop.label] = 0

    # prepare a distance map for further analysis
    x_axis = np.linspace(-label.shape[1]/2+0.5, label.shape[1]/2-0.5, label.shape[1]) * spacing[1]
    y_axis = np.linspace(-label.shape[2]/2+0.5, label.shape[2]/2-0.5, label.shape[2]) * spacing[2]
    x, y = np.meshgrid(x_axis, y_axis)
    d = (x**2+y**2)**0.5
    vols = measure.regionprops(label)
    valid_label = set()
    # select components based on their area and distance to center axis on all slices
    for vol in vols:
        single_vol = label == vol.label
        slice_area = np.zeros(label.shape[0])
        min_distance = np.zeros(label.shape[0])
        for i in range(label.shape[0]):
            slice_area[i] = np.sum(single_vol[i]) * np.prod(spacing[1:3])
            # Distance of the component's nearest pixel to the center axis;
            # empty slices get the map's maximum instead.
            min_distance[i] = np.min(single_vol[i] * d + (1 - single_vol[i]) * np.max(d))

        if np.average([min_distance[i] for i in range(label.shape[0]) if slice_area[i] > area_th]) < dist_th:
            valid_label.add(vol.label)

    bw = np.in1d(label, list(valid_label)).reshape(label.shape)

    # fill back the parts removed earlier
    if cut_num > 0:
        # bw1 is bw with removed slices, bw2 is a dilated version of bw, part of their intersection is returned as final mask
        bw1 = np.copy(bw)
        bw1[-cut_num:] = bw0[-cut_num:]
        bw2 = np.copy(bw)
        bw2 = scipy.ndimage.binary_dilation(bw2, iterations=cut_num)
        bw3 = bw1 & bw2
        label = measure.label(bw, connectivity=1)
        label3 = measure.label(bw3, connectivity=1)
        l_list = list(set(np.unique(label)) - {0})
        valid_l3 = set()
        for l in l_list:
            indices = np.nonzero(label==l)
            l3 = label3[indices[0][0], indices[1][0], indices[2][0]]
            if l3 > 0:
                valid_l3.add(l3)
        bw = np.in1d(label3, list(valid_l3)).reshape(label3.shape)

    return bw, len(valid_label)
def fill_hole(bw):
    """Fill fully-enclosed 3-D holes in a boolean mask.

    Background components of the complement are those reaching any of the
    eight volume corners; everything else is a hole and gets filled.
    """
    label = measure.label(~bw)
    corner_indices = [(0, 0, 0), (0, 0, -1), (0, -1, 0), (0, -1, -1),
                      (-1, 0, 0), (-1, 0, -1), (-1, -1, 0), (-1, -1, -1)]
    bg_label = set(label[idx] for idx in corner_indices)
    return ~np.in1d(label, list(bg_label)).reshape(label.shape)
def two_lung_only(bw, spacing, max_iter=22, max_ratio=4.8):
    """Split a binary lung mask into two per-lung masks.

    Erodes the mask until it separates into two components of comparable
    size (area ratio < max_ratio, up to max_iter erosions), assigns each
    original voxel to the nearer component, then cleans and hole-fills each
    half.  If no split is found, everything goes into the first mask.

    Returns (bw1, bw2, bw): the two lung masks and their union.
    """
    def extract_main(bw, cover=0.95):
        # Per slice: keep the convex hulls of the largest components that
        # together cover at least `cover` of the slice's foreground area,
        # then keep only the single largest 3-D component.
        for i in range(bw.shape[0]):
            current_slice = bw[i]
            label = measure.label(current_slice)
            properties = measure.regionprops(label)
            properties.sort(key=lambda x: x.area, reverse=True)
            area = [prop.area for prop in properties]
            count = 0
            # IDIOM FIX: renamed from 'sum', which shadowed the builtin.
            covered = 0
            while covered < np.sum(area)*cover:
                covered = covered+area[count]
                count = count+1
            # IDIOM FIX: renamed from 'filter', which shadowed the builtin.
            keep_mask = np.zeros(current_slice.shape, dtype=bool)
            for j in range(count):
                bb = properties[j].bbox
                keep_mask[bb[0]:bb[2], bb[1]:bb[3]] = keep_mask[bb[0]:bb[2], bb[1]:bb[3]] | properties[j].convex_image
            bw[i] = bw[i] & keep_mask

        label = measure.label(bw)
        properties = measure.regionprops(label)
        properties.sort(key=lambda x: x.area, reverse=True)
        bw = label==properties[0].label
        return bw

    def fill_2d_hole(bw):
        # Fill holes slice-by-slice using each region's filled bbox image.
        for i in range(bw.shape[0]):
            current_slice = bw[i]
            label = measure.label(current_slice)
            properties = measure.regionprops(label)
            for prop in properties:
                bb = prop.bbox
                current_slice[bb[0]:bb[2], bb[1]:bb[3]] = current_slice[bb[0]:bb[2], bb[1]:bb[3]] | prop.filled_image
            bw[i] = current_slice
        return bw

    found_flag = False
    iter_count = 0
    bw0 = np.copy(bw)
    # Erode until the mask splits into two comparably-sized components.
    while not found_flag and iter_count < max_iter:
        label = measure.label(bw, connectivity=2)
        properties = measure.regionprops(label)
        properties.sort(key=lambda x: x.area, reverse=True)
        if len(properties) > 1 and properties[0].area/properties[1].area < max_ratio:
            found_flag = True
            bw1 = label == properties[0].label
            bw2 = label == properties[1].label
        else:
            bw = scipy.ndimage.binary_erosion(bw)
            iter_count = iter_count + 1

    if found_flag:
        # Assign every original voxel to the nearer of the two seeds
        # (distance transforms use the physical spacing).
        d1 = scipy.ndimage.morphology.distance_transform_edt(bw1 == False, sampling=spacing)
        d2 = scipy.ndimage.morphology.distance_transform_edt(bw2 == False, sampling=spacing)
        bw1 = bw0 & (d1 < d2)
        bw2 = bw0 & (d1 > d2)

        bw1 = extract_main(bw1)
        bw2 = extract_main(bw2)
    else:
        # No split found: treat the whole mask as one lung, other stays empty.
        bw1 = bw0
        bw2 = np.zeros(bw.shape).astype('bool')

    bw1 = fill_2d_hole(bw1)
    bw2 = fill_2d_hole(bw2)
    bw = bw1 | bw2

    return bw1, bw2, bw
def step1_python(case_path):
    """Run step-1 preprocessing on one case: load, binarize, clean up and
    split the lung mask into left and right lungs."""
    case_pixels, origin, spacing = load_scan(case_path)
    # case_pixels, spacing = get_pixels_hu(case)
    mask = binarize_per_slice(case_pixels, spacing)
    initial_mask = np.copy(mask)
    flag, cut_num, cut_step = 0, 0, 2
    # Retry the slice analysis on a fresh copy of the mask with an
    # increasing number of cut slices until it succeeds (flag != 0)
    # or the whole volume has been cut away.
    while flag == 0 and cut_num < mask.shape[0]:
        mask = np.copy(initial_mask)
        mask, flag = all_slice_analysis(mask, spacing, cut_num=cut_num, vol_limit=[0.68,7.5])
        cut_num += cut_step
    mask = fill_hole(mask)
    bw1, bw2, mask = two_lung_only(mask, spacing)
    return case_pixels, bw1, bw2, spacing
if __name__ == '__main__':
    # Demo: run the step-1 pipeline stage by stage on a single sample scan,
    # visualising each intermediate mask at a fixed slice index.
    patients = glob('../data/sample_patients/test/LKDS-00012.mhd')
    first_patient_pixels, origin, spacing = load_scan(patients[0])

    # Histogram of Hounsfield units across the whole volume.
    plt.hist(first_patient_pixels.flatten(), bins=80, color='c')
    plt.xlabel("Hounsfield Units (HU)")
    plt.ylabel("Frequency")
    plt.show()

    # Show some slice in the middle.
    slice_index = 80
    plt.imshow(first_patient_pixels[slice_index], cmap=plt.cm.gray)
    plt.show()

    # Stage 1: per-slice binarization.
    mask = binarize_per_slice(first_patient_pixels, spacing)
    plt.imshow(mask[slice_index], cmap=plt.cm.gray)
    plt.show()

    # Stage 2: whole-volume slice analysis, retried with more cut slices
    # until it reports success.
    flag, cut_num = 0, 0
    while flag == 0:
        mask, flag = all_slice_analysis(mask, spacing, cut_num=cut_num)
        cut_num = cut_num + 1
    plt.imshow(mask[slice_index], cmap=plt.cm.gray)
    plt.show()

    # Stage 3: hole filling.
    mask = fill_hole(mask)
    plt.imshow(mask[slice_index], cmap=plt.cm.gray)
    plt.show()

    # Stage 4: separate the two lungs.
    m1, m2, mask = two_lung_only(mask, spacing)
    plt.imshow(mask[slice_index], cmap=plt.cm.gray)
    plt.show()
|
shihuai/TCAI-2017
|
preprocess/step1.py
|
Python
|
mit
| 10,440
|
[
"Gaussian"
] |
b601315c8fe7c8cdf943978b9ecbb6a1ca2829d0588b3c3a56e8311f03785bf6
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Brian Coca <bcoca@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
module: systemd
author:
- "Ansible Core Team"
version_added: "2.2"
short_description: Manage services.
description:
- Controls systemd services on remote hosts.
options:
name:
required: true
description:
- Name of the service.
aliases: ['unit', 'service']
state:
required: false
default: null
choices: [ 'started', 'stopped', 'restarted', 'reloaded' ]
description:
- C(started)/C(stopped) are idempotent actions that will not run commands unless necessary.
C(restarted) will always bounce the service. C(reloaded) will always reload.
enabled:
required: false
choices: [ "yes", "no" ]
default: null
description:
- Whether the service should start on boot. B(At least one of state and enabled are required.)
masked:
required: false
choices: [ "yes", "no" ]
default: null
description:
- Whether the unit should be masked or not, a masked unit is impossible to start.
daemon_reload:
required: false
default: no
choices: [ "yes", "no" ]
description:
- run daemon-reload before doing any other operations, to make sure systemd has read any changes.
aliases: ['daemon-reload']
notes:
- One option other than name is required.
requirements:
- A system managed by systemd
'''
# Fixed: the restart example previously mixed key=value shorthand with a
# YAML-style "daemon_reload: yes" entry, which is invalid task syntax, and
# its comment misspelled "daemon-reload".
EXAMPLES = '''
# Example action to start service httpd, if not running
- systemd: state=started name=httpd
# Example action to stop service cron on debian, if running
- systemd: name=cron state=stopped
# Example action to restart service cron on centos, in all cases, also issue daemon-reload to pick up config changes
- systemd: state=restarted daemon_reload=yes name=crond
# Example action to reload service httpd, in all cases
- systemd: name=httpd state=reloaded
# Example action to enable service httpd and ensure it is not masked
- systemd:
    name: httpd
    enabled: yes
    masked: no
'''
RETURN = '''
status:
description: A dictionary with the key=value pairs returned from `systemctl show`
returned: success
type: complex
sample: {
"ActiveEnterTimestamp": "Sun 2016-05-15 18:28:49 EDT",
"ActiveEnterTimestampMonotonic": "8135942",
"ActiveExitTimestampMonotonic": "0",
"ActiveState": "active",
"After": "auditd.service systemd-user-sessions.service time-sync.target systemd-journald.socket basic.target system.slice",
"AllowIsolate": "no",
"Before": "shutdown.target multi-user.target",
"BlockIOAccounting": "no",
"BlockIOWeight": "1000",
"CPUAccounting": "no",
"CPUSchedulingPolicy": "0",
"CPUSchedulingPriority": "0",
"CPUSchedulingResetOnFork": "no",
"CPUShares": "1024",
"CanIsolate": "no",
"CanReload": "yes",
"CanStart": "yes",
"CanStop": "yes",
"CapabilityBoundingSet": "18446744073709551615",
"ConditionResult": "yes",
"ConditionTimestamp": "Sun 2016-05-15 18:28:49 EDT",
"ConditionTimestampMonotonic": "7902742",
"Conflicts": "shutdown.target",
"ControlGroup": "/system.slice/crond.service",
"ControlPID": "0",
"DefaultDependencies": "yes",
"Delegate": "no",
"Description": "Command Scheduler",
"DevicePolicy": "auto",
"EnvironmentFile": "/etc/sysconfig/crond (ignore_errors=no)",
"ExecMainCode": "0",
"ExecMainExitTimestampMonotonic": "0",
"ExecMainPID": "595",
"ExecMainStartTimestamp": "Sun 2016-05-15 18:28:49 EDT",
"ExecMainStartTimestampMonotonic": "8134990",
"ExecMainStatus": "0",
"ExecReload": "{ path=/bin/kill ; argv[]=/bin/kill -HUP $MAINPID ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }",
"ExecStart": "{ path=/usr/sbin/crond ; argv[]=/usr/sbin/crond -n $CRONDARGS ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }",
"FragmentPath": "/usr/lib/systemd/system/crond.service",
"GuessMainPID": "yes",
"IOScheduling": "0",
"Id": "crond.service",
"IgnoreOnIsolate": "no",
"IgnoreOnSnapshot": "no",
"IgnoreSIGPIPE": "yes",
"InactiveEnterTimestampMonotonic": "0",
"InactiveExitTimestamp": "Sun 2016-05-15 18:28:49 EDT",
"InactiveExitTimestampMonotonic": "8135942",
"JobTimeoutUSec": "0",
"KillMode": "process",
"KillSignal": "15",
"LimitAS": "18446744073709551615",
"LimitCORE": "18446744073709551615",
"LimitCPU": "18446744073709551615",
"LimitDATA": "18446744073709551615",
"LimitFSIZE": "18446744073709551615",
"LimitLOCKS": "18446744073709551615",
"LimitMEMLOCK": "65536",
"LimitMSGQUEUE": "819200",
"LimitNICE": "0",
"LimitNOFILE": "4096",
"LimitNPROC": "3902",
"LimitRSS": "18446744073709551615",
"LimitRTPRIO": "0",
"LimitRTTIME": "18446744073709551615",
"LimitSIGPENDING": "3902",
"LimitSTACK": "18446744073709551615",
"LoadState": "loaded",
"MainPID": "595",
"MemoryAccounting": "no",
"MemoryLimit": "18446744073709551615",
"MountFlags": "0",
"Names": "crond.service",
"NeedDaemonReload": "no",
"Nice": "0",
"NoNewPrivileges": "no",
"NonBlocking": "no",
"NotifyAccess": "none",
"OOMScoreAdjust": "0",
"OnFailureIsolate": "no",
"PermissionsStartOnly": "no",
"PrivateNetwork": "no",
"PrivateTmp": "no",
"RefuseManualStart": "no",
"RefuseManualStop": "no",
"RemainAfterExit": "no",
"Requires": "basic.target",
"Restart": "no",
"RestartUSec": "100ms",
"Result": "success",
"RootDirectoryStartOnly": "no",
"SameProcessGroup": "no",
"SecureBits": "0",
"SendSIGHUP": "no",
"SendSIGKILL": "yes",
"Slice": "system.slice",
"StandardError": "inherit",
"StandardInput": "null",
"StandardOutput": "journal",
"StartLimitAction": "none",
"StartLimitBurst": "5",
"StartLimitInterval": "10000000",
"StatusErrno": "0",
"StopWhenUnneeded": "no",
"SubState": "running",
"SyslogLevelPrefix": "yes",
"SyslogPriority": "30",
"TTYReset": "no",
"TTYVHangup": "no",
"TTYVTDisallocate": "no",
"TimeoutStartUSec": "1min 30s",
"TimeoutStopUSec": "1min 30s",
"TimerSlackNSec": "50000",
"Transient": "no",
"Type": "simple",
"UMask": "0022",
"UnitFileState": "enabled",
"WantedBy": "multi-user.target",
"Wants": "system.slice",
"WatchdogTimestampMonotonic": "0",
"WatchdogUSec": "0",
}
'''
import os
import glob
from ansible.module_utils.basic import AnsibleModule
# ===========================================
# Main control flow
def main():
    """Entry point of the systemd module.

    Parses the task arguments, queries the unit with ``systemctl show``,
    then applies the requested mask/unmask, enable/disable and
    start/stop/restart/reload operations (honouring check mode) and exits
    with the unit's status properties in ``result['status']``.
    """
    # init
    module = AnsibleModule(
        argument_spec = dict(
            name = dict(required=True, type='str', aliases=['unit', 'service']),
            state = dict(choices=[ 'started', 'stopped', 'restarted', 'reloaded'], type='str'),
            enabled = dict(type='bool'),
            masked = dict(type='bool'),
            daemon_reload= dict(type='bool', default=False, aliases=['daemon-reload']),
        ),
        supports_check_mode=True,
        required_one_of=[['state', 'enabled', 'masked', 'daemon_reload']],
    )
    # initialize
    systemctl = module.get_bin_path('systemctl')
    unit = module.params['name']
    rc = 0
    out = err = ''
    result = {
        'name':  unit,
        'changed': False,
        'status': {},
    }
    # Run daemon-reload first, if requested
    if module.params['daemon_reload']:
        (rc, out, err) = module.run_command("%s daemon-reload" % (systemctl))
        if rc != 0:
            module.fail_json(msg='failure %d during daemon-reload: %s' % (rc, err))
    #TODO: check if service exists
    (rc, out, err) = module.run_command("%s show '%s'" % (systemctl, unit))
    if rc != 0:
        module.fail_json(msg='failure %d running systemctl show for %r: %s' % (rc, unit, err))
    # load return of systemctl show into dictionary for easy access and return
    # Parser state: k is the key of a multiline value currently being
    # collected (None otherwise), multival accumulates its lines.
    k = None
    multival = []
    for line in out.split('\n'): # systemd can have multiline values delimited with {}
        if line.strip():
            if k is None:
                if '=' in line:
                    k,v = line.split('=', 1)
                    if v.lstrip().startswith('{'):
                        if not v.rstrip().endswith('}'):
                            # Start of a multiline {...} value; keep collecting.
                            multival.append(line)
                            continue
                    result['status'][k] = v.strip()
                    k = None
            else:
                if line.rstrip().endswith('}'):
                    result['status'][k] = '\n'.join(multival).strip()
                    multival = []
                    k = None
                else:
                    multival.append(line)
    if 'LoadState' in result['status'] and result['status']['LoadState'] == 'not-found':
        module.fail_json(msg='Could not find the requested service "%r": %s' % (unit, err))
    elif 'LoadError' in result['status']:
        module.fail_json(msg="Failed to get the service status '%s': %s" % (unit, result['status']['LoadError']))
    # mask/unmask the service, if requested
    if module.params['masked'] is not None:
        # NOTE(review): assumes 'LoadState' is always present in the show
        # output at this point; would raise KeyError otherwise — confirm.
        masked = (result['status']['LoadState'] == 'masked')
        # Change?
        if masked != module.params['masked']:
            result['changed'] = True
            if module.params['masked']:
                action = 'mask'
            else:
                action = 'unmask'
            if not module.check_mode:
                (rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
                if rc != 0:
                    module.fail_json(msg="Unable to %s service %s: %s" % (action, unit, err))
    # Enable/disable service startup at boot if requested
    if module.params['enabled'] is not None:
        # do we need to enable the service?
        enabled = False
        (rc, out, err) = module.run_command("%s is-enabled '%s'" % (systemctl, unit))
        # check systemctl result or if it is a init script
        initscript = '/etc/init.d/' + unit
        if rc == 0 or (os.access(initscript, os.X_OK) and bool(glob.glob('/etc/rc?.d/S??' + unit))):
            enabled = True
        # default to current state
        result['enabled'] = enabled
        # Change enable/disable if needed
        if enabled != module.params['enabled']:
            result['changed'] = True
            if module.params['enabled']:
                action = 'enable'
            else:
                action = 'disable'
            if not module.check_mode:
                (rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
                if rc != 0:
                    module.fail_json(msg="Unable to %s service %s: %s" % (action, unit, err))
            result['enabled'] = not enabled
    if module.params['state'] is not None:
        # default to desired state
        result['state'] = module.params['state']
        # What is current service state?
        if 'ActiveState' in result['status']:
            action = None
            if module.params['state'] == 'started':
                if result['status']['ActiveState'] != 'active':
                    action = 'start'
                    result['changed'] = True
            elif module.params['state'] == 'stopped':
                if result['status']['ActiveState'] == 'active':
                    action = 'stop'
                    result['changed'] = True
            else:
                # restarted/reloaded always run the corresponding command.
                action = module.params['state'][:-2] # remove 'ed' from restarted/reloaded
                result['state'] = 'started'
                result['changed'] = True
            if action:
                if not module.check_mode:
                    (rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
                    if rc != 0:
                        module.fail_json(msg="Unable to %s service %s: %s" % (action, unit, err))
        else:
            # this should not happen?
            module.fail_json(msg="Service is in unknown state", status=result['status'])
    module.exit_json(**result)
if __name__ == '__main__':
    main()
|
richardcs/ansible-modules-core
|
system/systemd.py
|
Python
|
gpl-3.0
| 13,996
|
[
"Brian"
] |
1349e007fa13f81e78b83c036f09a0b605e0ed8efa872da377174a05caf1b314
|
#!/usr/bin/env python
#
# $File: popInfo.py $
#
# This file is part of simuPOP, a forward-time population genetics
# simulation environment. Please visit http://simupop.sourceforge.net
# for details.
#
# Copyright (C) 2004 - 2010 Bo Peng (bpeng@mdanderson.org)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This script is an example in the simuPOP user's guide. Please refer to
# the user's guide (http://simupop.sourceforge.net/manual) for a detailed
# description of this example.
#
import simuPOP as sim
# Create a population of 10 individuals with no information fields yet.
pop = sim.Population(10)
# Information fields can be replaced wholesale with setInfoFields ...
pop.setInfoFields(['a', 'b'])
# ... or added incrementally, singly or in batches.
pop.addInfoFields('c')
pop.addInfoFields(['d', 'e'])
pop.infoFields()
#
# information fields can be accessed in batch mode
pop.setIndInfo([1], 'c')
# as well as individually.
for ind in pop.individuals():
    ind.e = ind.c + 1
print(pop.indInfo('e'))
|
BoPeng/simuPOP
|
docs/popInfo.py
|
Python
|
gpl-2.0
| 1,411
|
[
"VisIt"
] |
0877c2a790202924a1e61865ee2631b45749d3002fb8e96f001786117fec4f89
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
*******************************************
**DumpXYZ** - IO Object
*******************************************
* `dump()`
write configuration to trajectory XYZ file. By default filename is "out.xyz",
coordinates are folded.
Properties
* `filename`
Name of trajectory file. By default trajectory file name is "out.xyz"
* `unfolded`
False if coordinates are folded, True if unfolded. By default - False
* `append`
True if new trajectory data is appended to existing trajectory file. By default - True
* `length_factor`
If length dimension in current system is nm, and unit is 0.23 nm, for example, then
length_factor should be 0.23
* `length_unit`
It is length unit. Can be LJ, nm or A. By default - LJ
usage:
writing down trajectory
>>> dump_conf_xyz = espresso.io.DumpXYZ(system, integrator, filename='trajectory.xyz')
>>> for i in range (200):
>>> integrator.run(10)
>>>   dump_conf_xyz.dump()
writing down trajectory using ExtAnalyze extension
>>> dump_conf_xyz = espresso.io.DumpXYZ(system, integrator, filename='trajectory.xyz')
>>> ext_analyze = espresso.integrator.ExtAnalyze(dump_conf_xyz, 10)
>>> integrator.addExtension(ext_analyze)
>>> integrator.run(2000)
Both examples will give the same result: 200 configurations in the trajectory .xyz file.
setting up length scale
For example, the Lennard-Jones model for liquid argon with :math:`\sigma=0.34 [nm]`
>>> dump_conf_xyz = espresso.io.DumpXYZ(system, integrator, filename='trj.xyz', unfolded=False, length_factor=0.34, length_unit='nm', append=True)
will produce trj.xyz with coordinates in nanometers
"""
from espresso.esutil import cxxinit
from espresso import pmi
from espresso.ParticleAccess import *
from _espresso import io_DumpXYZ
class DumpXYZLocal(ParticleAccessLocal, io_DumpXYZ):
    'The (local) storage of configurations.'
    def __init__(self, system, integrator, filename='out.xyz', unfolded=False, length_factor=1.0, length_unit='LJ', append=True):
        # Forward all construction parameters to the C++ io_DumpXYZ object.
        cxxinit(self, io_DumpXYZ, system, integrator, filename, unfolded, length_factor, length_unit, append)
    def dump(self):
        # Write the current configuration to the trajectory file. Only
        # ranks in the active PMI CPU group (or all ranks when no PMI
        # communicator is configured) perform the write.
        if not pmi._PMIComm or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.dump(self)
if pmi.isController :
    # Controller-side proxy: forwards dump() calls and the listed
    # properties to the DumpXYZLocal instances on the workers via PMI.
    class DumpXYZ(ParticleAccess):
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espresso.io.DumpXYZLocal',
            pmicall = [ 'dump' ],
            pmiproperty = ['filename', 'unfolded', 'length_factor', 'length_unit', 'append']
        )
|
BackupTheBerlios/espressopp
|
src/io/DumpXYZ.py
|
Python
|
gpl-3.0
| 3,334
|
[
"ESPResSo"
] |
a6f0431d01a0c519566a86fae030cf1667ebaaae5d5737bcff291b6d12f41653
|
"""
Implementation of ADAptive LInear NEuron (Adaline) algorithm from Chapter 2 of
"Python Machine Learning"
"""
import numpy as np
def train_adeline(observations, labels, learning_rate=0.0001, max_training_iterations=100):
    """
    Fit an ADAptive LInear NEuron (Adaline) to the labelled observations
    using batch gradient descent on the sum-of-squared-errors cost.

    Returns a prediction function together with per-iteration training
    insight: the weights logged at the start of each iteration, the
    squared-error cost, and the misclassification count.

    :param observations: 2-D array, one observation per row
    :param labels: correct +1/-1 label for each row: [1, -1, 1, 1, ...]
    :param learning_rate: gradient-descent step size
    :param max_training_iterations: upper bound on passes over the data
    :return: (prediction_fn, weights_log, squared_error_log, num_errors_log)
    """
    weights = np.zeros(1 + observations.shape[1])
    weights_log, squared_error_log, num_errors_log = [], [], []

    def activation(samples, w):
        # Net input: samples . w[1:] plus the bias term w[0].
        return np.dot(samples, w[1:]) + w[0]

    def threshold(values):
        # Quantise continuous outputs into the +1/-1 label space.
        return np.where(values >= 0.0, 1, -1)

    def predict(samples, weights=weights):
        # Default-binds the live weight array, so predictions always use
        # the weights as trained so far (they are updated in place).
        return threshold(activation(samples, weights))

    for _ in range(max_training_iterations):
        weights_log.append(np.copy(weights))
        outputs = activation(observations, weights)
        residuals = labels - outputs
        # Batch update: gradient of the SSE cost over all observations.
        weights[1:] += learning_rate * np.dot(observations.transpose(), residuals)
        weights[0] += learning_rate * np.sum(residuals)
        squared_error_log.append((residuals ** 2).sum() / 2.0)
        misclassified = (threshold(outputs) != labels).sum()
        num_errors_log.append(misclassified)
        if misclassified == 0:
            break

    return predict, weights_log, squared_error_log, num_errors_log
|
krosaen/ml-study
|
python-ml-book/ch02/adeline.py
|
Python
|
mit
| 1,919
|
[
"NEURON"
] |
31eac43c2c6f03cd4e833bc5c818f39e7ef4b0e961e51ee4e01d597d26a4e4ee
|
#!/usr/bin/python
"""
fisher.py
"""
from __future__ import print_function
import sys
import os
import re
import pysam
import scipy.special
from scipy.stats import fisher_exact as fisher
import argparse
import logging
import math
import subprocess
from . import print_vcf
from . import print_anno
from . import util
from . import const
import multiprocessing
import copy
from builtins import chr, str
#
# Globals
#
arg = None           # parsed command-line options; set elsewhere (unused in this excerpt) — TODO confirm
target = None        # compiled regex matching indel entries in mpileup read bases (presumably ([\+\-])[0-9]+[ACGTNacgtn]+ per the comment in Pileup_out) — set in Pileup_and_count
remove_chr = None    # compiled regex used to strip read-start markers from read bases before the '$' removal in Pileup_out
filter_quals = None  # base-quality characters treated as too low; bases with these qualities are ignored in Pileup_out
#
# Class definitions
#
############################################################
def math_log_fisher_pvalue(fisher_pvalue):
    """Return the negative base-10 logarithm of a Fisher p-value,
    clamped to the range [0, 60]."""
    if fisher_pvalue < 10**(-60):
        # Extremely small p-values are capped so the score never exceeds 60.
        return float(60.0)
    if fisher_pvalue > 1.0 - 10**(-10):
        # p-values of (almost) exactly 1 score zero.
        return float(0.0)
    return -math.log( fisher_pvalue, 10 )
############################################################
def Pileup_out( mpileup, w, min_depth, min_variant_read, compare ):
    """Parse one samtools mpileup line into a per-position data record.

    Counts reference and mismatch bases and indels (per strand), computes
    Beta-distribution credible bounds (0.1 / mid / 0.9) for the mismatch
    and indel rates and, when two BAM files are compared, Fisher's exact
    test scores (-log10 p-value) between the two samples.

    :param mpileup: one raw mpileup output line (bytes on Python 3)
    :param w: output handle passed through by callers (not used here)
    :param min_depth: minimum read depth required to analyse a sample
    :param min_variant_read: minimum number of non-reference reads at the site
    :param compare: True when two samples (e.g. tumor/normal) are piled up
    :return: the populated ``data_pair`` list, or None when the position is
        skipped (zero depth, too few variant reads, bad reference base, or
        inconsistent mpileup data)
    """
    #
    # mpileup format
    #
    # chr1 272 T 24 ,.$.....,,.,.,...,,,.,..^+. <<<+;<<<<<<<<<<<=<;<;7<&
    #
    # 0 chromosome,
    # 1 1-based coordinate,
    # 2 reference base,
    # 3 the number of reads covering the site (1)
    # 4 read bases (1)
    # 5 base qualities (1)
    # 6 the number of reads covering the site (2)
    # 7 read bases (2)
    # 8 base qualities (2)
    #
    global target
    global remove_chr
    global filter_quals
    #
    # Prepare mpileup data
    #
    # mp_list = str( mpileup.translate( None, '\n' ) ).split( '\t' )
    if sys.version_info.major == 3:
        mp_list = mpileup.decode().strip('\n').split( '\t' )
    else:
        mp_list = mpileup.strip('\n').split( '\t' )
    mp_list_len = len( mp_list )
    ref_base_U = mp_list[ 2 ].upper()
    coordinate = mp_list[ 0:3 ]
    #
    # skip if depth is 0
    #
    if mp_list[ 3 ] == '0' or ( mp_list_len > 6 and mp_list[ 6 ] == '0' ):
    # if int(mp_list[ 3 ]) < min_depth or ( mp_list_len > 6 and int(mp_list[ 6 ]) < min_depth ):
        return None
    ref_base_plus = mp_list[ 4 ].count('.')
    ref_base_minus = mp_list[ 4 ].count(',')
    ref_base_count = mp_list[ 4 ].count('.') + mp_list[ 4 ].count(',')
    ins_base_count = mp_list[ 4 ].count('+')
    del_base_count = mp_list[ 4 ].count('-')
    if (int(mp_list[ 3 ]) - ref_base_count + ins_base_count + del_base_count) < min_variant_read:
        return None
    if ref_base_U not in 'ACGTN': return None
    #
    # data_pair IDs
    # const.POS_CHR = 0
    # const.POS_COORD = 1
    # const.POS_REF = 2
    # const.POS_DATA1 = 3
    # const.POS_DATA2 = 4
    # const.POS_FISHER_SNV = 5
    # const.POS_FISHER_INS = 6
    # const.POS_FISHER_DEL = 7
    # const.POS_COUNT = 8
    #
    data_pair = [ mp_list[ 0 ],
                  int( mp_list[ 1 ] ),
                  mp_list[ 2 ],
                  { 'mis_base': ref_base_U, 'mis_rate': 0, 'proper_read_depth': 0, 'proper_read_depth_plus': 0, 'proper_read_depth_minus': 0, 'proper_read_depth_indel': 0, 'proper_read_depth_indel_plus': 0, 'proper_read_depth_indel_minus': 0,'indel': util.AutoVivification() },
                  { 'mis_base': ref_base_U, 'mis_rate': 0, 'proper_read_depth': 0, 'proper_read_depth_plus': 0, 'proper_read_depth_minus': 0, 'proper_read_depth_indel': 0, 'proper_read_depth_indel_plus': 0, 'proper_read_depth_indel_minus': 0,'indel': util.AutoVivification() },
                  1.0,
                  'N:1.0',
                  'N:1.0',
                  0 ]
    #
    # Loop for 2 bam file case
    #
    if compare:
        data_pair[ const.POS_COUNT ] = 2
        input_list = [ ( const.POS_DATA1, mp_list[ 3 ], mp_list[ 4 ], mp_list[ 5 ] ),
                       ( const.POS_DATA2, mp_list[ 6 ], mp_list[ 7 ], mp_list[ 8 ] ) ]
    else:
        data_pair[ const.POS_COUNT ] = 1
        input_list = [ ( const.POS_DATA1, mp_list[ 3 ], mp_list[ 4 ], mp_list[ 5 ] ) ]
    #
    # position id,
    # mpileup output 4th row(number of read covering the site),
    # 5th row(read bases),
    # 6th row(base quality)
    #
    for data_id, depth, read_bases, qual_list in input_list:
        indel = util.AutoVivification()
        #
        # Look for deletion/insertion and save info in 'indel' dictionary
        #
        #   ([\+\-])[0-9]+[ACGTNacgtn]+
        #
        # m.group(1): + or - (deletion/insertion)
        # m.group(2): number of deletion/insertion
        # m.group(3): nucleotides
        #
        deleted = 0
        iter = target.finditer( read_bases )
        for m in iter:
            site = m.start()
            type = m.group( 1 )
            num = m.group( 2 )
            bases = m.group( 3 )[ 0:int( num ) ]
            # Lowercase indel bases come from the reverse strand.
            if bases.islower():
                strand = ( '-', '+' )
            else:
                strand = ( '+', '-' )
            key = '\t'.join( coordinate + [ bases.upper() ] )
            if type in indel and key in indel[ type ]:
                indel[ type ][ key ][ strand[ 0 ] ] += 1
            else:
                indel[ type ][ key ][ strand[ 0 ] ] = 1
                indel[ type ][ key ][ strand[ 1 ] ] = 0
            # Cut the matched indel text out of read_bases; `deleted`
            # tracks how much has been removed so far so later match
            # offsets stay valid.
            read_bases = read_bases[ 0:site - deleted ] + read_bases[ site + int(num) + len( num ) + 1 - deleted: ]
            deleted += 1 + len( num ) + int( num )
        #
        # Remove '^.' and '$'
        #
        read_bases = remove_chr.sub( '', read_bases )
        read_bases = read_bases.replace( '$', '' )
        #
        # Error check
        #
        if len( read_bases ) != len( qual_list ):
            logging.error( "mpileup data is not good: {0}, {1}".format( mpileup, read_bases ) )
            return None
        #
        # Count mismatch
        #
        mis_base_U = None
        if int( depth ) >= min_depth:
            # '.'/',' are reference matches on the +/- strand; replace them
            # with the actual reference base so all bases can be counted.
            read_bases = read_bases.replace( '.', ref_base_U )
            read_bases = read_bases.replace( ',', ref_base_U.lower() )
            base_num = {
                "total_A": 0,
                "total_C": 0,
                "total_G": 0,
                "total_T": 0,
                "total_N": 0,
                "A": 0,
                "C": 0,
                "G": 0,
                "T": 0,
                "N": 0,
                "a": 0,
                "c": 0,
                "g": 0,
                "t": 0,
                "n": 0
            }
            #
            # Set data
            #
            data_pair[ data_id ][ 'bases' ] = read_bases
            data_pair[ data_id ][ 'depth' ] = int( depth )
            #
            # Count number
            #
            for nuc, qual in zip( read_bases, qual_list ):
                if nuc in 'ATGCNacgtn':
                    data_pair[ data_id ][ 'proper_read_depth_indel' ] += 1
                if nuc in 'ATGCN':
                    data_pair[ data_id ][ 'proper_read_depth_indel_plus' ] += 1
                if nuc in 'acgtn':
                    data_pair[ data_id ][ 'proper_read_depth_indel_minus' ] += 1
                if nuc in 'ATGCNacgtn' and not ( qual in filter_quals) :
                    base_num[ nuc ] += 1
                    base_num[ 'total_' + nuc.upper() ] += 1
                if nuc in 'ATGCatgc' and not ( qual in filter_quals):
                    data_pair[ data_id ][ 'proper_read_depth' ] += 1
                if nuc in 'ATGC' and not ( qual in filter_quals):
                    data_pair[ data_id ][ 'proper_read_depth_plus' ] += 1
                if nuc in 'atgc' and not ( qual in filter_quals):
                    data_pair[ data_id ][ 'proper_read_depth_minus' ] += 1
            #
            # InsDel
            # Beta distribution
            #
            for type in ( '+', '-' ):
                if type in indel:
                    for key in indel[ type ].keys():
                        bases = key.split( '\t' )[ 3 ]
                        data_pair[ data_id ][ 'indel' ][ type ][ bases ][ '+' ] = indel[ type ][ key ][ '+' ]
                        data_pair[ data_id ][ 'indel' ][ type ][ bases ][ '-' ] = indel[ type ][ key ][ '-' ]
                        indel_number = \
                        data_pair[ data_id ][ 'indel' ][ type ][ bases ][ 'both' ] = ( indel[ type ][ key ][ '-' ] +
                                                                                      indel[ type ][ key ][ '+' ] )
                        data_pair[ data_id ][ 'indel' ][ type ][ bases ][ '0.1' ] = \
                            scipy.special.btdtri( indel_number + 1, float( data_pair[ data_id ][ 'proper_read_depth_indel' ] ) - indel_number + 1, 0.1 )
                        data_pair[ data_id ][ 'indel' ][ type ][ bases ][ 'mid' ] = \
                            ( indel_number + 1 ) / ( float( data_pair[ data_id ][ 'proper_read_depth_indel' ] ) + 2 )
                        data_pair[ data_id ][ 'indel' ][ type ][ bases ][ '0.9' ] = \
                            scipy.special.btdtri( indel_number + 1, int( data_pair[ data_id ][ 'proper_read_depth_indel' ] ) - indel_number + 1, 0.9 )
                        data_pair[ data_id ][ 'indel' ][ type ][ bases ][ 's_ratio' ] = \
                            float( indel[ type ][ key ][ '+' ] ) / data_pair[ data_id ][ 'indel' ][ type ][ bases ][ 'both' ]
            #
            # skip if reference is 'N'
            #
            if ref_base_U != 'N' and int( data_pair[ data_id ][ 'proper_read_depth' ] ) >= min_depth:
                ref_num = base_num[ 'total_' + ref_base_U ]
                mis_num = 0
                for nuc in ( 'A', 'C', 'G', 'T' ):
                    data_pair[ data_id ][ nuc ] = base_num[ nuc ]
                    tmp = nuc.lower()
                    data_pair[ data_id ][ tmp ] = base_num[ tmp ]
                    tmp = 'total_' + nuc
                    data_pair[ data_id ][ tmp ] = base_num[ tmp ]
                    if nuc != ref_base_U:
                        if base_num[ tmp ] > mis_num:
                            mis_num = base_num[ tmp ]
                            mis_base_U = nuc
                # For the second sample, force the mismatch base to be the
                # one chosen for the first sample so the two are comparable.
                if data_id == const.POS_DATA2 and data_pair[ const.POS_DATA1 ][ 'mis_base' ]:
                    mis_num = base_num[ 'total_' + data_pair[ const.POS_DATA1 ][ 'mis_base' ] ]
                    mis_base_U = data_pair[ const.POS_DATA1 ][ 'mis_base' ]
                ####
                #
                # Calculate ratio
                #
                data_pair[ data_id ][ 'mis_rate' ] = mis_num / float( data_pair[ data_id ][ 'proper_read_depth' ] )
                data_pair[ data_id ][ 'mis_base' ] = mis_base_U
                if mis_base_U and ( base_num[ mis_base_U ] + base_num[ mis_base_U.lower() ] ) > 0:
                    data_pair[ data_id ][ 's_ratio' ]  = float( base_num[ mis_base_U ] ) / ( base_num[ mis_base_U ] + base_num[ mis_base_U.lower() ] )
                # else:
                #     data_pair[ data_id ][ 's_ratio' ] = float(0)
                #
                # Beta distribution for SNV
                #
                data_pair[ data_id ][ '0.1' ] = scipy.special.btdtri( mis_num + 1, ref_num + 1, 0.1 )
                data_pair[ data_id ][ 'mid' ] = ( mis_num + 1 ) / float( ref_num + mis_num + 2 )
                data_pair[ data_id ][ '0.9' ] = scipy.special.btdtri( mis_num + 1, ref_num + 1, 0.9 )
                data_pair[ data_id ][ 'mis_num' ] = mis_num
                ###
    #
    # Fisher
    #
    # SNV
    #
    if ( data_pair[ const.POS_COUNT ] == 2 and
         ref_base_U and
         data_pair[ const.POS_DATA1 ][ 'mis_base' ] and
         'mid' in data_pair[ const.POS_DATA1 ].keys() and
         'mid' in data_pair[ const.POS_DATA2 ].keys() and
         'proper_read_depth' in data_pair[ const.POS_DATA1 ].keys() and
         'proper_read_depth' in data_pair[ const.POS_DATA2 ].keys()
       ):
        odds_ratio, fisher_pvalue = fisher(
                    ( ( int( data_pair[ const.POS_DATA1 ][ 'total_' + ref_base_U ] ),
                        int( data_pair[ const.POS_DATA2 ][ 'total_' + ref_base_U ] ) ),
                      ( int( data_pair[ const.POS_DATA1 ][ 'total_' + data_pair[ const.POS_DATA1 ][ 'mis_base' ] ] ),
                        int( data_pair[ const.POS_DATA2 ][ 'total_' + data_pair[ const.POS_DATA1 ][ 'mis_base' ] ] ) ) ),
                    alternative='two-sided'
                    )
        data_pair[ const.POS_FISHER_SNV ] = math_log_fisher_pvalue(fisher_pvalue)
    #
    # INDEL
    #
    if ( data_pair[ const.POS_COUNT ] == 2 and 'indel' in data_pair[ const.POS_DATA1 ]
       ):
        fisher_pvalue = None
        for type in data_pair[ const.POS_DATA1 ][ 'indel' ]:
            for bases in data_pair[ const.POS_DATA1 ][ 'indel' ][ type ].keys():
                # if type in data_pair[ const.POS_DATA1 ][ 'indel' ] and bases in data_pair[ const.POS_DATA1 ][ 'indel' ][ type ]:
                # AutoVivification: absent keys yield empty dicts, so
                # normalise missing second-sample counts to zero.
                if not isinstance( data_pair[ const.POS_DATA2 ][ 'indel' ][ type ][ bases ][ 'both' ], int ):
                    data_pair[ const.POS_DATA2 ][ 'indel' ][ type ][ bases ][ 'both' ] = 0
                    data_pair[ const.POS_DATA2 ][ 'indel' ][ type ][ bases ][ '+' ] = 0
                    data_pair[ const.POS_DATA2 ][ 'indel' ][ type ][ bases ][ '-' ] = 0
                if (data_pair[ const.POS_DATA2 ][ 'proper_read_depth_indel' ] >= data_pair[ const.POS_DATA2 ][ 'indel' ][ type ][ bases ][ 'both' ] and
                    data_pair[ const.POS_DATA1 ][ 'proper_read_depth_indel' ] >= data_pair[ const.POS_DATA1 ][ 'indel' ][ type ][ bases ][ 'both' ]
                   ):
                    odds_ratio, fisher_pvalue = fisher(
                        ( ( data_pair[ const.POS_DATA1 ][ 'proper_read_depth_indel' ] - data_pair[ const.POS_DATA1 ][ 'indel' ][ type ][ bases ][ 'both' ],
                            data_pair[ const.POS_DATA1 ][ 'indel' ][ type ][ bases ][ 'both' ] ),
                          ( data_pair[ const.POS_DATA2 ][ 'proper_read_depth_indel' ] - data_pair[ const.POS_DATA2 ][ 'indel' ][ type ][ bases ][ 'both' ],
                            data_pair[ const.POS_DATA2 ][ 'indel' ][ type ][ bases ][ 'both' ]) ),
                        alternative='two-sided' )
                if fisher_pvalue != None:
                    if type == '+':
                        data_id = const.POS_FISHER_INS
                    elif type == '-':
                        data_id = const.POS_FISHER_DEL
                    if data_pair[ data_id ] == 'N:1.0':
                        data_pair[ data_id ] = bases + ':' + str( math_log_fisher_pvalue(fisher_pvalue) )
                    else:
                        data_pair[ data_id ] += ',' + bases + ':' + str( math_log_fisher_pvalue(fisher_pvalue) )
    return data_pair
############################################################
def Pileup_command(
        regions,
        cmd_list,
        min_depth,
        min_variant_read,
        mismatch_rate_disease,
        mismatch_rate_normal,
        post_10_q,
        fisher_threshold,
        is_anno,
        out_file,
        compare_flag,
        positions_bed,
        w
        ):
    """Run the pileup command (optionally per region) and print variant calls.

    For each comma-separated region in ``regions`` — or once with no region
    restriction when ``regions`` is empty — the base command in ``cmd_list``
    is copied, extended with ``-r``/``-l`` options, executed, and every
    output line is parsed by ``Pileup_out``; accepted records are printed in
    annotation or VCF format depending on ``is_anno``.
    """
    region_list = regions.split(",") if regions else [None]
    with open(os.devnull, 'w') as FNULL:
        for region in region_list:
            command = copy.deepcopy(cmd_list)
            if region is not None:
                command.insert(2, '-r')
                command.insert(3, region)
            if positions_bed:
                command.insert(2, '-l')
                command.insert(3, positions_bed)
            proc = subprocess.Popen(command, stdout=subprocess.PIPE)
            for mpileup in proc.stdout:
                record = Pileup_out( mpileup, w, min_depth, min_variant_read, compare_flag)
                if record:
                    printer = print_anno if is_anno else print_vcf
                    printer.print_data( record, w, min_depth, mismatch_rate_disease, mismatch_rate_normal, post_10_q, fisher_threshold, min_variant_read )
            proc.stdout.close()
            proc.wait()
############################################################
def Pileup_command_multi_thread(
        regions,
        cmd_list,
        min_depth,
        min_variant_read,
        mismatch_rate_disease,
        mismatch_rate_normal,
        post_10_q,
        fisher_threshold,
        is_anno,
        out_file,
        thread_str,
        compare_flag
        ):
    """Worker entry point: pile up the given comma-separated regions and
    write results to a per-worker file (``out_file`` + ``thread_str``)."""
    with open(out_file + thread_str, 'w') as w, open(os.devnull, 'w') as FNULL:
        for target_region in regions.split(","):
            # Private copy of the base command so -r options don't leak
            # between regions.
            region_cmd = copy.deepcopy(cmd_list)
            if target_region:
                region_cmd.insert(2, '-r')
                region_cmd.insert(3, target_region)
            proc = subprocess.Popen(region_cmd, stdout=subprocess.PIPE)
            for raw_line in proc.stdout:
                parsed = Pileup_out(raw_line, w, min_depth, min_variant_read, compare_flag)
                if not parsed:
                    continue
                printer = print_anno if is_anno else print_vcf
                printer.print_data(parsed, w, min_depth, mismatch_rate_disease, mismatch_rate_normal, post_10_q, fisher_threshold, min_variant_read)
            proc.stdout.close()
            proc.wait()
############################################################
def Print_header(
        w,
        in_bam1,
        in_bam2,
        sample1,
        sample2,
        ref_fa,
        is_anno
        ):
    """Write the VCF meta lines (VCF output only) followed by the column
    header for either paired or single-sample output."""
    if not is_anno:
        # VCF output carries metadata; the sequence dictionary is assumed
        # to sit next to the reference FASTA with a .dict extension.
        ref_name, ext = os.path.splitext(ref_fa)
        print_vcf.print_meta(w, ref_name + ".dict", sample1, sample2, in_bam1, in_bam2, ref_fa)
    paired = bool(in_bam1 and in_bam2)
    if paired:
        if is_anno:
            print_anno.print_header_pair(w)
        else:
            print_vcf.print_header_pair(w, sample1, sample2)
    elif in_bam1:
        if is_anno:
            print_anno.print_header_single(w)
        else:
            print_vcf.print_header_single(w, sample1)
############################################################
def Pileup_and_count(
        in_bam1,
        in_bam2,
        sample1,
        sample2,
        out_file,
        ref_fa,
        baseq_thres,
        mismatch_rate_disease,
        mismatch_rate_normal,
        post_10_q,
        fisher_threshold,
        min_depth,
        header_flag,
        min_variant_read,
        samtools,
        samtools_params,
        region,
        region_file,
        positions_bed,
        is_anno
        ):
    """Drive samtools mpileup over one or two BAM files and write
    Fisher-test variant calls to ``out_file``.

    When ``region_file`` is given, one worker process is spawned per
    region line; the per-worker outputs are concatenated, sorted by
    chromosome/position, written after the (optional) header, and the
    temporaries removed. Otherwise a single pileup pass is run,
    optionally restricted to ``region`` and/or ``positions_bed``.

    Raises:
        ValueError: if neither ``in_bam1`` nor ``in_bam2`` is supplied.
    """
    # Module-level state consumed by the pileup parser elsewhere in file.
    global target
    global remove_chr
    global filter_quals
    #
    # Initalize filter quality values
    #
    filter_quals = ''
    for qual in range( 33, 33 + baseq_thres ):
        filter_quals += str( chr( qual ) )
    #
    # Setup regular expression
    # ([\+\-])[0-9]+[ACGTNacgtn]+
    #
    target = re.compile( '([\+\-])([0-9]+)([ACGTNRMacgtnrm]+)' )
    remove_chr = re.compile( '\^.' )
    samtools_params_list = samtools_params.split(" ")
    region_list = []
    if region_file:
        with open(region_file, 'r') as hin:
            for line in hin:
                region_list.append(line.rstrip('\n'))
    if in_bam1 and in_bam2:
        cmd_list = [samtools,'mpileup','-f',ref_fa]
        cmd_list.extend(samtools_params_list)
        cmd_list.extend([in_bam1, in_bam2])
        compare_flag = True
    elif in_bam1 or in_bam2:
        # Single-sample mode: normalise so the one provided BAM/sample is
        # always carried in the *1 variables.
        in_bam1 = in_bam1 if in_bam1 else in_bam2
        sample1 = sample1 if sample1 else sample2
        in_bam2 = None
        sample2 = None
        cmd_list = [samtools,'mpileup','-f',ref_fa]
        cmd_list.extend(samtools_params_list)
        cmd_list.extend([in_bam1])
        compare_flag = False
    else:
        # BUGFIX: both BAMs may be None on this path; str() keeps the
        # error message itself from raising TypeError on concatenation.
        logging.error( "Input file: {file} not found.".format( file = str(in_bam1) +" "+ str(in_bam2) ) )
        raise ValueError()
    #
    # multi thread
    #
    if len(region_list) > 0:
        jobs = []
        for idx, target_regions in enumerate(region_list):
            proc = multiprocessing.Process(target = Pileup_command_multi_thread, \
                args = (target_regions, cmd_list, min_depth, min_variant_read, mismatch_rate_disease, mismatch_rate_normal, post_10_q, fisher_threshold, is_anno, out_file, "."+str(idx), compare_flag))
            jobs.append(proc)
            proc.start()
        for idx,target_regions in enumerate(region_list):
            jobs[idx].join()
        # Concatenate per-worker outputs, then sort by chrom (-k1,1) and
        # position (-k2,2n) with version sort for chromosome names.
        with open(out_file + ".unsorted", 'w') as w:
            for idx,target_regions in enumerate(region_list):
                with open(out_file +"."+ str(idx), 'r') as hin:
                    for line in hin:
                        print(line.rstrip('\n'), file=w)
        subprocess.check_output(["sort", "-k1,1", "-k2,2n", "-V", "-o", out_file+".sorted", out_file+".unsorted"])
        with open(out_file, 'w') as w:
            if header_flag:
                Print_header(w, in_bam1, in_bam2, sample1, sample2, ref_fa, is_anno)
            with open(out_file +".sorted", 'r') as hin:
                for line in hin:
                    print(line.rstrip('\n'), file=w)
        # Clean up the per-worker and intermediate files.
        for idx, target_regions in enumerate(region_list):
            os.remove(out_file +"."+ str(idx))
        os.remove(out_file +".sorted")
        os.remove(out_file +".unsorted")
    #
    # single thread
    #
    else:
        with open(out_file, 'w') as w:
            if header_flag:
                Print_header(w, in_bam1, in_bam2, sample1, sample2, ref_fa, is_anno)
            Pileup_command(region, cmd_list, min_depth, min_variant_read, mismatch_rate_disease, mismatch_rate_normal, post_10_q, fisher_threshold, is_anno, out_file, compare_flag, positions_bed, w)
|
Genomon-Project/GenomonFisher
|
genomon_fisher/fisher.py
|
Python
|
gpl-3.0
| 22,179
|
[
"pysam"
] |
8ef6f886b4fb51e7d9737e8ab4599c3680cf3b89d8a7d5e98491e124d535e485
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""
List of GGA SuperFunctionals built from LibXC primitives.
"""
from psi4 import core
def build_svwn_superfunctional(name, npoints, deriv, restricted):
    """Assemble the SVWN (LSDA) superfunctional from LibXC primitives."""
    # Start from a blank SuperFunctional; allocate() must come last.
    sup = core.SuperFunctional.blank()
    sup.set_max_points(npoints)
    sup.set_deriv(deriv)
    sup.set_name('SVWN')
    sup.set_description(' SVWN3 (RPA) LSDA Functional\n')
    sup.set_citation(' Adamson et. al., J. Comput. Chem., 20(9), 921-927, 1999\n')
    # LDA exchange paired with VWN (RPA) correlation.
    exchange = core.LibXCFunctional('XC_LDA_X', restricted)
    correlation = core.LibXCFunctional('XC_LDA_C_VWN_RPA', restricted)
    sup.add_x_functional(exchange)
    sup.add_c_functional(correlation)
    sup.allocate()
    return sup, False
def build_blyp_superfunctional(name, npoints, deriv, restricted):
    """Assemble the BLYP GGA exchange-correlation superfunctional."""
    # Start from a blank SuperFunctional; allocate() must come last.
    sup = core.SuperFunctional.blank()
    sup.set_max_points(npoints)
    sup.set_deriv(deriv)
    sup.set_name('BLYP')
    sup.set_description(' BLYP GGA Exchange-Correlation Functional\n')
    sup.set_citation(' P.J. Stephens et. al., J. Phys. Chem., 98, 11623-11627, 1994\n B. Miehlich et. al., Chem. Phys. Lett., 157(3), 200-206 1989\n')
    # B88 exchange paired with LYP correlation.
    exchange = core.LibXCFunctional('XC_GGA_X_B88', restricted)
    correlation = core.LibXCFunctional('XC_GGA_C_LYP', restricted)
    sup.add_x_functional(exchange)
    sup.add_c_functional(correlation)
    sup.allocate()
    return sup, False
def build_bop_superfunctional(name, npoints, deriv, restricted):
    """Assemble the BOP GGA exchange-correlation superfunctional."""
    # Start from a blank SuperFunctional; allocate() must come last.
    sup = core.SuperFunctional.blank()
    sup.set_max_points(npoints)
    sup.set_deriv(deriv)
    sup.set_name('BOP')
    sup.set_description(' BOP GGA Exchange-Correlation Functional\n')
    sup.set_citation(' T. Tsuneda et. al., J. Chem. Phys. 110, 10664-10678, 1999\n')
    # B88 exchange paired with the "one-parameter progressive" correlation.
    exchange = core.LibXCFunctional('XC_GGA_X_B88', restricted)
    correlation = core.LibXCFunctional('XC_GGA_C_OP_B88', restricted)
    sup.add_x_functional(exchange)
    sup.add_c_functional(correlation)
    sup.allocate()
    return sup, False
def build_b86bpbe_superfunctional(name, npoints, deriv, restricted):
    """Assemble the B86BPBE GGA exchange-correlation superfunctional."""
    # Start from a blank SuperFunctional; allocate() must come last.
    sup = core.SuperFunctional.blank()
    sup.set_max_points(npoints)
    sup.set_deriv(deriv)
    sup.set_name('B86BPBE')
    sup.set_description(' B86BPBE GGA Exchange-Correlation Functional\n')
    sup.set_citation(' A. D. Becke, J. Chem. Phys. 85:7184, 1986.\n')
    # B86 (modified gradient correction) exchange with PBE correlation.
    exchange = core.LibXCFunctional('XC_GGA_X_B86_MGC', restricted)
    correlation = core.LibXCFunctional('XC_GGA_C_PBE', restricted)
    sup.add_x_functional(exchange)
    sup.add_c_functional(correlation)
    sup.allocate()
    return sup, False
def build_pw86pbe_superfunctional(name, npoints, deriv, restricted):
    """Assemble the PW86PBE GGA exchange-correlation superfunctional."""
    # Start from a blank SuperFunctional; allocate() must come last.
    sup = core.SuperFunctional.blank()
    sup.set_max_points(npoints)
    sup.set_deriv(deriv)
    sup.set_name('PW86PBE')
    sup.set_description(' PW86PBE GGA Exchange-Correlation Functional\n')
    sup.set_citation(' J. P. Perdew and W. Yue, Phys. Rev. B 33:8800(R), 1986.\n')
    # PW86 exchange paired with PBE correlation.
    exchange = core.LibXCFunctional('XC_GGA_X_PW86', restricted)
    correlation = core.LibXCFunctional('XC_GGA_C_PBE', restricted)
    sup.add_x_functional(exchange)
    sup.add_c_functional(correlation)
    sup.allocate()
    return sup, False
def build_pbe_superfunctional(name, npoints, deriv, restricted):
    """Assemble the PBE GGA exchange-correlation superfunctional."""
    # Start from a blank SuperFunctional; allocate() must come last.
    sup = core.SuperFunctional.blank()
    sup.set_max_points(npoints)
    sup.set_deriv(deriv)
    sup.set_name('PBE')
    sup.set_description(' PBE GGA Exchange-Correlation Functional\n')
    sup.set_citation(' J.P. Perdew et. al., Phys. Rev. Lett., 77(18), 3865-3868, 1996\n')
    # PBE exchange paired with PBE correlation.
    exchange = core.LibXCFunctional('XC_GGA_X_PBE', restricted)
    correlation = core.LibXCFunctional('XC_GGA_C_PBE', restricted)
    sup.add_x_functional(exchange)
    sup.add_c_functional(correlation)
    sup.allocate()
    return sup, False
# def build_wsvwn_superfunctional(name, npoints, deriv, restricted):
# # Call this first
# sup = core.SuperFunctional.blank()
# sup.set_max_points(npoints)
# sup.set_deriv(deriv)
# # => User-Customization <= #
# # No spaces, keep it short and according to convention
# sup.set_name('wSVWN')
# sup.set_description(' LSDA SR-XC Functional\n')
# sup.set_citation(' Adamson et. al., J. Comput. Chem., 20(9), 921-927, 1999\n')
# # Add member functionals
# wS_X = core.LibXCFunctional('wS_X')
# sup.add_x_functional(wS_X)
# sup.add_c_functional(core.LibXCFunctional('VWN3RPA_C'))
# # Set GKS up after adding functionals
# sup.set_x_omega(0.3)
# sup.set_c_omega(0.0)
# sup.set_x_alpha(0.0)
# sup.set_c_alpha(0.0)
# # => End User-Customization <= #
# # Call this last
# sup.allocate()
# return (sup, False)
def build_pw91_superfunctional(name, npoints, deriv, restricted):
    """Assemble the PW91 GGA exchange-correlation superfunctional."""
    # Start from a blank SuperFunctional; allocate() must come last.
    sup = core.SuperFunctional.blank()
    sup.set_max_points(npoints)
    sup.set_deriv(deriv)
    sup.set_name('PW91')
    sup.set_description(' PW91 GGA Exchange-Correlation Functional\n')
    sup.set_citation(' J.P. Perdew et. al., Phys. Rev. B., 46(11), 6671-6687, 1992\n')
    # PW91 exchange paired with PW91 correlation.
    exchange = core.LibXCFunctional('XC_GGA_X_PW91', restricted)
    correlation = core.LibXCFunctional('XC_GGA_C_PW91', restricted)
    sup.add_x_functional(exchange)
    sup.add_c_functional(correlation)
    sup.allocate()
    return sup, False
def build_bp86_superfunctional(name, npoints, deriv, restricted):
    """Assemble the BP86 GGA exchange-correlation superfunctional."""
    # Start from a blank SuperFunctional; allocate() must come last.
    sup = core.SuperFunctional.blank()
    sup.set_max_points(npoints)
    sup.set_deriv(deriv)
    sup.set_name('BP86')
    sup.set_description(' BP86 GGA Exchange-Correlation Functional\n')
    sup.set_citation(' Null\n')
    # B88 exchange paired with Perdew-86 correlation.
    exchange = core.LibXCFunctional('XC_GGA_X_B88', restricted)
    correlation = core.LibXCFunctional('XC_GGA_C_P86', restricted)
    sup.add_x_functional(exchange)
    sup.add_c_functional(correlation)
    sup.allocate()
    return sup, False
def build_ft97_superfunctional(name, npoints, deriv, restricted):
    """Assemble the FT97 GGA exchange-correlation superfunctional."""
    # Start from a blank SuperFunctional; allocate() must come last.
    sup = core.SuperFunctional.blank()
    sup.set_max_points(npoints)
    sup.set_deriv(deriv)
    sup.set_name('FT97')
    sup.set_description(' FT97 GGA Exchange-Correlation Functional\n')
    sup.set_citation(' M. Filatov and W. Theil, Int. J. Quant. Chem., 62, 603-616, 1997\n')
    # Filatov-Thiel 1997 exchange (variant B) and correlation.
    exchange = core.LibXCFunctional('XC_GGA_X_FT97_B', restricted)
    correlation = core.LibXCFunctional('XC_GGA_C_FT97', restricted)
    sup.add_x_functional(exchange)
    sup.add_c_functional(correlation)
    sup.allocate()
    return sup, False
# Dispatch table: lower-case functional name -> builder callable.
# Key order matches the original definition order (dict iteration order
# is preserved in Python 3.7+, so callers relying on it are unaffected).
gga_superfunc_list = {
    "b86bpbe": build_b86bpbe_superfunctional,
    "blyp": build_blyp_superfunctional,
    "svwn": build_svwn_superfunctional,
    "pw86pbe": build_pw86pbe_superfunctional,
    "pbe": build_pbe_superfunctional,
    "bp86": build_bp86_superfunctional,
    "pw91": build_pw91_superfunctional,
    "ft97": build_ft97_superfunctional,
    "bop": build_bop_superfunctional,
}
|
rmcgibbo/psi4public
|
psi4/driver/procrouting/dft_funcs/gga_superfuncs.py
|
Python
|
lgpl-3.0
| 8,817
|
[
"Psi4"
] |
9172bf573b6621f96b340f6dc1a619bd044e9bd00c2f18ce5a90f45bafe5f250
|
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
from .MeshViewerPlugin import MeshViewerPlugin
from .MeshCameraPlugin import MeshCameraPlugin
|
harterj/moose
|
python/peacock/Input/plugins/__init__.py
|
Python
|
lgpl-2.1
| 403
|
[
"MOOSE"
] |
864abbc479c278f1542230b39a1c5a56eba768e6e71d0a35c1a53145a556153b
|
'''
PartsGenie (c) University of Manchester 2017
PartsGenie is licensed under the MIT License.
To view a copy of this license, visit <http://opensource.org/licenses/MIT/>.
@author: neilswainston
'''
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-locals
# pylint: disable=too-many-statements
import math
import random
import re
from Bio.Seq import Seq
from synbiochem.utils import seq_utils
from parts_genie.nucl_acid_utils import NuPackRunner
_START_CODON_PATT = r'(?=([ACGT]TG))'
_RT_EFF = 2.222
_K = 2500.0
class RbsCalculator(object):
    '''Calculates RBS free energies and translation initiation rates using
    a thermodynamic model evaluated with NuPack (mfe / subopt / energy).'''

    def __init__(self, r_rna, temp=37.0):
        # r_rna: 16S rRNA (anti-SD) sequence; temp: folding temperature (C).
        self.__r_rna = r_rna.upper()
        self.__runner = NuPackRunner(temp)
        # Optimal aligned spacing (nt) between the SD site and start codon.
        self.__optimal_spacing = 5
        # Window size (nt) considered around each start codon.
        self.__cutoff = 35

    def calc_dgs(self, m_rna, limit=float('inf')):
        '''Calculates each dg term in the free energy model and sums them to
        create dg_total.

        Returns a dict mapping start-codon position -> (dG, TIR), stopping
        after ``limit`` successfully scored start codons.'''
        m_rna = m_rna.upper()
        start_positions = []
        dgs_tirs = []
        count = 0
        # _START_CODON_PATT is a lookahead, so overlapping candidates
        # ([ACGT]TG) are all found.
        for match in re.finditer(_START_CODON_PATT, m_rna):
            start_pos = match.start()
            try:
                d_g = self.__calc_dg(m_rna, start_pos)
                if not math.isinf(d_g):
                    start_positions.append(start_pos)
                    dgs_tirs.append((d_g, get_tir(d_g)))
                    count += 1
            except ValueError:
                # Occurs when start codon appears at start of sequence, and is
                # therefore leaderless. Take no action, as safe to ignore.
                continue
            if count == limit:
                break
        return dict(zip(start_positions, dgs_tirs))

    def calc_kinetic_score(self, m_rna, start_pos, dangles='none'):
        '''Gets kinetic score: the longest-range helix in the mfe structure
        of the window around ``start_pos``, normalised by window length.'''
        sub_m_rna = \
            m_rna[max(0, start_pos - self.__cutoff):min(len(m_rna),
                                                        start_pos +
                                                        self.__cutoff)]
        _, bp_xs, bp_ys = self.__runner.mfe([sub_m_rna], dangles=dangles)
        largest_range_helix = 0
        for (nt_x, nt_y) in zip(bp_xs[0], bp_ys[0]):
            # Only intra-mRNA pairs; the span nt_y - nt_x is the helix range.
            if nt_x <= len(sub_m_rna) and nt_y <= len(sub_m_rna):
                val = nt_y - nt_x
                largest_range_helix = max(val, largest_range_helix)
        return float(largest_range_helix) / float(len(sub_m_rna))

    def get_initial_rbs(self, rbs_len, cds, tir_target_rel):
        '''Generates random initial condition for designing a synthetic rbs
        sequence.'''
        dg_target_rel = get_dg(tir_target_rel)
        # NOTE(review): cds is upper-cased but not otherwise used here —
        # confirm whether it was meant to bias the generated sequence.
        cds = cds.upper()
        dg_range_high = 25.0
        dg_range_low = -18.0
        # Normalise the target dG onto [0, 1].
        dg_target_rel = (dg_target_rel - dg_range_high) / \
            (dg_range_low - dg_range_high)
        # 0.0: Low expression
        # 1.0: High expression
        # Higher expression targets get a longer, more SD-like core with
        # tighter spacing tolerance.
        if dg_target_rel < 0.125:
            prob_shine_delgano = 0.50
            core_length = 4
            max_nonoptimal_spacing = 10
        elif dg_target_rel < 0.250:
            prob_shine_delgano = 0.50
            core_length = 4
            max_nonoptimal_spacing = 10
        elif dg_target_rel < 0.5:
            prob_shine_delgano = 0.75
            core_length = 4
            max_nonoptimal_spacing = 10
        elif dg_target_rel < 0.7:
            prob_shine_delgano = 0.75
            core_length = 4
            max_nonoptimal_spacing = 5
        elif dg_target_rel < 0.8:
            prob_shine_delgano = 0.75
            core_length = 6
            max_nonoptimal_spacing = 5
        elif dg_target_rel < 0.9:
            prob_shine_delgano = 0.90
            core_length = 6
            max_nonoptimal_spacing = 5
        elif dg_target_rel < 0.95:
            prob_shine_delgano = 0.90
            core_length = 8
            max_nonoptimal_spacing = 3
        else:
            prob_shine_delgano = 1.0
            core_length = 9
            max_nonoptimal_spacing = 2
        # The SD sequence is the reverse complement of the anti-SD rRNA.
        shine_delgano = Seq(self.__r_rna).reverse_complement()
        return self.__get_random_rbs(rbs_len, shine_delgano,
                                     prob_shine_delgano, core_length,
                                     max_nonoptimal_spacing)

    def __calc_dg(self, m_rna, start_pos):
        '''Calculates dG (total binding free energy) for the start codon at
        ``start_pos``.'''
        # Set dangles based on length between 5' end of m_rna and start codon:
        max_rbs_len = 35
        if start_pos > max_rbs_len:
            dangles = 'none'
        else:
            dangles = 'all'
        # Start codon energy:
        start_codon_energies = {'ATG': -1.194, 'GTG': -0.0748, 'TTG': -0.0435,
                                'CTG': -0.03406}
        dg_start = start_codon_energies[m_rna[start_pos:start_pos + 3]]
        # Energy of m_rna folding:
        [dg_m_rna, _, _] = \
            self.__calc_dg_m_rna(m_rna, start_pos, dangles)
        # Energy of m_rna:r_rna hybridization and folding:
        [dg_m_rna_r_rna, m_rna_subseq, bp_x, bp_y, energy_before] = \
            self.__calc_dg_m_rna_r_rna(m_rna, start_pos, dangles)
        # Standby site correction:
        dg_standby = self.__calc_dg_standby_site(m_rna_subseq, bp_x,
                                                 bp_y, energy_before,
                                                 dangles)
        # Total energy is m_rna:r_rna + start - r_rna - m_rna - standby_site:
        return dg_m_rna_r_rna + dg_start - dg_m_rna - dg_standby

    def __calc_dg_m_rna(self, m_rna, start_pos, dangles='all'):
        '''Calculates the dg_m_rna given the m_rna sequence.

        Returns (energy, bp_x, bp_y) of the mfe fold of the window around
        the start codon.'''
        m_rna_subseq = \
            m_rna[max(0, start_pos - self.__cutoff):min(len(m_rna),
                                                        start_pos +
                                                        self.__cutoff)]
        energies, bp_xs, bp_ys = self.__runner.mfe([m_rna_subseq],
                                                   dangles=dangles)
        return energies[0], bp_xs[0], bp_ys[0]

    def __calc_dg_m_rna_r_rna(self, m_rna, start_pos, dangles):
        '''Calculates the dg_m_rna_r_rna from the m_rna and r_rna sequence.
        Considers all feasible 16S r_rna binding sites and includes the effects
        of non-optimal spacing.'''
        energy_cutoff = 3.0
        # Footprint of the 30S complex that prevents formation of secondary
        # structures downstream of the start codon. Here, we assume that the
        # entire post-start RNA sequence does not form secondary structures
        # once the 30S complex has bound.
        footprint = 1000
        begin = max(0, start_pos - self.__cutoff)
        m_rna_len = min(len(m_rna), start_pos + self.__cutoff)
        start_pos_in_subsequence = min(start_pos, self.__cutoff)
        startpos_to_end_len = m_rna_len - start_pos_in_subsequence - begin
        # 1. identify a list of r_rna-binding sites. Binding sites are
        # hybridizations between the m_rna and r_rna and can include
        # mismatches, bulges, etc. Intra-molecular folding is also allowed
        # within the m_rna.
        # The subopt program is used to generate a list of optimal & suboptimal
        # binding sites.
        # Constraints: the entire r_rna-binding site must be upstream of the
        # start codon
        m_rna_subseq = m_rna[begin:start_pos]
        if not m_rna_subseq:
            raise ValueError('Warning: There is a leaderless start codon, ' +
                             'which is being ignored.')
        energies, bp_xs, bp_ys = self.__runner.subopt([m_rna_subseq,
                                                       self.__r_rna],
                                                      energy_cutoff,
                                                      dangles=dangles)
        if not bp_xs:
            raise ValueError(
                'Warning: The 16S r_rna has no predicted binding site. ' +
                'Start codon is considered as leaderless and ignored.')
        # 2. Calculate dg_spacing for each 16S r_rna binding site
        # Calculate the aligned spacing for each binding site in the list
        aligned_spacing = []
        for (bp_x, bp_y) in zip(bp_xs,
                                bp_ys):
            aligned_spacing.append(
                self.__calc_aligned_spacing(m_rna_subseq,
                                            start_pos_in_subsequence,
                                            bp_x, bp_y))
        dg_spacing_list = []
        dg_m_rna_r_rna = []
        dg_m_rna_r_rna_spacing = []
        # Calculate dg_spacing using aligned spacing value. Add it to
        # dg_m_rna_r_rna.
        for counter in range(len(bp_xs)):
            dg_m_rna_r_rna.append(energies[counter])
            val = self.__calc_dg_spacing(aligned_spacing[counter])
            dg_spacing_list.append(val)
            dg_m_rna_r_rna_spacing.append(
                val + energies[counter])
        # 3. Find 16S r_rna binding site that minimizes
        # dg_spacing+dg_m_rna_r_rna.
        index = dg_m_rna_r_rna_spacing.index(min(dg_m_rna_r_rna_spacing))
        dg_spacing_final = dg_spacing_list[index]
        # Check: Is the dg spacing large compared to the energy gap? If so,
        # this means the list of suboptimal 16S r_rna binding sites generated
        # by subopt is too short.
        # if dg_spacing_final > energy_cutoff:
        #    print 'Warning: The spacing penalty is greater than the ' + \
        #        'energy gap. dg (spacing) = ', dg_spacing_final
        # 4. Identify the 5' and 3' ends of the identified 16S r_rna binding
        # site. Create a base pair list.
        most_5p_m_rna = float('inf')
        most_3p_m_rna = -float('inf')
        # Generate a list of r_rna-m_rna base paired nucleotides
        bp_x_target = []
        bp_y_target = []
        bp_x = bp_xs[index]
        bp_y = bp_ys[index]
        for (nt_x, nt_y) in zip(bp_x, bp_y):
            if nt_y > len(m_rna_subseq):  # nt is r_rna
                most_5p_m_rna = min(most_5p_m_rna, bp_x[bp_y.index(nt_y)])
                most_3p_m_rna = max(most_3p_m_rna, bp_x[bp_y.index(nt_y)])
                bp_x_target.append(nt_x)
                bp_y_target.append(nt_y)
        # The r_rna-binding site is between the nucleotides at positions
        # most_5p_m_rna and most_3p_m_rna
        # Now, fold the pre-sequence, r_rna-binding-sequence and post-sequence
        # separately. Take their base pairings and combine them together.
        # Calculate the total energy. For secondary structures, this splitting
        # operation is allowed.
        # We postulate that not all of the post-sequence can form secondary
        # structures. Once the 30S complex binds to the m_rna, it prevents the
        # formation of secondary structures that are mutually exclusive with
        # ribosome binding. We define self.footprint to be the length of the
        # 30S complex footprint. Here, we assume that the entire m_rna sequence
        # downstream of the 16S r_rna binding site can not form secondary
        # structures.
        m_rna_pre = m_rna[begin:begin + most_5p_m_rna - 1]
        post_window_end = m_rna_len + 1
        post_window_begin = min(
            start_pos + footprint, post_window_end)  # Footprint
        post_window_end = m_rna_len + 1
        m_rna_post = m_rna[post_window_begin:post_window_end]
        total_bp_x = []
        total_bp_y = []
        # Calculate pre-sequence folding
        if m_rna_pre:
            _, bp_xs, bp_ys = self.__runner.mfe([m_rna_pre], dangles=dangles)
            bp_x_pre = bp_xs[0]
            bp_y_pre = bp_ys[0]
        else:
            bp_x_pre = []
            bp_y_pre = []
        # Add pre-sequence base pairings to total base pairings
        offset = 0  # Begins at 0
        for (nt_x, nt_y) in zip(bp_x_pre, bp_y_pre):
            total_bp_x.append(nt_x + offset)
            total_bp_y.append(nt_y + offset)
        # Add r_rna-binding site base pairings to total base pairings
        offset = 0  # Begins at zero
        # NOTE(review): both branches below assign the same value — likely a
        # leftover from an earlier cutoff-dependent offset; confirm intent.
        if startpos_to_end_len < self.__cutoff:
            r_rna_offset = startpos_to_end_len
        else:
            r_rna_offset = startpos_to_end_len
        for (nt_x, nt_y) in zip(bp_x_target, bp_y_target):
            total_bp_x.append(nt_x + offset)
            total_bp_y.append(nt_y + r_rna_offset)
        # Calculate post-sequence folding
        if m_rna_post:
            _, bp_xs, bp_ys = self.__runner.mfe([m_rna_post], dangles=dangles)
            bp_x_post = bp_xs[0]
            bp_y_post = bp_ys[0]
        else:
            bp_x_post = []
            bp_y_post = []
        offset = post_window_begin - begin
        for (nt_x, nt_y) in zip(bp_x_post, bp_y_post):
            total_bp_x.append(nt_x + offset)
            total_bp_y.append(nt_y + offset)
        m_rna_subseq = m_rna[begin:m_rna_len]
        # Score the combined (pre + binding site + post) structure.
        total_energy = self.__runner.energy([m_rna_subseq, self.__r_rna],
                                            total_bp_x, total_bp_y,
                                            dangles=dangles)
        total_energy_withspacing = total_energy + dg_spacing_final
        return (total_energy_withspacing, m_rna_subseq, total_bp_x, total_bp_y,
                total_energy)

    def __calc_dg_spacing(self, aligned_spacing):
        '''Calculates the dG_spacing according to the value of the aligned
        spacing. This relationship was determined through experiments.'''
        d_s = aligned_spacing - self.__optimal_spacing
        if aligned_spacing < self.__optimal_spacing:
            # Compressed spacing: steep sigmoidal penalty.
            dg_spacing_penalty = 12.2 / \
                (1.0 + math.exp(2.5 * (d_s + 2.0))) ** 3.0
        else:
            # Stretched spacing: shallow quadratic penalty.
            dg_spacing_penalty = 0.048 * d_s * d_s + 0.24 * d_s
        return dg_spacing_penalty

    def __calc_dg_standby_site(self, m_rna, bp_x, bp_y, energy_before,
                               dangles):
        '''Calculates the dg of standby given the structure of the m_rna:r_rna
        complex.'''
        # To calculate the mfe structure while disallowing base pairing at the
        # standby site, we split the folded m_rna sequence into three parts:
        # (i) a pre-sequence (before the standby site) that can fold; (ii) the
        # standby site, which can not fold; (iii) the 16S r_rna binding site
        # and downstream sequence, which has been previously folded.
        standby_site_length = 4
        # Identify the most 5p m_rna nt that is bound to r_rna
        # NOTE(review): if no m_rna:r_rna pair exists, most_5p_m_rna stays
        # unbound and the slicing below would raise NameError — callers
        # appear to guarantee at least one pair; confirm.
        for (nt_x, nt_y) in zip(bp_x, bp_y):
            # nt_x is m_rna, nt_y is r_rna, they are bound.
            if nt_x <= len(m_rna) and nt_y > len(m_rna):
                most_5p_m_rna = nt_x  # starts counting from 0
                break
        # Extract the base pairings that are 3' of the most_5p_m_rna base
        # pairing
        bp_x_3p = []
        bp_y_3p = []
        for (nt_x, nt_y) in zip(bp_x, bp_y):
            if nt_x >= most_5p_m_rna:
                bp_x_3p.append(nt_x)
                bp_y_3p.append(nt_y)
        # Create the m_rna subsequence
        m_rna_subsequence = m_rna[
            0:max(0, most_5p_m_rna - standby_site_length - 1)]
        # Fold it and extract the base pairings
        if m_rna_subsequence:
            _, bp_xs, bp_ys = self.__runner.mfe(
                [m_rna_subsequence], dangles=dangles)
            bp_x_5p = bp_xs[0]  # [0] added 12/13/07
            bp_y_5p = bp_ys[0]
        else:
            bp_x_5p = []
            bp_y_5p = []
        # Put the sets of base pairings together
        bp_x_after = []
        bp_y_after = []
        for (nt_x, nt_y) in zip(bp_x_5p, bp_y_5p):
            bp_x_after.append(nt_x)
            bp_y_after.append(nt_y)
        for (nt_x, nt_y) in zip(bp_x_3p, bp_y_3p):
            bp_x_after.append(nt_x)
            bp_y_after.append(nt_y)
        # Calculate its energy
        energy_after = self.__runner.energy([m_rna, self.__r_rna],
                                            bp_x_after, bp_y_after,
                                            dangles=dangles)
        # Standby correction is the energy released by freeing the standby
        # site; clamp at zero so it never rewards binding.
        d_g = energy_before - energy_after
        if d_g > 0.0:
            d_g = 0.0
        return d_g

    def __get_random_rbs(self, rbs_len, shine_delgano, prob_shine_delgano,
                         core_length, max_nonoptimal_spacing):
        '''Generates a random rbs sequence tailored towards the target
        translation initiation rate.'''
        rbs = []
        # Choose core_length nucleotides.
        # Choose from the SD sequence with probability prob_shine_delgano
        # Choose from non-SD sequence with probability
        # (1 - prob_shine_delgano) / 3
        # The beginning/end of the core_length wrt to the SD sequence is
        # uniformly randomly determined.
        # core_length can't be greater then shine_delgano length:
        core_length = min(len(shine_delgano), core_length)
        diff = len(shine_delgano) - core_length
        begin = int(random.random() * diff)
        for i in range(core_length):
            if random.random() < prob_shine_delgano:
                rbs.append(shine_delgano[begin + i])
            else:
                # Pick any nucleotide except the SD one at this position.
                choices = list(seq_utils.NUCLEOTIDES)
                choices.remove(shine_delgano[begin + i])
                rbs.append(random.choice(choices))
        offset = diff - begin
        # Random spacer length centred on the optimal spacing.
        spacing = random.choice(range(max(
            0, offset + self.__optimal_spacing - max_nonoptimal_spacing),
            offset + self.__optimal_spacing + max_nonoptimal_spacing))
        rbs.extend([random.choice(seq_utils.NUCLEOTIDES)
                    for _ in range(spacing)])
        # if len(rbs) > MAX_RBS_LENGTH:
        #    rbs = rbs[len(rbs) - MAX_RBS_LENGTH:len(rbs) + 1]
        # Left-pad with random nucleotides up to the requested length.
        return ''.join([random.choice(seq_utils.NUCLEOTIDES)
                        for _ in range(rbs_len - len(rbs))] + rbs)

    def __calc_aligned_spacing(self, m_rna, start_pos, bp_x, bp_y):
        '''Calculates the aligned spacing between the 16S r_rna binding site and
        the start codon.

        Returns float('inf') when no suitable pairing is found.'''
        # r_rna is the concatenated at the end of the sequence in 5' to 3'
        # direction first: identify the farthest 3' nt in the r_rna that binds
        # to the mRNA and return its mRNA base pairer
        seq_len = len(m_rna) + len(self.__r_rna)
        for r_rna_nt in range(seq_len, seq_len - len(self.__r_rna), -1):
            if r_rna_nt in bp_y:
                r_rna_pos = bp_y.index(r_rna_nt)
                if bp_x[r_rna_pos] < start_pos:
                    farthest_3_prime_r_rna = r_rna_nt - len(m_rna)
                    m_rna_nt = bp_x[r_rna_pos]
                    # start_pos is counting starting from 0 (python)
                    distance_to_start = start_pos - m_rna_nt + 1
                    return distance_to_start - farthest_3_prime_r_rna
                else:
                    break
        return float('inf')
def get_dg(tir):
    '''Gets dg from translation initiation rate.'''
    # Invert tir = _K * exp(-dg / _RT_EFF).
    log_gap = math.log(_K) - math.log(tir)
    return _RT_EFF * log_gap
def get_tir(d_g):
    '''Gets translation initiation rate from dg.'''
    # Boltzmann-style rate: lower free energy -> higher initiation rate.
    exponent = -d_g / _RT_EFF
    return _K * math.exp(exponent)
def _calc_longest_loop_bulge(m_rna, bp_x, bp_y, rbs=None):
    '''Calculate the longest helical loop and bulge structure
    (longest contiguous list of un-base paired nucleotides starting and
    ending with a helix (loop -> same helix, bulge -> different helix)
    in the secondary structure.

    Returns (helical_loop_list, bulge_loop_list,
             helical_loop_start_end, bulge_loop_start_end).'''
    loop_length = 0
    begin_helix = 1
    bulge_loop_list = []
    helical_loop_list = []
    bulge_loop_start_end = []
    helical_loop_start_end = []
    # Restrict the scan to the RBS if supplied; otherwise scan the whole
    # mRNA (positions are 1-based to match the base-pair lists).
    if rbs is not None:
        rbs_begin = m_rna.find(rbs)
        rbs_end = rbs_begin + len(rbs)
        nucleotide_range = range(rbs_begin, rbs_end + 1)
    else:
        nucleotide_range = range(1, len(m_rna) + 1)
    # Find loops. Find bulges.
    for nuc in nucleotide_range:
        # nth nucleotide is not base-paired.
        if bp_x.count(nuc) == 0 and bp_y.count(nuc) == 0:
            # Determine if nearest neighbor nucleotides are base-paired
            (x_1, x_2, y_1, y_2) = (bp_x.count(nuc - 1),
                                    bp_x.count(nuc + 1),
                                    bp_y.count(nuc - 1),
                                    bp_y.count(nuc + 1))
            # middle unpaired nt
            if (x_1, x_2, y_1, y_2) == (0, 0, 0, 0):
                loop_length += 1
            # single mismatch -- loop
            elif (x_1, x_2, y_1, y_2) == (1, 0, 0, 1) or \
                    (x_1, x_2, y_1, y_2) == (0, 1, 1, 0):
                loop_length += 1
                begin_helix = nuc - 1
                end_helix = nuc + 1
            # single mismatch -- bulge
            elif (x_1, x_2, y_1, y_2) == (1, 1, 0, 0) or \
                    (x_1, x_2, y_1, y_2) == (0, 0, 1, 1):
                loop_length += 1
                begin_helix = nuc - 1
                end_helix = nuc + 1
            # starting unpaired nt
            elif (x_1, x_2, y_1, y_2) == (1, 0, 0, 0) or \
                    (x_1, x_2, y_1, y_2) == (0, 0, 1, 0):
                loop_length += 1
                begin_helix = nuc - 1
            # ending unpaired nt
            elif (x_1, x_2, y_1, y_2) == (0, 1, 0, 0) or \
                    (x_1, x_2, y_1, y_2) == (0, 0, 0, 1):
                loop_length += 1
                end_helix = nuc + 1
            # 1,0,1,0 is impossible w/o psuedoknots
            # 0,1,0,1 is impossible w/o psuedoknots
            # Also, all binary combinations with 3 or 4 true are impossible
            # (nuc-1 or nuc+1 can not be in both bp_x and bp_y)
        elif loop_length > 0:
            # Bulge or loop?
            # NOTE(review): end_helix may be referenced before assignment if
            # no "ending unpaired nt" case ever fired for this run; confirm
            # inputs always close each unpaired run against a paired base.
            # loop
            if bp_x.count(begin_helix) > 0 and bp_y.count(end_helix) > 0 \
                    and bp_x.index(begin_helix) == bp_y.index(end_helix):
                helical_loop_list.append(loop_length)
                loop_length = 0
                helical_loop_start_end.append((begin_helix, end_helix))
            else:
                bp_end = 0
                bp_begin = 0
                if bp_x.count(end_helix) > 0:
                    bp_begin = bp_y[bp_x.index(end_helix)]
                if bp_y.count(end_helix) > 0:
                    bp_end = bp_x[bp_y.index(end_helix)]
                if bp_x.count(begin_helix) > 0:
                    bp_end = bp_y[bp_x.index(begin_helix)]
                if bp_y.count(begin_helix) > 0:
                    bp_begin = bp_x[bp_y.index(begin_helix)]
                if bp_end > bp_begin:
                    bulge_loop_list.append(loop_length)
                    loop_length = 0
                    bulge_loop_start_end.append((begin_helix, end_helix))
                else:
                    loop_length = 0
    return helical_loop_list, bulge_loop_list, helical_loop_start_end, \
        bulge_loop_start_end
|
synbiochem/PathwayGenie
|
parts_genie/rbs_calculator.py
|
Python
|
mit
| 22,917
|
[
"VisIt"
] |
a3c17f1c909e8849809dbee6efa2350690a6dd736bca1c627b38f63fc62a6d7b
|
# ANNarchy - AEIF_cond_exp
#
# Adaptive exponential integrate-and-fire model.
#
# http://www.scholarpedia.org/article/Adaptive_exponential_integrate-and-fire_model
#
# Introduced in
#
# Brette R. and Gerstner W. (2005), Adaptive Exponential Integrate-and-Fire Model as an Effective Description of Neuronal Activity, J. Neurophysiol. 94: 3637 - 3642.
#
# This is a reimplementation of the Brian example:
#
# http://briansimulator.org/docs/examples-frompapers_Brette_Gerstner_2005.html
#
# authors: Helge Uelo Dinkelbach, Julien Vitay
# NOTE: star import is ANNarchy's documented usage pattern; it provides
# setup, Population, Monitor, compile, simulate, np, and the neuron models.
from ANNarchy import *
# Set the discretization step
dt = 0.1
setup(dt=dt)
# Create a population with one AdEx neuron
pop = Population(geometry=1, neuron=EIF_cond_exp_isfa_ista)
# Regular spiking (paper)
pop.tau_w, pop.a, pop.b, pop.v_reset = 144.0, 4.0, 0.0805, -70.6
# Bursting
#pop.tau_w, pop.a, pop.b, pop.v_reset = 20.0, 4.0, 0.5, pop.v_thresh + 5.0
# Fast spiking
#pop.tau_w, pop.a, pop.b, pop.v_reset = 144.0, 2000.0*pop.cm/144.0, 0.0, -70.6
# Compile the network
compile()
# Start recording
m = Monitor(pop, ['spike', 'v', 'w'])
# Add current of 1 nA and simulate: 20 ms rest, 100 ms stimulus, 20 ms rest
# (total 140 ms, matching the plots below).
simulate(20.0)
pop.i_offset = 1.0
simulate(100.0)
pop.i_offset = 0.0
simulate(20.0)
# Retrieve the results
data = m.get()
spikes = data['spike'][0]
v = data['v'][:, 0]
w = data['w'][:, 0]
# Overwrite v at spike times so spikes are visible in the trace
# (the AdEx reset otherwise hides the upswing).
if len(spikes)>0:
    v[spikes] = 20.0
# Plot the activity
import matplotlib.pyplot as plt
plt.subplot(2,1,1)
plt.plot(dt*np.arange(140.0/dt), v)
plt.ylabel('v')
plt.title('Adaptive exponential integrate-and-fire')
plt.subplot(2,1,2)
plt.plot(dt*np.arange(140.0/dt), w)
plt.xlabel('Time (ms)')
plt.ylabel('w')
plt.show()
|
vitay/ANNarchy
|
examples/pyNN/AEIF_cond_exp.py
|
Python
|
gpl-2.0
| 1,644
|
[
"Brian",
"NEURON"
] |
c162b92d51a7dc372c76d6ad2e5e340848882b8873c4d6f695935bd4c7c27752
|
# coding: utf-8
# Copyright (c) Henniggroup.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals, print_function
"""
Objects Maker module:
This module contains functions for creating the (singleton) objects
used by the genetic algorithm during the search.
"""
from gasp import general
from gasp import population
from gasp import geometry as geo
from gasp import variations
from gasp import energy_calculators
from gasp import organism_creators
from gasp import development
from pymatgen.core.structure import Structure
import os
import math
def make_objects(parameters):
    """
    Constructs the needed objects for the genetic algorithm search.
    Returns a dictionary containing the objects, under the keys
    'composition_space', 'constraints', 'geometry', 'developer',
    'redundancy_guard', 'id_generator', 'organism_creators',
    'num_calcs_at_once', 'run_dir_name', 'energy_calculator',
    'stopping_criteria', 'variations' and 'pool'.
    Quits (after printing a message) on any invalid or missing required
    input block.
    Args:
        parameters: the dictionary produced by calling yaml.load() on the input
            file
    """
    # to hold all the objects
    objects_dict = {}
    # make the composition space object
    if 'CompositionSpace' in parameters:
        composition_space = general.CompositionSpace(
            parameters['CompositionSpace'])
    else:
        print('Input file must contain a "CompositionSpace" block.')
        print("Quitting...")
        quit()
    objects_dict['composition_space'] = composition_space
    # make the constraints object
    if 'Constraints' in parameters:
        if 'min_num_atoms' in parameters['Constraints']:
            if parameters['Constraints']['min_num_atoms'] < 2:
                print('The value passed to the "min_num_atoms" keyword in the '
                      'Constraints block must greater than or equal to 2.')
                print('Quitting...')
                quit()
        constraints = development.Constraints(parameters['Constraints'],
                                              composition_space)
    else:
        constraints = development.Constraints('default', composition_space)
    objects_dict['constraints'] = constraints
    # make the geometry object
    if 'Geometry' not in parameters:
        geometry = geo.Bulk()
    elif parameters['Geometry'] in (None, 'default'):
        geometry = geo.Bulk()
    elif 'shape' not in parameters['Geometry']:
        geometry = geo.Bulk()
    elif parameters['Geometry']['shape'] == 'cluster':
        geometry = geo.Cluster(parameters['Geometry'])
    elif parameters['Geometry']['shape'] == 'wire':
        geometry = geo.Wire(parameters['Geometry'])
    elif parameters['Geometry']['shape'] == 'sheet':
        geometry = geo.Sheet(parameters['Geometry'])
    # TODO: add any other non-bulk geometries here
    else:
        # unrecognized shape names silently fall back to bulk
        geometry = geo.Bulk()
    objects_dict['geometry'] = geometry
    # make the development object
    if 'Development' in parameters:
        developer = development.Developer(parameters['Development'], geometry)
    else:
        developer = development.Developer('default', geometry)
    objects_dict['developer'] = developer
    # make the redundancy guard object
    if 'RedundancyGuard' in parameters:
        redundancy_guard = development.RedundancyGuard(
            parameters['RedundancyGuard'], geometry)
    else:
        redundancy_guard = development.RedundancyGuard('default', geometry)
    objects_dict['redundancy_guard'] = redundancy_guard
    # make the id generator
    id_generator = general.IDGenerator()
    objects_dict['id_generator'] = id_generator
    # make the organism creators
    initial_organism_creators = make_organism_creators(
        parameters, composition_space, constraints)
    # if more than one organism creator, sort them so that the attempts-based
    # ones are at the front and the successes-based ones are at the back
    # (False sorts before True)
    if len(initial_organism_creators) > 1:
        initial_organism_creators.sort(key=lambda x: x.is_successes_based)
    objects_dict['organism_creators'] = initial_organism_creators
    # the number of energy calculations to run at a time
    if 'NumCalcsAtOnce' not in parameters:
        num_calcs_at_once = 1
    elif parameters['NumCalcsAtOnce'] in (None, 'default'):
        num_calcs_at_once = 1
    else:
        num_calcs_at_once = parameters['NumCalcsAtOnce']
    objects_dict['num_calcs_at_once'] = num_calcs_at_once
    # get the run title
    if 'RunTitle' not in parameters:
        run_dir_name = 'garun'
    elif parameters['RunTitle'] in (None, 'default'):
        run_dir_name = 'garun'
    else:
        run_dir_name = 'garun_' + str(parameters['RunTitle'])
    objects_dict['run_dir_name'] = run_dir_name
    # make the energy calculator
    energy_calculator = make_energy_calculator(parameters, geometry,
                                               composition_space)
    objects_dict['energy_calculator'] = energy_calculator
    # make the stopping criteria
    stopping_criteria = make_stopping_criteria(parameters, composition_space)
    objects_dict['stopping_criteria'] = stopping_criteria
    # determine which variations should have non-zero default fractions
    do_permutation = False
    if len(composition_space.get_all_swappable_pairs()) > 0:
        do_permutation = True
    # get the number of atoms per composition
    # NOTE(review): when there are multiple endpoints, atoms_per_comp is set
    # to the number of endpoints rather than an atom count — confirm intended
    if len(composition_space.endpoints) > 1:
        atoms_per_comp = len(composition_space.endpoints)
    else:
        atoms_per_comp = \
            composition_space.endpoints[0].reduced_composition.num_atoms
    # see if numstoichmut can be done w/o violating the min or max number of
    # atoms constraints
    do_atomsmut = False
    if composition_space.objective_function == 'pd':
        if constraints.min_num_atoms != constraints.max_num_atoms:
            do_atomsmut = True
    elif composition_space.objective_function == 'epa':
        bottom = int(math.ceil(constraints.min_num_atoms/atoms_per_comp))
        top = int(math.floor(constraints.max_num_atoms/atoms_per_comp))
        if top > bottom:
            do_atomsmut = True
    # set default fractions for the variations (each case sums to 1.0)
    default_variation_fractions = {}
    if do_permutation and do_atomsmut:
        default_variation_fractions['permutation'] = 0.1
        default_variation_fractions['num_atoms_mut'] = 0.1
        default_variation_fractions['structure_mut'] = 0.1
        default_variation_fractions['mating'] = 0.7
    elif not do_permutation and do_atomsmut:
        default_variation_fractions['permutation'] = 0.0
        default_variation_fractions['num_atoms_mut'] = 0.1
        default_variation_fractions['structure_mut'] = 0.1
        default_variation_fractions['mating'] = 0.8
    elif do_permutation and not do_atomsmut:
        default_variation_fractions['permutation'] = 0.1
        default_variation_fractions['num_atoms_mut'] = 0.0
        default_variation_fractions['structure_mut'] = 0.1
        default_variation_fractions['mating'] = 0.8
    elif not do_permutation and not do_atomsmut:
        default_variation_fractions['permutation'] = 0.0
        default_variation_fractions['num_atoms_mut'] = 0.0
        default_variation_fractions['structure_mut'] = 0.2
        default_variation_fractions['mating'] = 0.8
    # make the variations
    variations_list = make_variations(parameters, default_variation_fractions,
                                      composition_space)
    # check that at least one variation has been used
    if len(variations_list) == 0:
        print('At least one variation must be used. Either leave entire '
              '"Variations" block blank to use default variations, or specify '
              'at least one variation within the "Variations" block.')
        print('Quitting...')
        quit()
    # check that the variations' fraction variables sum to 1
    # (the 0.999/1.001 window tolerates floating point rounding)
    frac_sum = 0.0
    for variation in variations_list:
        frac_sum = frac_sum + variation.fraction
    if frac_sum < 0.999 or frac_sum > 1.001:
        print("The Variations' fraction values must sum to 1.")
        print('Quitting...')
        quit()
    objects_dict['variations'] = variations_list
    # make the pool, selection, and composition fitness weight
    if 'Pool' not in parameters:
        pool = population.Pool(None, composition_space, run_dir_name)
    else:
        if 'num_promoted' in parameters['Pool']:
            if parameters['Pool']['num_promoted'] < 1:
                print('At least one organism must be promoted in the Pool.')
                print('Quitting...')
                quit()
            else:
                pool = population.Pool(parameters['Pool'], composition_space,
                                       run_dir_name)
        else:
            pool = population.Pool(parameters['Pool'], composition_space,
                                   run_dir_name)
    if 'Selection' not in parameters:
        selection = general.SelectionProbDist(None, pool.size)
    else:
        selection = general.SelectionProbDist(parameters['Selection'],
                                              pool.size)
    if 'CompositionFitnessWeight' not in parameters:
        comp_fitness_weight = general.CompositionFitnessWeight(None)
    else:
        if 'max_weight' in parameters['CompositionFitnessWeight']:
            if parameters['CompositionFitnessWeight']['max_weight'] < 0 or \
                    parameters['CompositionFitnessWeight']['max_weight'] > 1:
                print('The maximum weight of the composition fitness must lie'
                      ' in the interval [0,1].')
                print('Please change the value passed to the "max_weight" '
                      'keyword in the CompositionFitnessWeight block.')
                print('Quitting...')
                quit()
            else:
                comp_fitness_weight = general.CompositionFitnessWeight(
                    parameters['CompositionFitnessWeight'])
        else:
            comp_fitness_weight = general.CompositionFitnessWeight(
                parameters['CompositionFitnessWeight'])
    # attach the selection and fitness-weight objects to the pool
    pool.selection = selection
    pool.comp_fitness_weight = comp_fitness_weight
    objects_dict['pool'] = pool
    return objects_dict
def make_organism_creators(parameters, composition_space, constraints):
    """
    Returns a list containing organism creator objects.
    Falls back to a default RandomOrganismCreator when the input file has no
    usable "InitialPopulation" block; quits (after printing a message) on
    invalid input.
    Args:
        parameters: the dictionary produced by calling yaml.load() on the input
            file
        composition_space: the CompositionSpace of the search
        constraints: the Constraints of the search
    """
    if 'InitialPopulation' not in parameters:
        return make_default_organism_creator(composition_space, constraints)
    elif parameters['InitialPopulation'] in (None, 'default'):
        return make_default_organism_creator(composition_space, constraints)
    # make the specified creators
    else:
        # check that at least one valid option is used
        # TODO: if other organism creators are used, check them here as well
        if 'random' not in parameters['InitialPopulation'] and 'from_files' \
                not in parameters['InitialPopulation']:
            print('At least one valid option for making structures for the '
                  'initial population must be provided.')
            print('Please use the "random" and/or "from_files" keywords in '
                  'the InitialPopulation block.')
            print('Quitting...')
            quit()
        initial_organism_creators = []
        # the random organism creator
        if 'random' in parameters['InitialPopulation']:
            random_organism_creator = organism_creators.RandomOrganismCreator(
                parameters['InitialPopulation']['random'], composition_space,
                constraints)
            initial_organism_creators.append(random_organism_creator)
        # the from files organism creator
        # (required for phase diagram searches, optional otherwise)
        if 'from_files' not in parameters['InitialPopulation']:
            if composition_space.objective_function == 'pd':
                print('For phase diagram searches, reference structures at '
                      'each endpoint of the composition space must be '
                      'provided.')
                print('Please use the "from_files" keyword in the '
                      'InitialPopulation block to provide the reference '
                      'structures.')
                print('Quitting...')
                quit()
        # if nothing is given after the from_files keyword
        elif parameters['InitialPopulation']['from_files'] is None:
            print('The path to the folder containing the files must be '
                  'provided. Please use the "path_to_folder" keyword.')
            print('Quitting...')
            quit()
        # if path_to_folder keyword is not given
        elif 'path_to_folder' not in parameters['InitialPopulation'][
                'from_files']:
            print('Incorrect keyword given after "from_files" in the '
                  'InitialPopulation block. Please use the "path_to_folder" '
                  'keyword.')
            print('Quitting...')
            quit()
        else:
            given_path = parameters['InitialPopulation']['from_files'][
                'path_to_folder']
            # if no path was given after path_to_folder keyword
            if given_path is None:
                print('The path to the folder containing the files for the '
                      'initial population must be provided. Please give the '
                      'path after the "path_to_folder" keyword.')
                print('Quitting...')
                quit()
            # if the given path does not exist
            elif not os.path.exists(given_path):
                print('The given folder containing structures for the initial '
                      'population does not exist.')
                print('Quitting...')
                quit()
            # if the folder exists, check that it contains files
            elif len([f for f in os.listdir(given_path) if
                      os.path.isfile(os.path.join(given_path, f))]) == 0:
                print('The given folder containing structures for the initial '
                      'population does not contain any files.')
                print('Quitting...')
                quit()
            else:
                files_organism_creator = organism_creators.FileOrganismCreator(
                    given_path)
                # check that the files cover all composition space endpoints
                if composition_space.objective_function == 'pd':
                    cells = files_organism_creator.get_cells()
                    provided_endpoints = []
                    for endpoint in composition_space.endpoints:
                        for cell in cells:
                            if cell.composition.reduced_composition.almost_equals(
                                    endpoint.reduced_composition) and \
                                    endpoint not in provided_endpoints:
                                provided_endpoints.append(endpoint)
                    # check if we got them all
                    for endpoint in composition_space.endpoints:
                        if endpoint not in provided_endpoints:
                            print('Error: valid structure files not provided '
                                  'to the initial population for all '
                                  'endpoints of the composition space.')
                            print('Quitting...')
                            quit()
                initial_organism_creators.append(files_organism_creator)
        # TODO: if other organism creators are used, they should be
        # instantiated here
        return initial_organism_creators
def make_default_organism_creator(composition_space, constraints):
    """
    Returns a list holding a single default RandomOrganismCreator.
    Quits (after printing a message) for phase diagram searches, since those
    require reference structures supplied via the "from_files" keyword.
    Args:
        composition_space: the CompositionSpace of the search
        constraints: the Constraints of the search
    """
    # the non-'pd' objective functions are served by a random creator
    if composition_space.objective_function != 'pd':
        return [organism_creators.RandomOrganismCreator(
            'default', composition_space, constraints)]
    print('For phase diagram searches, reference structures at each '
          'endpoint of the composition space must be provided in the '
          'initial population.')
    print('Please use the "from_files" keyword in the '
          'InitialPopulation block to provide the reference '
          'structures.')
    print('Quitting...')
    quit()
def make_energy_calculator(parameters, geometry, composition_space):
    """
    Dispatches on the "EnergyCode" entry of the input dictionary and returns
    the corresponding EnergyCode object. Quits (after printing a message) if
    the entry is missing, empty, or names an unknown code.
    Args:
        parameters: the dictionary produced by calling yaml.load() on the input
            file
        geometry: the Geometry for the search
        composition_space: the CompositionSpace of the search
    """
    if 'EnergyCode' not in parameters:
        print('A method for calculating energy must be provided. Please use '
              'the "EnergyCode" keyword.')
        print('Quitting...')
        quit()
    energy_code_params = parameters['EnergyCode']
    if energy_code_params is None:
        print('An energy code must be specified after the "EnergyCode" '
              'keyword.')
        print('Quitting...')
        quit()
    # hand off to the maker function for the specified code
    if 'gulp' in energy_code_params:
        return make_gulp_energy_calculator(parameters, geometry)
    if 'lammps' in energy_code_params:
        return make_lammps_energy_calculator(parameters, geometry)
    if 'vasp' in energy_code_params:
        return make_vasp_energy_calculator(parameters, composition_space,
                                           geometry)
    print('The given energy code name is invalid.')
    print('Quitting...')
    quit()
def make_gulp_energy_calculator(parameters, geometry):
    """
    Returns a GulpEnergyCalculator object, or quits (after printing a
    message) if the header or potential file is missing or does not exist.
    Args:
        parameters: the dictionary produced by calling yaml.load() on the input
            file
        geometry: the Geometry for the search
    """
    gulp_params = parameters['EnergyCode']['gulp']
    if gulp_params is None:
        print('No GULP header or potential files given. Please use the '
              '"header_file" and "potential_file" keywords.')
        print('Quitting...')
        quit()
    # validate the header file entry
    if 'header_file' not in gulp_params:
        print('A GULP header file must be provided. Please use the '
              '"header_file" keyword.')
        print('Quitting...')
        quit()
    header_file_path = gulp_params['header_file']
    if header_file_path is None:
        print('No GULP header file given after the "header_file" '
              'keyword. Please provide one.')
        print('Quitting...')
        quit()
    if not os.path.exists(header_file_path):
        print('The given GULP header file does not exist.')
        print('Quitting...')
        quit()
    # validate the potential file entry
    if 'potential_file' not in gulp_params:
        print('A GULP potential file must be provided. Please use the '
              '"potential_file" keyword.')
        print('Quitting...')
        quit()
    potential_file_path = gulp_params['potential_file']
    if potential_file_path is None:
        print('No GULP potential file given after the '
              '"potential_file" keyword. Please provide one.')
        print('Quitting...')
        quit()
    if not os.path.exists(potential_file_path):
        print('The given GULP potential file does not exist.')
        print('Quitting...')
        quit()
    return energy_calculators.GulpEnergyCalculator(
        header_file_path, potential_file_path, geometry)
def make_lammps_energy_calculator(parameters, geometry):
    """
    Returns a LammpsEnergyCalculator object, or quits (after printing a
    message) if the input script is missing or does not exist.
    Args:
        parameters: the dictionary produced by calling yaml.load() on the input
            file
        geometry: the Geometry for the search
    """
    if parameters['EnergyCode']['lammps'] is None:
        print('No LAMMPS input script given. Please use the "input_script" '
              'keyword.')
        print('Quitting...')
        quit()
    else:
        # get the input script
        if 'input_script' not in parameters['EnergyCode']['lammps']:
            # bug fix: this message previously told the user to use the
            # "header_file" keyword, which belongs to the GULP energy code
            print('A LAMMPS input script must be provided. Please use the '
                  '"input_script" keyword.')
            print('Quitting...')
            quit()
        elif parameters['EnergyCode']['lammps']['input_script'] is None:
            print('No LAMMPS input script given after the "input_script" '
                  'keyword. Please provide one.')
            print('Quitting...')
            quit()
        else:
            # get the path to the input script
            input_script_path = parameters['EnergyCode']['lammps'][
                'input_script']
            # check that the input script exists
            if not os.path.exists(input_script_path):
                print('The given LAMMPS input script does not exist.')
                print('Quitting...')
                quit()
        return energy_calculators.LammpsEnergyCalculator(
            input_script_path, geometry)
def make_vasp_energy_calculator(parameters, composition_space, geometry):
    """
    Returns a VaspEnergyCalculator object, or quits if one cannot be made.
    Validates that the INCAR, KPOINTS and per-element POTCAR entries are all
    present and point to existing files; quits (after printing a message)
    otherwise.
    Args:
        parameters: the dictionary produced by calling yaml.load() on the input
            file
        composition_space: the CompositionSpace of the search
        geometry: the Geometry for the search
    """
    if parameters['EnergyCode']['vasp'] is None:
        print('No VASP input files given.')
        print('Quitting...')
        quit()
    else:
        # the INCAR file
        if 'incar' not in parameters['EnergyCode']['vasp']:
            print('An INCAR file must be provided. Please use the "incar" '
                  'keyword.')
            print('Quitting...')
            quit()
        elif parameters['EnergyCode']['vasp']['incar'] is None:
            print('No INCAR file was given after the "incar" keyword. Please '
                  'provide one.')
            print('Quitting...')
            quit()
        else:
            # get the path to the INCAR file
            incar_path = parameters['EnergyCode']['vasp']['incar']
            # check that the INCAR file exists
            if not os.path.exists(incar_path):
                print('The given INCAR file does not exist.')
                print('Quitting...')
                quit()
        # the KPOINTS file
        if 'kpoints' not in parameters['EnergyCode']['vasp']:
            print('A KPOINTS file must be provided. Please use the '
                  '"kpoints" keyword.')
            print('Quitting...')
            quit()
        elif parameters['EnergyCode']['vasp']['kpoints'] is None:
            print('No KPOINTS file was given after the "kpoints" keyword. '
                  'Please provide one.')
            print('Quitting...')
            quit()
        else:
            # get the path to the KPOINTS file
            kpoints_path = parameters['EnergyCode']['vasp']['kpoints']
            # check that the KPOINTS file exists
            if not os.path.exists(kpoints_path):
                print('The given KPOINTS file does not exist.')
                print('Quitting...')
                quit()
        # the POTCAR files
        if 'potcars' not in parameters['EnergyCode']['vasp']:
            print('POTCAR file(s) must be provided. Please use the '
                  '"potcars" keyword.')
            print('Quitting...')
            quit()
        elif parameters['EnergyCode']['vasp']['potcars'] is None:
            print('No POTCAR files were given after the "potcars" keyword. '
                  'Please provide them.')
            print('Quitting...')
            quit()
        else:
            # get the the paths to the POTCAR files of each element
            # (a dictionary keyed by element symbol)
            potcar_paths = parameters['EnergyCode']['vasp']['potcars']
            # check that enough POTCAR files have been provided
            elements_list = composition_space.get_all_elements()
            if len(potcar_paths) < len(elements_list):
                print('Not enough POTCAR files provided - one must be '
                      'given for each element in the composition space. '
                      'Please provide them.')
                print('Quitting...')
                quit()
            # check that each element has been specified below the
            # 'potcars' keyword
            for element in elements_list:
                if element.symbol not in potcar_paths:
                    print('No POTCAR file given for {}. Please provide '
                          'one.'.format(element.symbol))
                    print('Quitting...')
                    quit()
            # for each element, check that a POTCAR file has been given and
            # that it exists
            for key in potcar_paths:
                if potcar_paths[key] is None:
                    print('No POTCAR file given for {}. Please provide '
                          'one.'.format(key))
                    print('Quitting...')
                    quit()
                elif not os.path.exists(potcar_paths[key]):
                    print('The POTCAR file given for {} does not '
                          'exist.'.format(key))
                    print('Quitting...')
                    quit()
        return energy_calculators.VaspEnergyCalculator(
            incar_path, kpoints_path, potcar_paths, geometry)
def make_stopping_criteria(parameters, composition_space):
    """
    Returns a StoppingCriteria object built from the "StoppingCriteria"
    block of the input dictionary (defaults are used when the block is
    absent or empty). Quits (after printing a message) if a
    "found_structure" file is given but missing, badly named, or unreadable.
    Args:
        parameters: the dictionary produced by calling yaml.load() on the input
            file
        composition_space: the CompositionSpace of the search
    """
    if 'StoppingCriteria' not in parameters:
        return general.StoppingCriteria(None, composition_space)
    criteria_params = parameters['StoppingCriteria']
    if criteria_params in (None, 'default'):
        return general.StoppingCriteria(None, composition_space)
    if 'found_structure' not in criteria_params:
        return general.StoppingCriteria(criteria_params, composition_space)
    given_path = criteria_params['found_structure']
    if given_path in (None, 'default'):
        return general.StoppingCriteria(criteria_params, composition_space)
    # a structure file was given - check that it exists
    if not os.path.exists(given_path):
        print('The file containing the structure to find does not '
              'exist.')
        print('Quitting...')
        quit()
    # check that the file has the correct suffix or prefix
    file_name = os.path.basename(given_path)
    if not (file_name.endswith('.cif') or file_name.startswith('POSCAR.')):
        print('File containing structure to find must be in POSCAR or '
              'cif format and begin with POSCAR. or end with .cif, '
              'respectively.')
        print('Quitting...')
        quit()
    # check that the file can be read properly
    try:
        Structure.from_file(given_path)
        return general.StoppingCriteria(criteria_params, composition_space)
    except ValueError:
        print('Error reading the structure to find from the given '
              'file.')
        print('Quitting...')
        quit()
def make_variations(parameters, default_fractions, composition_space):
    """
    Creates the variations, using default parameter values if needed.
    Returns a list containing the variation objects (Mating, StructureMut,
    NumAtomsMut and Permutation). Quits (after printing a message) if a
    variation is requested without a valid "fraction" value.
    Args:
        parameters: the dictionary produced by calling yaml.load() on the input
            file
        default_fractions: a dictionary containing the default fractions to use
            for each variation
        composition_space: the CompositionSpace of the search
    """
    if 'Variations' not in parameters:
        return make_default_variations(default_fractions, composition_space)
    elif parameters['Variations'] in (None, 'default'):
        return make_default_variations(default_fractions, composition_space)
    else:
        variations_list = []
        # mating
        if 'Mating' not in parameters['Variations']:
            pass
        elif parameters['Variations']['Mating'] is None:
            print('If the "Mating" keyword is used, its "fraction" keyword '
                  'must also be set.')
            print('Quitting...')
            quit()
        else:
            # .get() reports a missing "fraction" key via the error message
            # below instead of raising a KeyError
            if parameters['Variations']['Mating'].get('fraction') in (
                    None, 'default'):
                # bug fix: corrected the "kwyword" typo in this message
                print('The "fraction" keyword is not optional and must '
                      'contain a valid entry (between 0 and 1) for the Mating '
                      'variation.')
                print('Quitting...')
                quit()
            else:
                mating = variations.Mating(parameters['Variations']['Mating'])
                variations_list.append(mating)
        # structure mutation
        if 'StructureMut' not in parameters['Variations']:
            pass
        elif parameters['Variations']['StructureMut'] is None:
            print('If the "StructureMut" keyword is used, its "fraction" '
                  'keyword must also be set.')
            print('Quitting...')
            quit()
        else:
            if parameters['Variations']['StructureMut'].get('fraction') in (
                    None, 'default'):
                print('The "fraction" keyword is not optional and must '
                      'contain a valid entry (between 0 and 1) for the '
                      'StructureMut variation.')
                print('Quitting...')
                quit()
            else:
                structure_mut = variations.StructureMut(
                    parameters['Variations']['StructureMut'])
                variations_list.append(structure_mut)
        # mutating the number of atoms in the cell
        if 'NumAtomsMut' not in parameters['Variations']:
            pass
        elif parameters['Variations']['NumAtomsMut'] is None:
            print('If the "NumAtomsMut" keyword is used, its "fraction" '
                  'keyword must also be set.')
            print('Quitting...')
            quit()
        else:
            if parameters['Variations']['NumAtomsMut'].get('fraction') in (
                    None, 'default'):
                print('The "fraction" keyword is not optional and must '
                      'contain a valid entry (between 0 and 1) for the '
                      'NumAtomsMut variation.')
                print('Quitting...')
                quit()
            else:
                num_atoms_mut = variations.NumAtomsMut(
                    parameters['Variations']['NumAtomsMut'])
                variations_list.append(num_atoms_mut)
        # permutation (swapping atoms)
        if 'Permutation' not in parameters['Variations']:
            pass
        elif parameters['Variations']['Permutation'] is None:
            print('If the "Permutation" keyword is used, its "fraction" '
                  'keyword must also be set.')
            print('Quitting...')
            quit()
        else:
            if parameters['Variations']['Permutation'].get('fraction') in (
                    None, 'default'):
                print('The "fraction" keyword is not optional and must '
                      'contain a valid entry (between 0 and 1) for the '
                      'Permutation variation.')
                print('Quitting...')
                # bug fix: quit() was missing here, so an invalid Permutation
                # fraction printed an error but let the search continue
                # without the variation
                quit()
            else:
                permutation = variations.Permutation(
                    parameters['Variations']['Permutation'], composition_space)
                variations_list.append(permutation)
        return variations_list
def make_default_variations(default_fractions, composition_space):
    """
    Builds all four variation objects (Mating, StructureMut, NumAtomsMut and
    Permutation) with default parameter values and the supplied fractions,
    and returns them in a list in that order.
    Args:
        default_fractions: a dictionary containing the default fractions to use
            for each variation
        composition_space: the CompositionSpace of the search
    """
    return [
        variations.Mating({'fraction': default_fractions['mating']}),
        variations.StructureMut(
            {'fraction': default_fractions['structure_mut']}),
        variations.NumAtomsMut(
            {'fraction': default_fractions['num_atoms_mut']}),
        variations.Permutation(
            {'fraction': default_fractions['permutation']},
            composition_space),
    ]
|
henniggroup/GASP-python
|
gasp/objects_maker.py
|
Python
|
mit
| 34,327
|
[
"GULP",
"LAMMPS",
"VASP",
"pymatgen"
] |
71ae0ed1199bcf2cdf3a3bebc4fe1a252602d935f915f09775c95549aec7c0d2
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2009-2013 Red Hat, Inc.
# This file is part of python-fedora
#
# python-fedora is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# python-fedora is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with python-fedora; if not, see <http://www.gnu.org/licenses/>
#
'''Implement a class that sets up simple communication to a Fedora Service.
.. moduleauthor:: Luke Macken <lmacken@redhat.com>
.. moduleauthor:: Toshio Kuratomi <tkuratom@redhat.com>
'''
import Cookie
import copy
import urllib
import httplib
import logging
# For handling an exception that's coming from requests:
import ssl
import time
import warnings
try:
from urlparse import urljoin
from urlparse import urlparse
except ImportError:
# Python3 support
from urllib.parse import urljoin
from urllib.parse import urlparse
try:
from hashlib import sha1 as sha_constructor
except ImportError:
from sha import new as sha_constructor
from bunch import bunchify
from kitchen.text.converters import to_bytes
import requests
from fedora import __version__
from fedora.client import AppError, AuthError, ServerError
log = logging.getLogger(__name__)
class ProxyClient(object):
# pylint: disable-msg=R0903
'''
A client to a Fedora Service. This class is optimized to proxy multiple
users to a service. ProxyClient is designed to be threadsafe so that
code can instantiate one instance of the class and use it for multiple
requests for different users from different threads.
If you want something that can manage one user's connection to a Fedora
Service, then look into using BaseClient instead.
This class has several attributes. These may be changed after
instantiation however, please note that this class is intended to be
threadsafe. Changing these values when another thread may affect more
than just the thread that you are making the change in. (For instance,
changing the debug option could cause other threads to start logging debug
messages in the middle of a method.)
.. attribute:: base_url
Initial portion of the url to contact the server. It is highly
recommended not to change this value unless you know that no other
threads are accessing this :class:`ProxyClient` instance.
.. attribute:: useragent
Changes the useragent string that is reported to the web server.
.. attribute:: session_name
Name of the cookie that holds the authentication value.
.. attribute:: session_as_cookie
If :data:`True`, then the session information is saved locally as
a cookie. This is here for backwards compatibility. New code should
set this to :data:`False` when constructing the :class:`ProxyClient`.
.. attribute:: debug
If :data:`True`, then more verbose logging is performed to aid in
debugging issues.
.. attribute:: insecure
If :data:`True` then the connection to the server is not checked to be
sure that any SSL certificate information is valid. That means that
a remote host can lie about who it is. Useful for development but
should not be used in production code.
.. attribute:: retries
Setting this to a positive integer will retry failed requests to the
web server this many times. Setting to a negative integer will retry
forever.
.. attribute:: timeout
A float describing the timeout of the connection. The timeout only
affects the connection process itself, not the downloading of the
response body. Defaults to 120 seconds.
.. versionchanged:: 0.3.33
Added the timeout attribute
'''
log = log
def __init__(self, base_url, useragent=None, session_name='tg-visit',
             session_as_cookie=True, debug=False, insecure=False,
             retries=None,
             timeout=None):
    '''Create a client configured for a particular service.

    :arg base_url: Base of every URL used to contact the server
    :kwarg useragent: useragent string to use. If not given, default to
        "Fedora ProxyClient/VERSION"
    :kwarg session_name: name of the cookie to use with session handling
    :kwarg session_as_cookie: If set to True, return the session as a
        SimpleCookie. If False, return a session_id. This flag allows us
        to maintain compatibility for the 0.3 branch. In 0.4, code will
        have to deal with session_id's instead of cookies.
    :kwarg debug: If True, log debug information
    :kwarg insecure: If True, do not check server certificates against
        their CA's. This means that man-in-the-middle attacks are
        possible against the `BaseClient`. You might turn this option on
        for testing against a local version of a server with a self-signed
        certificate but it should be off in production.
    :kwarg retries: if we get an unknown or possibly transient error from
        the server, retry this many times. Setting this to a negative
        number makes it try forever. Defaults to zero, no retries.
    :kwarg timeout: A float describing the timeout of the connection. The
        timeout only affects the connection process itself, not the
        downloading of the response body. Defaults to 120 seconds.

    .. versionchanged:: 0.3.33
        Added the timeout kwarg
    '''
    # The logging handler must exist before we touch ``self.debug``:
    # the ``debug`` property setter adjusts this handler's level.
    self._log_handler = logging.StreamHandler()
    self.debug = debug
    self._log_handler.setFormatter(logging.Formatter("%(message)s"))
    self.log.addHandler(self._log_handler)

    # python-requests is noisy in our app server logs; quiet it down
    # unless the caller explicitly asked for debug output.
    if not debug:
        logging.getLogger("requests").setLevel(logging.WARN)

    self.log.debug('proxyclient.__init__:entered')

    # Normalize the base url so later urljoin() calls behave.
    if base_url[-1] != '/':
        base_url = base_url + '/'
    self.base_url = base_url
    self.domain = urlparse(self.base_url).netloc

    self.useragent = useragent or 'Fedora ProxyClient/%(version)s' % {
        'version': __version__}
    self.session_name = session_name
    self.session_as_cookie = session_as_cookie
    if session_as_cookie:
        warnings.warn(
            'Returning cookies from send_request() is'
            ' deprecated and will be removed in 0.4. Please port your'
            ' code to use a session_id instead by calling the ProxyClient'
            ' constructor with session_as_cookie=False',
            DeprecationWarning, stacklevel=2)
    self.insecure = insecure

    # BaseClient passes None explicitly when the user supplied no
    # override, so apply the documented defaults here rather than in the
    # signature.
    self.retries = 0 if retries is None else retries
    self.timeout = 120.0 if timeout is None else timeout

    self.log.debug('proxyclient.__init__:exited')
def __get_debug(self):
    '''Report whether debug logging is currently enabled.

    :Returns: True if debugging is on, False otherwise.
    '''
    return self._log_handler.level <= logging.DEBUG

def __set_debug(self, debug=False):
    '''Switch debug logging on or off.

    :kwarg debug: A true value to turn debugging on, false value to turn it
        off.
    '''
    if debug:
        logger_level = handler_level = logging.DEBUG
    else:
        # Note the deliberate asymmetry here: the logger is raised to
        # ERROR while the handler sits at INFO.
        logger_level, handler_level = logging.ERROR, logging.INFO
    self.log.setLevel(logger_level)
    self._log_handler.setLevel(handler_level)

debug = property(__get_debug, __set_debug, doc='''
When True, we log extra debugging statements. When False, we only log
errors.
''')
def send_request(self, method, req_params=None, auth_params=None,
                 file_params=None, retries=None, timeout=None):
    '''Make an HTTP request to a server method.

    The given method is called with any parameters set in ``req_params``.
    If auth is True, then the request is made with an authenticated session
    cookie. Note that path parameters should be set by adding onto the
    method, not via ``req_params``.

    :arg method: Method to call on the server. It's a url fragment that
        comes after the base_url set in __init__(). Note that any
        parameters set as extra path information should be listed here,
        not in ``req_params``.
    :kwarg req_params: dict containing extra parameters to send to the
        server
    :kwarg auth_params: dict containing one or more means of authenticating
        to the server. Valid entries in this dict are:

        :cookie: **Deprecated** Use ``session_id`` instead. If both
            ``cookie`` and ``session_id`` are set, only ``session_id`` will
            be used. A ``Cookie.SimpleCookie`` to send as a session cookie
            to the server
        :session_id: Session id to put in a cookie to construct an identity
            for the server
        :username: Username to send to the server
        :password: Password to use with username to send to the server
        :httpauth: If set to ``basic`` then use HTTP Basic Authentication
            to send the username and password to the server. This may be
            extended in the future to support other httpauth types than
            ``basic``.

        Note that cookie can be sent alone but if one of username or
        password is set the other must as well. Code can set all of these
        if it wants and all of them will be sent to the server. Be careful
        of sending cookies that do not match with the username in this
        case as the server can decide what to do in this case.
    :kwarg file_params: dict of files where the key is the name of the
        file field used in the remote method and the value is the local
        path of the file to be uploaded. If you want to pass multiple
        files to a single file field, pass the paths as a list of paths.
    :kwarg retries: if we get an unknown or possibly transient error from
        the server, retry this many times. Setting this to a negative
        number makes it try forever. Default to use the :attr:`retries`
        value set on the instance or in :meth:`__init__`.
    :kwarg timeout: A float describing the timeout of the connection. The
        timeout only affects the connection process itself, not the
        downloading of the response body. Defaults to the :attr:`timeout`
        value set on the instance or in :meth:`__init__`.
    :returns: If ProxyClient is created with session_as_cookie=True (the
        default), a tuple of session cookie and data from the server.
        If ProxyClient was created with session_as_cookie=False, a tuple
        of session_id and data instead.
    :rtype: tuple of session information and data from server
    :raises AuthError: if the authentication arguments are incomplete or
        the server rejects them (HTTP 401/403)
    :raises ServerError: on timeouts (after retries), HTTP errors >= 400,
        or non-JSON responses
    :raises AppError: if the server response reports a server-side
        exception via the ``exc`` key

    .. versionchanged:: 0.3.17
        No longer send tg_format=json parameter. We rely solely on the
        Accept: application/json header now.
    .. versionchanged:: 0.3.21
        * Return data as a Bunch instead of a DictContainer
        * Add file_params to allow uploading files
    .. versionchanged:: 0.3.33
        Added the timeout kwarg
    '''
    self.log.debug('proxyclient.send_request: entered')

    # parameter mangling
    file_params = file_params or {}

    # Check whether we need to authenticate for this request
    session_id = None
    username = None
    password = None
    if auth_params:
        if 'session_id' in auth_params:
            session_id = auth_params['session_id']
        elif 'cookie' in auth_params:
            warnings.warn(
                'Giving a cookie to send_request() to'
                ' authenticate is deprecated and will be removed in 0.4.'
                ' Please port your code to use session_id instead.',
                DeprecationWarning, stacklevel=2)
            session_id = auth_params['cookie'].output(attrs=[],
                                                     header='').strip()
        if 'username' in auth_params and 'password' in auth_params:
            username = auth_params['username']
            password = auth_params['password']
        elif 'username' in auth_params or 'password' in auth_params:
            raise AuthError('username and password must both be set in'
                            ' auth_params')
        if not (session_id or username):
            raise AuthError(
                'No known authentication methods'
                ' specified: set "cookie" in auth_params or set both'
                ' username and password in auth_params')

    # urljoin is slightly different than os.path.join(). Make sure method
    # will work with it.
    method = method.lstrip('/')
    # And join to make our url.
    url = urljoin(self.base_url, urllib.quote(method))
    data = None  # decoded JSON via json.load()

    # Set standard headers
    headers = {
        'User-agent': self.useragent,
        'Accept': 'application/json',
    }

    # Files to upload.
    # Bugfix: iterating the dict directly yields only the field names;
    # we need (field name, path) pairs, so iterate items().  Snapshot
    # with list() because the values are replaced while looping.
    for field_name, local_file_name in list(file_params.items()):
        file_params[field_name] = open(local_file_name, 'rb')

    cookies = requests.cookies.RequestsCookieJar()
    # If we have a session_id, send it
    if session_id:
        # Anytime the session_id exists, send it so that visit tracking
        # works. Will also authenticate us if there's a need. Note that
        # there's no need to set other cookie attributes because this is a
        # cookie generated client-side.
        cookies.set(self.session_name, session_id)

    complete_params = req_params or {}
    if session_id:
        # Add the csrf protection token
        token = sha_constructor(session_id)
        complete_params.update({'_csrf_token': token.hexdigest()})

    auth = None
    if username and password:
        if auth_params.get('httpauth', '').lower() == 'basic':
            # HTTP Basic auth login
            auth = (username, password)
        else:
            # TG login
            # Adding this to the request data prevents it from being
            # logged by apache.
            complete_params.update({
                'user_name': to_bytes(username),
                'password': to_bytes(password),
                'login': 'Login',
            })

    # If debug, give people our debug info
    self.log.debug('Creating request %(url)s' %
                   {'url': to_bytes(url)})
    self.log.debug('Headers: %(header)s' %
                   {'header': to_bytes(headers, nonstring='simplerepr')})
    if self.debug and complete_params:
        debug_data = copy.deepcopy(complete_params)
        # Never write passwords to the log, even at debug level.
        if 'password' in debug_data:
            debug_data['password'] = 'xxxxxxx'
        self.log.debug('Data: %r' % debug_data)

    if retries is None:
        retries = self.retries

    if timeout is None:
        timeout = self.timeout

    num_tries = 0
    while True:
        try:
            response = requests.post(
                url,
                data=complete_params,
                cookies=cookies,
                headers=headers,
                auth=auth,
                verify=not self.insecure,
                timeout=timeout,
            )
        except (requests.Timeout, requests.exceptions.SSLError) as e:
            if isinstance(e, requests.exceptions.SSLError):
                # requests wraps read timeouts that occur during SSL
                # processing as:
                #   requests.exceptions.SSLError(
                #       urllib3.exceptions.SSLError(
                #           ssl.SSLError('The read operation timed out')))
                # Only that specific nesting is treated as a timeout here.
                #
                # We match on the class *name*/*module* rather than with
                # isinstance because requests may bundle its own copy of
                # urllib3: the bundled SSLError
                # (requests.packages.urllib3.exceptions.SSLError) and the
                # distro-unbundled one (urllib3.exceptions.SSLError) are
                # distinct types even though they are "the same"
                # exception.  This string check works for both without
                # adding a urllib3 dependency.
                if not (e.args
                        and e.args[0].__class__.__name__ == 'SSLError'
                        and e.args[0].__class__.__module__.endswith(
                            'urllib3.exceptions')
                        and e.args[0].args
                        and isinstance(e.args[0].args[0], ssl.SSLError)
                        and e.args[0].args[0].args
                        and 'timed out' in e.args[0].args[0].args[0]):
                    # Any other SSL failure is a genuine error; re-raise
                    # with the original traceback.
                    raise

            self.log.debug('Request timed out')
            if retries < 0 or num_tries < retries:
                num_tries += 1
                self.log.debug(
                    'Attempt #%(try)s failed' % {'try': num_tries})
                time.sleep(0.5)
                continue
            # Fail and raise an error
            # Raising our own exception protects the user from the
            # implementation detail of requests vs pycurl vs urllib
            raise ServerError(
                url, -1, 'Request timed out after %s seconds' % timeout)

        # When the python-requests module gets a response, it attempts to
        # guess the encoding using chardet (or a fork)
        # That process can take an extraordinarily long time for long
        # response.text strings.. upwards of 30 minutes for FAS queries to
        # /accounts/user/list JSON api! Therefore, we cut that codepath
        # off at the pass by assuming that the response is 'utf-8'. We can
        # make that assumption because we're only interfacing with servers
        # that we run (and we know that they all return responses
        # encoded 'utf-8').
        response.encoding = 'utf-8'

        # Check for auth failures
        # Note: old TG apps returned 403 Forbidden on authentication
        # failures.
        # Updated apps return 401 Unauthorized
        # We need to accept both until all apps are updated to return 401.
        http_status = response.status_code
        if http_status in (401, 403):
            # Wrong username or password
            self.log.debug('Authentication failed logging in')
            raise AuthError(
                'Unable to log into server. Invalid'
                ' authentication tokens. Send new username and password')
        elif http_status >= 400:
            if retries < 0 or num_tries < retries:
                # Retry the request
                num_tries += 1
                self.log.debug(
                    'Attempt #%(try)s failed' % {'try': num_tries})
                time.sleep(0.5)
                continue
            # Fail and raise an error
            try:
                msg = httplib.responses[http_status]
            except (KeyError, AttributeError):
                msg = 'Unknown HTTP Server Response'
            raise ServerError(url, http_status, msg)
        # Successfully returned data
        break

    # In case the server returned a new session cookie to us
    new_session = response.cookies.get(self.session_name, '')

    try:
        data = response.json
        # Compatibility with newer python-requests, where .json is a
        # method instead of a property.
        if callable(data):
            data = data()
    except ValueError as e:
        # The response wasn't JSON data.  (Also switched to the
        # "except ... as" form used elsewhere in this module.)
        raise ServerError(
            url, http_status, 'Error returned from'
            ' json module while processing %(url)s: %(err)s' %
            {'url': to_bytes(url), 'err': to_bytes(e)})

    if 'exc' in data:
        # Server-side exception propagated in the payload; surface it.
        name = data.pop('exc')
        message = data.pop('tg_flash')
        raise AppError(name=name, message=message, extras=data)

    # If we need to return a cookie for deprecated code, convert it here
    if self.session_as_cookie:
        cookie = Cookie.SimpleCookie()
        cookie[self.session_name] = new_session
        new_session = cookie

    self.log.debug('proxyclient.send_request: exited')
    data = bunchify(data)
    return new_session, data
# Public API of this module.  __all__ entries must be *strings* naming the
# attributes to export; putting the class object itself here makes
# ``from ... import *`` fail with a TypeError.
__all__ = ('ProxyClient',)
|
nirik/python-fedora
|
fedora/client/proxyclient.py
|
Python
|
gpl-2.0
| 23,068
|
[
"VisIt"
] |
d0d50345217207aa971196386863cdfd55d138569b9ddd602cd8b0d8845132c7
|
##
# Copyright 2009-2020 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing SAS, implemented as an easyblock
"""
import os
from easybuild.framework.easyblock import EasyBlock
from easybuild.tools.run import run_cmd_qa
class EB_SAS(EasyBlock):
    """Support for building/installing SAS."""

    def __init__(self, *args, **kwargs):
        """Custom constructor for SAS easyblock, initialize custom class variables."""
        super(EB_SAS, self).__init__(*args, **kwargs)

        # Use default SAS Installation Data File path
        self.license_file = ''

        # Set custom SAS Installation Data File path if defined and existing
        if self.cfg['license_file'] and os.path.isfile(self.cfg['license_file']):
            self.license_file = self.cfg['license_file']
            self.log.info("Custom SAS Installation Data File found: %s", self.license_file)

    def configure_step(self):
        """No custom configuration procedure for SAS."""
        pass

    def build_step(self):
        """No custom build procedure for SAS."""
        pass

    def install_step(self):
        """Custom install procedure for SAS: drive the interactive setup.sh console installer."""
        # Literal prompts and their canned answers; an empty string accepts
        # the installer's default.
        qa = {
            "SAS Home:": self.installdir,
            "Install SAS Software (default: Yes):": '',
            "Configure SAS Software (default: Yes):": '',
            "SAS Installation Data File:": self.license_file,
            "Press Enter to continue:": '',
            "Configure as a Unicode server (default: No):": 'N',
            "SAS/ACCESS Interface to MySQL (default: Yes):": 'N',
            "SAS/ACCESS Interface to Oracle (default: Yes):": 'N',
            "SAS/ACCESS Interface to Sybase (default: Yes):": 'N',
            "SAS/ACCESS Interface to SAP ASE (default: Yes):": 'N',
            "Use PAM Authentication (default: No):": 'N',
            "Port Number:": '',
            "Configure SAS Studio Basic (default: Yes):": 'N',
            "Press Enter to finish:": '',
            "Global Standards Library:": os.path.join(self.installdir, 'cstGlobalLibrary'),
            "Sample Library:": os.path.join(self.installdir, 'cstSampleLibrary'),
        }
        # Regex-matched prompts.  Raw strings so \s, \. etc. reach the regex
        # engine as-is instead of triggering invalid-escape-sequence
        # warnings (a SyntaxWarning on recent Python versions).
        std_qa = {
            r"Incomplete Deployment\s*(.*[^:])+Selection:": '2',  # 2: Ignore previous deployment and start again
            r"Select a language(.*[^:]\s*\n)+Selection:": '',
            r"Select Deployment Task\s*(.*[^:]\s*\n)+Selection:": '',
            r"Specify SAS Home\s*(.*[^:]\s*\n)+Selection:": '2',  # Create a new SAS Home
            r"Select Deployment Type\s*(.*[^:]\n)+Selection:": '2',  # 2: Install SAS Foundation
            r"Select Products to Install\s*(.*[^:]\n)+Selection:": '1',  # SAS Foundation
            r"Product\s*(.*[^:]\n)+Selections:": '',
            r"Select Language Support\s*(.*[^:]\n)+Selections:": '',
            r"Select Regional Settings\s*(.*[^:]\n)+Selection:": '',
            r"Select Support Option\s*(.*[^:]\n)+Selection:": '2',  # 2: Do Not Send
            r"Select SAS Foundation Products(.*[^:]\s*\n)+Selection:": '',
        }
        # Output patterns that indicate "still busy, no answer required".
        no_qa = [
            r"\.\.\.$",
        ]
        run_cmd_qa("./setup.sh -console", qa, no_qa=no_qa, std_qa=std_qa, log_all=True, simple=True)

    def sanity_check_step(self):
        """Custom sanity check for SAS: verify the sas binary and key directories exist."""
        custom_paths = {
            'files': [os.path.join('SASFoundation', self.version, 'sas')],
            'dirs': ['licenses', os.path.join('SASFoundation', self.version, 'bin')],
        }
        super(EB_SAS, self).sanity_check_step(custom_paths=custom_paths)

    def make_module_req_guess(self):
        """Custom path locations for SAS."""
        return {
            'PATH': [os.path.join('SASFoundation', self.version)],
        }
|
pescobar/easybuild-easyblocks
|
easybuild/easyblocks/s/sas.py
|
Python
|
gpl-2.0
| 4,717
|
[
"ASE"
] |
e91e5040f043fe6798c7c42dd3fc1a83a5d030a6e2ab78ab69efaa7e8c1622b2
|
"""Ops and optimizations for using BLAS calls
BLAS = Basic Linear Algebra Subroutines
Learn more about BLAS here:
http://www.netlib.org/blas/blast-forum/
The standard BLAS libraries implement what is called "legacy BLAS" in that
document.
This documentation describes Theano's BLAS optimization pipeline.
Where there is a discrepancy between how things do work and how they *should*
work, both aspects should be documented.
There are four kinds of BLAS Ops in Theano:
- Python implementations (this file)
- SciPy-based (blas_scipy)
- C-based (blas_c)
- CUDA-based (theano.sandbox.cuda.blas)
Notes
-----
Unfortunately (because it's confusing) this file currently contains Ops
that contain both Python and C versions. I think it would be better to
move the C implementations to blas_c so that this file is pure Python.
-JB
Ops
===
GEMM: Dot22, Dot22Scalar, GemmRelated, Gemm
-------------------------------------------
The BLAS GEMM operation implements Z <- a X Y + b Z,
where Z, X and Y are matrices, and a and b are scalars.
Dot22 is a GEMM where a=1, b=0, and Z is allocated every time.
Dot22Scalar is a GEMM where b=0 and Z is allocated every time.
Gemm is a GEMM in all its generality.
In the future we can refactor the GemmRelated, Gemm, Dot22 and
Dot22Scalar Ops into a single Op. That new Op (Gemm2) is basically a
normal Gemm, but with an additional configuration variable that says
to ignore the input Z. Setting that configuration variable to True
would make Gemm2 equivalent to the current Dot22 and Dot22Scalar.
This would make the file a lot easier to read, and save a few hundred
lines of library, to say nothing of testing and documentation.
GEMV: Gemv
----------
The BLAS GEMV operation implements Z <- a X Y + b Z,
where X is a matrix, Y, and Z are vectors, and a and b are scalars.
GER: Ger
--------
The BLAS GER operation implements Z <- a X' Y + Z,
where X and Y are vectors, and matrix Z gets a rank-1 update.
Other Notable BLAS-related Ops
------------------------------
SYRK is another useful special case of GEMM. Particularly SYRK preserves
symmetry in the matrix that it updates. See how the linear-algebra module uses
symmetry hints before implementing this Op, so that this Op is compatible with
that system.
Optimizations
=============
The optimization pipeline works something like this:
1. identify dot22 from dot
2. identify gemm from dot22
3. identify dot22scalar from dot22 that are not gemm
4. specialize gemm to gemv where applicable
5. specialize gemm to ger where applicable
6. specialize dot22 -> gemv or ger where applicable
:note: GEMM is the most canonical BLAS signature that we deal with so far, it
would be good to turn most things into GEMM (dot, inner, outer, dot22,
dot22scalar), and then to specialize from gemm to the various other L2 and
L3 operations.
Identify Dot22
--------------
Numpy's dot supports arguments that are of any rank, and we should support that
too (just for compatibility). The BLAS optimizations work with Dot Ops whose
inputs are each either vector or matrix. So the first part of the optimization
pipeline is to transform qualifying Dot Ops to Dot22 Ops. Dot22 Ops may be
transformed further, but they will get implemented by a BLAS call.
More precisely, Dot nodes whose inputs are all vectors or matrices and whose
inputs both have the same dtype, and whose dtype is float or complex, become
Dot22. This is implemented in `local_dot_to_dot22`.
Identify Gemm from Dot22
------------------------
This is complicated, done in GemmOptimizer.
Identify Dot22Scalar from Dot22
-------------------------------
Dot22 Ops that remain after the GemmOptimizer is done have not
qualified as GEMM Ops. Still they might be scaled by a factor, in
which case we use Dot22Scalar which is like Gemm, but without the b
and the Z. In the future it would be good to merge this into the
GemmOptimizer.
Specialize Gemm to Gemv
-----------------------
If arguments to GEMM are dimshuffled vectors, then we can use GEMV
instead. This optimization is `local_gemm_to_gemv`.
"""
from __future__ import absolute_import, print_function, division
import copy
import logging
import os
import time
import numpy
import numpy.distutils
try:
import numpy.distutils.__config__
except ImportError:
pass
from six import iteritems
from six.moves import reduce, xrange
from theano import config
from theano.gof import (utils, Op, view_roots,
local_optimizer, Optimizer,
InconsistencyError, toolbox, SequenceDB,
EquilibriumOptimizer, Apply,
ReplacementDidntRemovedError)
from theano.printing import pprint, FunctionPrinter, debugprint
from theano.compile.mode import optdb
import theano.scalar
from theano.tensor import basic as T
from theano.tensor.blas_headers import blas_header_text
from theano.tensor.blas_headers import blas_header_version
from theano.tensor.opt import in2out, local_dimshuffle_lift
from theano.tensor.type import values_eq_approx_remove_inf_nan
# Module-level logger for this file.
_logger = logging.getLogger('theano.tensor.blas')

# Detect a usable scipy BLAS gemv at import time.  `have_fblas` records
# whether the fast path is available; `_blas_gemv_fns` maps numpy dtypes to
# the matching scipy gemv routine (only defined when the import succeeds).
try:
    import scipy.linalg.blas
    have_fblas = True
    try:
        fblas = scipy.linalg.blas.fblas
    except AttributeError:
        # A change merged in Scipy development version on 2012-12-02 replaced
        # `scipy.linalg.blas.fblas` with `scipy.linalg.blas`.
        # See http://github.com/scipy/scipy/pull/358
        fblas = scipy.linalg.blas
    _blas_gemv_fns = {numpy.dtype('float32'): fblas.sgemv,
                      numpy.dtype('float64'): fblas.dgemv,
                      numpy.dtype('complex64'): fblas.cgemv,
                      numpy.dtype('complex128'): fblas.zgemv}
except ImportError as e:
    have_fblas = False
    # This is used in Gemv and ScipyGer. We use CGemv and CGer
    # when theano.config.blas.ldflags is defined. So we don't need a
    # warning in that case.
    if not config.blas.ldflags:
        _logger.warning('Failed to import scipy.linalg.blas, and '
                        'Theano flag blas.ldflags is empty. '
                        'Falling back on slower implementations for '
                        'dot(matrix, vector), dot(vector, matrix) and '
                        'dot(vector, vector) (%s)',
                        str(e))
# If check_init_y() == True we need to initialize y when beta == 0.
def check_init_y():
    """Return True if the scipy gemv leaves ``y`` uninitialized when beta == 0.

    Some BLAS implementations read ``y`` even when beta == 0; probing with
    an all-NaN ``y`` reveals this (the NaNs survive the call), in which case
    callers must zero-fill ``y`` themselves first.  The probe runs once and
    the result is cached on the function object.
    """
    if check_init_y._result is None:
        if not have_fblas:
            # Bugfix: return early instead of falling through — without
            # scipy BLAS, `_blas_gemv_fns` is never defined and the probe
            # below would raise NameError.  The numpy fallback path always
            # writes y fully, so no pre-initialization is needed.
            check_init_y._result = False
        else:
            y = float('NaN') * numpy.ones((2,))
            x = numpy.ones((2,))
            A = numpy.ones((2, 2))
            gemv = _blas_gemv_fns[y.dtype]
            gemv(1.0, A.T, x, 0.0, y, overwrite_y=True, trans=True)
            check_init_y._result = numpy.isnan(y).any()

    return check_init_y._result

check_init_y._result = None
class Gemv(Op):
    """
    expression is beta * y + alpha * A x

    A is matrix
    x, y are vectors
    alpha, beta are scalars
    output is a vector that can be inplace on y

    """

    # Ops comparing/hashing equal iff `inplace` matches.
    __props__ = ("inplace",)

    def __init__(self, inplace):
        # When inplace, the output reuses (and destroys) the `y` input.
        self.inplace = inplace
        if inplace:
            # Declare to Theano that output 0 overwrites input 0 (y).
            self.destroy_map = {0: [0]}

    def __str__(self):
        # Name distinguishes the inplace variant for graph printing.
        if self.inplace:
            return '%s{inplace}' % self.__class__.__name__
        else:
            return '%s{no_inplace}' % self.__class__.__name__

    def make_node(self, y, alpha, A, x, beta):
        """Validate ranks/dtypes and build the Apply node.

        A must be a matrix, x and y vectors; y, A and x must share a
        dtype.  The output has y's type.
        """
        y = T.as_tensor_variable(y)
        x = T.as_tensor_variable(x)
        A = T.as_tensor_variable(A)
        alpha = T.as_tensor_variable(alpha)
        beta = T.as_tensor_variable(beta)
        if y.dtype != A.dtype or y.dtype != x.dtype:
            raise TypeError('Gemv requires matching dtypes',
                            (y.dtype, A.dtype, x.dtype))
        if A.ndim != 2:
            raise TypeError('gemv requires matrix for A', A.type)
        if x.ndim != 1:
            raise TypeError('gemv requires vector for x', x.type)
        if y.ndim != 1:
            raise TypeError('gemv requires vector for y', y.type)
        return Apply(self, [y, alpha, A, x, beta], [y.type()])

    def perform(self, node, inputs, out_storage):
        """Compute beta * y + alpha * dot(A, x) into out_storage[0][0]."""
        y, alpha, A, x, beta = inputs
        # Fast path: scipy's BLAS gemv, when available, operands are
        # non-empty and the dtype has a registered BLAS routine.
        if (have_fblas and y.shape[0] != 0 and x.shape[0] != 0 and
                y.dtype in _blas_gemv_fns):
            gemv = _blas_gemv_fns[y.dtype]

            if (A.shape[0] != y.shape[0] or A.shape[1] != x.shape[0]):
                raise ValueError(
                    'Incompatible shapes for gemv '
                    '(beta * y + alpha * dot(A, x)). y: %s, A: %s, x: %s '
                    % (y.shape, A.shape, x.shape))

            # Some BLAS implementations read y even with beta == 0 (see
            # check_init_y); zero it so stale garbage cannot leak through.
            if beta == 0 and check_init_y():
                y.fill(0)

            # Here I suppose that A is in c order. If we don't make it
            # explicitly as fortran order, scipy 0.7.2 seems to create
            # a copy in fortran order instead of just reshaping it
            # and using the trans flag.
            # If A is already in fortran order, make it in c order and using
            # the trans flag doesn't seem to cause slowdown.
            # out_storage[0][0] = gemv(alpha, A, x, beta, y,
            #                          overwrite_y=self.inplace)
            out_storage[0][0] = gemv(alpha, A.T, x, beta, y,
                                     overwrite_y=self.inplace, trans=True)
        else:
            # Pure-numpy fallback (also covers empty operands).
            out = numpy.dot(A, x)
            if alpha != 1:
                out *= alpha
            if beta != 0:
                if beta != 1:
                    out += beta * y
                else:
                    out += y
            out_storage[0][0] = numpy.asarray(out, dtype=y.dtype)

    def infer_shape(self, node, input_shapes):
        # The output always has y's shape (input 0).
        return [input_shapes[0]]
# Pre-built singleton instances of the Op.
gemv_no_inplace = Gemv(inplace=False)
gemv_inplace = Gemv(inplace=True)
# For the user interface. Opt will make them inplace later
gemv = gemv_no_inplace
class Ger(Op):
    """
    Rank-1 matrix update, the BLAS GER operation: A <- A + alpha x y'
    for matrix A, scalar alpha, vectors x and y.

    The ``destructive`` constructor flag selects between updating A in
    place and working on a fresh copy of A.
    """

    __props__ = ("destructive",)

    def __init__(self, destructive):
        self.destructive = destructive
        if destructive:
            # Output 0 reuses (and therefore destroys) input 0 (A).
            self.destroy_map = {0: [0]}

    def __str__(self):
        variant = 'destructive' if self.destructive else 'non-destructive'
        return '%s{%s}' % (self.__class__.__name__, variant)

    def make_node(self, A, alpha, x, y):
        """Validate ranks/dtypes and build the Apply node."""
        A = T.as_tensor_variable(A)
        y = T.as_tensor_variable(y)
        x = T.as_tensor_variable(x)
        alpha = T.as_tensor_variable(alpha)
        if len({A.dtype, alpha.dtype, x.dtype, y.dtype}) != 1:
            raise TypeError('ger requires matching dtypes',
                            (A.dtype, alpha.dtype, x.dtype, y.dtype))
        if alpha.ndim != 0:
            raise TypeError('ger requires scalar alpha', alpha.type)
        if A.ndim != 2:
            raise TypeError('ger requires matrix for A', A.type)
        if x.ndim != 1:
            raise TypeError('ger requires vector for x', x.type)
        if y.ndim != 1:
            raise TypeError('ger requires vector for y', y.type)
        if x.dtype not in ('float32', 'float64', 'complex64', 'complex128'):
            raise TypeError('only float and complex types supported', x.dtype)
        return Apply(self, [A, alpha, x, y], [A.type()])

    def perform(self, node, inp, out):
        mat, scale, u, v = inp
        result_cell, = out
        # Only clobber the input matrix when explicitly allowed to.
        target = mat if self.destructive else mat.copy()
        rank1 = numpy.outer(u, v)
        if scale != 1:
            target += scale * rank1
        else:
            target += rank1
        result_cell[0] = target

    def infer_shape(self, node, input_shapes):
        # The result always has A's shape (input 0).
        return [input_shapes[0]]
# Pre-built singleton instances; the destructive variant updates A in place.
ger = Ger(destructive=False)
ger_destructive = Ger(destructive=True)
def ldflags(libs=True, flags=False, libs_dir=False, include_dir=False):
    """Extract a list of compilation flags from config.blas.ldflags.

    Thin convenience wrapper around :func:`_ldflags` that reads the flag
    string from ``theano.config.blas.ldflags``.  Depending on the options,
    different type of flags will be kept.  It returns a list of libraries
    against which an Op's object file should be linked to benefit from a
    BLAS implementation.

    Parameters
    ----------
    libs : bool, optional
        Extract flags starting with "-l" (the default is True).
    libs_dir : bool, optional
        Extract flags starting with "-L" (the default is False).
    include_dir : bool, optional
        Extract flags starting with "-I" (the default is False).
    flags: bool, optional
        Extract all the other flags (the default is False).

    Returns
    -------
    list of strings
        Extracted flags.

    """
    return _ldflags(ldflags_str=theano.config.blas.ldflags,
                    libs=libs,
                    flags=flags,
                    libs_dir=libs_dir,
                    include_dir=include_dir)
@utils.memoize
def _ldflags(ldflags_str, libs, flags, libs_dir, include_dir):
    """Extract list of compilation flags from a string.

    Depending on the options, different type of flags will be kept.

    Parameters
    ----------
    ldflags_str : string
        The string to process. Typically, this will be the content of
        `theano.config.blas.ldflags`.
    libs : bool
        Extract flags starting with "-l".
    flags: bool
        Extract all the other flags.
    libs_dir: bool
        Extract flags starting with "-L".
    include_dir: bool
        Extract flags starting with "-I".

    Returns
    -------
    list of strings
        Extracted flags.

    Raises
    ------
    ValueError
        If a token does not start with "-" (or is shorter than three
        characters), or if ``include_dir`` is requested and a "-I" token
        is present (include dirs are rejected for blas).

    """
    rval = []
    if libs_dir:
        # Sanity check: warn when none of the -L directories contains a
        # dynamic library matching one of the requested -l libraries.
        found_dyn = False
        dirs = [x[2:] for x in ldflags_str.split()
                if x.startswith('-L')]
        lib_names = _ldflags(ldflags_str=ldflags_str, libs=True,
                             flags=False, libs_dir=False, include_dir=False)
        for d in dirs:
            for f in os.listdir(d.strip('"')):
                if (f.endswith('.so') or f.endswith('.dylib') or
                        f.endswith('.dll')):
                    if any(f.find(ll) >= 0 for ll in lib_names):
                        found_dyn = True
        if not found_dyn and dirs:
            _logger.warning(
                "We did not found a dynamic library into the "
                "library_dir of the library we use for blas. If you use "
                "ATLAS, make sure to compile it with dynamics library.")

    for t in ldflags_str.split():
        # Remove extra quote.
        if (t.startswith("'") and t.endswith("'")) or (t.startswith('"') and t.endswith('"')):
            t = t[1:-1]

        try:
            # The unpacking doubles as a length check: tokens shorter than
            # three characters fail here and are reported below.
            t0, t1, t2 = t[0:3]
            assert t0 == '-'
        except Exception:
            raise ValueError('invalid token "%s" in ldflags_str: "%s"'
                             % (t, ldflags_str))
        if libs_dir and t1 == 'L':
            rval.append(t[2:])
        elif include_dir and t1 == 'I':
            # (The unreachable append that used to follow this raise has
            # been removed.)
            raise ValueError('Include dirs are not used for blas. We disable'
                             ' this as this can hide other headers and this'
                             ' is not wanted.', t)
        elif libs and t1 == 'l':  # example -lmkl
            rval.append(t[2:])
        elif flags and t1 not in ['L', 'I', 'l']:  # example -openmp
            rval.append(t)
        elif flags and t1 == 'L':
            # Embed an rpath so the library is found at load time even if
            # the environment of the user is not well configured.
            rval.append('-Wl,-rpath,' + t[2:])
    return rval
class GemmRelated(Op):
"""Base class for Gemm and Dot22.
This class provides a kind of templated gemm Op.
"""
__props__ = ()
def c_support_code(self):
    """Return C support code shared by the gemm-family Ops.

    Prepends the declarations from blas_header_text() and adds a MOD
    macro plus a small wall-clock helper used by the generated C code.
    """
    # return cblas_header_text()
    mod_str = """
    #ifndef MOD
    #define MOD %
    #endif
    static double time_time() // a time function like time.time()
    {
        struct timeval tv;
        gettimeofday(&tv, 0);
        return (double) tv.tv_sec + (double) tv.tv_usec / 1000000.0;
    }
    """
    return blas_header_text() + mod_str
def c_headers(self):
# std.cout doesn't require the '%' symbol to print stuff...
# so it works much better with python's string-substitution stuff.
return ['<iostream>', '<time.h>', '<sys/time.h>']
def c_libraries(self):
return ldflags()
# code_cache_version is built by subclasses from
# build_gemm_version
def c_compile_args(self):
return ldflags(libs=False, flags=True)
def c_lib_dirs(self):
return ldflags(libs=False, libs_dir=True)
def c_header_dirs(self):
return ldflags(libs=False, include_dir=True)
declare_NS = """
int unit = 0;
int type_num = PyArray_DESCR(%(_x)s)->type_num;
int type_size = PyArray_DESCR(%(_x)s)->elsize; // in bytes
npy_intp* Nx = PyArray_DIMS(%(_x)s);
npy_intp* Ny = PyArray_DIMS(%(_y)s);
npy_intp* Nz = 0; //PyArray_DIMS(%(_zout)s);
npy_intp* Sx = PyArray_STRIDES(%(_x)s);
npy_intp* Sy = PyArray_STRIDES(%(_y)s);
npy_intp* Sz = 0; //PyArray_STRIDES(%(_zout)s);
//strides for x, y, z in dimensions 0, 1
int sx_0, sx_1, sy_0, sy_1, sz_0, sz_1;
"""
# setup_z_Nz_Sz = None
check_xyz_rank2 = """
if (PyArray_NDIM(%(_x)s) != 2) {
PyErr_Format(PyExc_NotImplementedError,
"rank(x) != 2. rank(x) is %%d.",
PyArray_NDIM(%(_x)s));
%(fail)s;
}
if (PyArray_NDIM(%(_y)s) != 2) {
PyErr_Format(PyExc_NotImplementedError,
"rank(y) != 2. rank(y) is %%d.", PyArray_NDIM(%(_y)s));
%(fail)s;
}
if (%(_zout)s && PyArray_NDIM(%(_zout)s) != 2) {
PyErr_Format(PyExc_NotImplementedError,
"rank(z) != 2. rank(z) is %%d.", PyArray_NDIM(%(_zout)s));
%(fail)s;
}
"""
check_xyz_double_or_float = """
if ((PyArray_DESCR(%(_x)s)->type_num != NPY_DOUBLE)
&& (PyArray_DESCR(%(_x)s)->type_num != NPY_FLOAT))
{PyErr_SetString(PyExc_NotImplementedError, "type(x) is not double or float"); %(fail)s;}
if ((PyArray_DESCR(%(_y)s)->type_num != NPY_DOUBLE)
&& (PyArray_DESCR(%(_y)s)->type_num != NPY_FLOAT))
{PyErr_SetString(PyExc_NotImplementedError, "type(y) is not double or float"); %(fail)s;}
if ((PyArray_DESCR(%(_zout)s)->type_num != NPY_DOUBLE)
&& (PyArray_DESCR(%(_zout)s)->type_num != NPY_FLOAT))
{PyErr_SetString(PyExc_NotImplementedError, "type(z) is not double or float"); %(fail)s;}
if ((PyArray_DESCR(%(_x)s)->type_num != PyArray_DESCR(%(_y)s)->type_num)
||(PyArray_DESCR(%(_x)s)->type_num != PyArray_DESCR(%(_zout)s)->type_num))
{ PyErr_SetString(PyExc_NotImplementedError, "type(x), type(y), type(z) are not all the same"); %(fail)s; }
"""
# it is not necessary that a or b have the same type as x,y,z
check_ab_double_or_float = """
if ((PyArray_DESCR(%(_a)s)->type_num != NPY_DOUBLE)
&& (PyArray_DESCR(%(_a)s)->type_num != NPY_FLOAT))
{PyErr_SetString(PyExc_NotImplementedError, "type(a) is not double or float"); %(fail)s;}
if ((PyArray_DESCR(%(_b)s)->type_num != NPY_DOUBLE)
&& (PyArray_DESCR(%(_b)s)->type_num != NPY_FLOAT))
{PyErr_SetString(PyExc_NotImplementedError, "type(b) is not double or float"); %(fail)s;}
"""
check_dims = """
if (Nx[0] != Nz[0])
{
PyErr_Format(PyExc_ValueError,
"Shape mismatch: x has %%ld rows but z has %%ld rows",
(long int)Nx[0], (long int)Nz[0]);
%(fail)s;
}
if (Nx[1] != Ny[0])
{
PyErr_Format(PyExc_ValueError,
"Shape mismatch: x has %%ld cols (and %%ld rows) but y has %%ld rows (and %%ld cols)",
(long int)Nx[1], (long int)Nx[0], (long int)Ny[0], (long int)Ny[1]);
%(fail)s;
}
if (Ny[1] != Nz[1])
{
PyErr_Format(PyExc_ValueError,
"Shape mismatch: y has %%ld cols but z has %%ld cols",
(long int)Ny[1], (long int)Nz[1]);
%(fail)s;
}
// We must not raise an error when Nx[1] == 0. This would disable cases
// that numpy.dot accept.
"""
check_strides = """
/*
If some matrices are not contiguous on either dimensions,
or have invalid strides, copy their content into a contiguous one
*/
if ((Sx[0] < 1) || (Sx[1] < 1) || (Sx[0] MOD type_size) || (Sx[1] MOD type_size)
|| ((Sx[0] != type_size) && (Sx[1] != type_size)))
{
PyArrayObject * _x_copy = (PyArrayObject *) PyArray_Copy(%(_x)s);
if (!_x_copy)
%(fail)s
Py_XDECREF(%(_x)s);
%(_x)s = _x_copy;
Sx = PyArray_STRIDES(%(_x)s);
}
if ((Sy[0] < 1) || (Sy[1] < 1) || (Sy[0] MOD type_size) || (Sy[1] MOD type_size)
|| ((Sy[0] != type_size) && (Sy[1] != type_size)))
{
PyArrayObject * _y_copy = (PyArrayObject *) PyArray_Copy(%(_y)s);
if (!_y_copy)
%(fail)s
Py_XDECREF(%(_y)s);
%(_y)s = _y_copy;
Sy = PyArray_STRIDES(%(_y)s);
}
if ((Sz[0] < 1) || (Sz[1] < 1) || (Sz[0] MOD type_size) || (Sz[1] MOD type_size)
|| ((Sz[0] != type_size) && (Sz[1] != type_size)))
{
PyArrayObject * _z_copy = (PyArrayObject *) PyArray_Copy(%(_zout)s);
if (!_z_copy)
%(fail)s
Py_XDECREF(%(_zout)s);
%(_zout)s = _z_copy;
Sz = PyArray_STRIDES(%(_zout)s);
}
"""
encode_strides_in_unit = """
/*
encode the stride structure of _x,_y,_zout into a single integer
*/
unit |= ((Sx[1] == type_size || Nx[1]==1) ? 0x0 : (Sx[0] == type_size || Nx[0]==1) ? 0x1 : 0x2) << 8;
unit |= ((Sy[1] == type_size || Ny[1]==1) ? 0x0 : (Sy[0] == type_size || Ny[0]==1) ? 0x1 : 0x2) << 4;
unit |= ((Sz[1] == type_size || Nz[1]==1) ? 0x0 : (Sz[0] == type_size || Nz[0]==1) ? 0x1 : 0x2) << 0;
"""
compute_strides = """
/* create appropriate strides for malformed matrices that are row or column
* vectors, or empty matrices.
* In that case, the value of the stride does not really matter, but
* some versions of BLAS insist that:
* - they are not smaller than the number of elements in the array,
* - they are not 0.
*/
sx_0 = (Nx[0] > 1) ? Sx[0]/type_size : (Nx[1] + 1);
sx_1 = (Nx[1] > 1) ? Sx[1]/type_size : (Nx[0] + 1);
sy_0 = (Ny[0] > 1) ? Sy[0]/type_size : (Ny[1] + 1);
sy_1 = (Ny[1] > 1) ? Sy[1]/type_size : (Ny[0] + 1);
sz_0 = (Nz[0] > 1) ? Sz[0]/type_size : (Nz[1] + 1);
sz_1 = (Nz[1] > 1) ? Sz[1]/type_size : (Nz[0] + 1);
"""
begin_switch_typenum = """
switch (type_num)
{
"""
case_float = """
case NPY_FLOAT:
{
"""
# case_float_ab_constants = None
case_float_gemm = """
float* x = (float*)PyArray_DATA(%(_x)s);
float* y = (float*)PyArray_DATA(%(_y)s);
float* z = (float*)PyArray_DATA(%(_zout)s);
char N = 'N';
char T = 'T';
int Nz0 = Nz[0], Nz1 = Nz[1], Nx1 = Nx[1];
//std::cerr << (unit/256) MOD 16 << (unit / 16) MOD 16 << unit MOD 16<< '\\n';
//double t0 = time_time();
switch(unit)
{
case 0x000: sgemm_(&N, &N, &Nz1, &Nz0, &Nx1, &a, y, &sy_0, x, &sx_0, &b, z, &sz_0); break;
case 0x100: sgemm_(&N, &T, &Nz1, &Nz0, &Nx1, &a, y, &sy_0, x, &sx_1, &b, z, &sz_0); break;
case 0x010: sgemm_(&T, &N, &Nz1, &Nz0, &Nx1, &a, y, &sy_1, x, &sx_0, &b, z, &sz_0); break;
case 0x110: sgemm_(&T, &T, &Nz1, &Nz0, &Nx1, &a, y, &sy_1, x, &sx_1, &b, z, &sz_0); break;
case 0x001: sgemm_(&T, &T, &Nz0, &Nz1, &Nx1, &a, x, &sx_0, y, &sy_0, &b, z, &sz_1); break;
case 0x101: sgemm_(&N, &T, &Nz0, &Nz1, &Nx1, &a, x, &sx_1, y, &sy_0, &b, z, &sz_1); break;
case 0x011: sgemm_(&T, &N, &Nz0, &Nz1, &Nx1, &a, x, &sx_0, y, &sy_1, &b, z, &sz_1); break;
case 0x111: sgemm_(&N, &N, &Nz0, &Nz1, &Nx1, &a, x, &sx_1, y, &sy_1, &b, z, &sz_1); break;
default: PyErr_SetString(PyExc_ValueError, "some matrix has no unit stride"); %(fail)s;
};
//fprintf(stderr, "Calling sgemm %%i %%i %%i %%i took %%f\\n", unit, Nz1, Nz0, Nx1, time_time() - t0);
"""
case_double = """
}
break;
case NPY_DOUBLE:
{
"""
# case_double_ab_constants = None
case_double_gemm = """
double* x = (double*)PyArray_DATA(%(_x)s);
double* y = (double*)PyArray_DATA(%(_y)s);
double* z = (double*)PyArray_DATA(%(_zout)s);
char N = 'N';
char T = 'T';
int Nz0 = Nz[0], Nz1 = Nz[1], Nx1 = Nx[1];
//std::cerr << (unit/256) MOD 16 << (unit / 16) MOD 16 << unit MOD 16<< '\\n';
//double t0 = time_time();
//fprintf(stderr, "unit=%%x N= %%i %%i %%i S = %%i %%i %%i %%i %%i %%i\\n", unit,
//Nz1, Nz0, Nx1,
//sy_0, sy_1,
//sx_0, sx_1,
//sz_0, sz_1
//);
switch(unit)
{
case 0x000: dgemm_(&N, &N, &Nz1, &Nz0, &Nx1, &a, y,
&sy_0, x, &sx_0, &b, z, &sz_0); break;
case 0x100: dgemm_(&N, &T, &Nz1, &Nz0, &Nx1, &a, y,
&sy_0, x, &sx_1, &b, z, &sz_0); break;
case 0x010: dgemm_(&T, &N, &Nz1, &Nz0, &Nx1, &a, y,
&sy_1, x, &sx_0, &b, z, &sz_0); break;
case 0x110: dgemm_(&T, &T, &Nz1, &Nz0, &Nx1, &a, y,
&sy_1, x, &sx_1, &b, z, &sz_0); break;
case 0x001: dgemm_(&T, &T, &Nz0, &Nz1, &Nx1, &a, x,
&sx_0, y, &sy_0, &b, z, &sz_1); break;
case 0x101: dgemm_(&N, &T, &Nz0, &Nz1, &Nx1, &a, x,
&sx_1, y, &sy_0, &b, z, &sz_1); break;
case 0x011: dgemm_(&T, &N, &Nz0, &Nz1, &Nx1, &a, x,
&sx_0, y, &sy_1, &b, z, &sz_1); break;
case 0x111: dgemm_(&N, &N, &Nz0, &Nz1, &Nx1, &a, x,
&sx_1, y, &sy_1, &b, z, &sz_1); break;
default: PyErr_SetString(PyExc_ValueError,
"some matrix has no unit stride");
%(fail)s;
};
//fprintf(stderr, "Calling dgemm %%i %%i %%i %%i took %%f\\n",
// unit, Nz1, Nz0, Nx1, time_time()- t0);
"""
end_switch_typenum = """
}
break;
}
"""
def build_gemm_call(self):
return reduce(str.__add__, (
self.declare_NS,
self.check_xyz_rank2,
self.setup_z_Nz_Sz,
self.check_xyz_double_or_float,
self.check_ab_double_or_float,
self.check_dims,
self.check_strides,
self.encode_strides_in_unit,
self.compute_strides,
self.begin_switch_typenum,
self.case_float,
self.case_float_ab_constants,
self.case_float_gemm,
self.case_double,
self.case_double_ab_constants,
self.case_double_gemm,
self.end_switch_typenum), '')
def build_gemm_version(self):
return (13, blas_header_version())
class Gemm(GemmRelated):
    """In-place version of matrix-matrix multiplication (with accumulation).

    When a and b are scalars and x, y, and z are matrices, then

        gemm(z,a,x,y,b)

    is similar to

        b*z + a*dot(x,y)

    The difference between the two is that the top form is destructive
    on z, whereas the bottom form is not.  Gemm works in-place on the
    storage associated with z, and the L{Variable} returned by Gemm
    has a storage that will be aliased to the storage of the z
    argument. Because of this in-place computation, an L{Apply} of
    this op will destroy the L{Variable} z on which it operates.  (See
    L{DestructiveOps} for an explanation of what destroying means in
    the context of theano graphs. See L{BlasLapackSupport} for more
    optimized linear algebra operations.)
    """

    # Error-message constants reused by make_node and by tests.
    E_rank = 'gemm only works for rank 2'
    E_scalar = 'gemm requires scalar argument'
    E_z_uniq = 'argument z aliased to x or y'  # TODO: justify / delete this
    E_mixed = 'gemm requires matching dtypes'
    E_float = 'gemm requires floating-point dtypes'

    __props__ = ('inplace',)

    def __init__(self, inplace):
        # type: (bool) -> None
        self.inplace = inplace
        if self.inplace:
            # In-place: output 0 destroys (reuses the storage of) input 0.
            self.destroy_map = {0: [0]}
            self.setup_z_Nz_Sz = self.setup_z_Nz_Sz_inplace
        else:
            self.setup_z_Nz_Sz = self.setup_z_Nz_Sz_outplace

    def __str__(self):
        if self.inplace:
            inplace_str = 'inplace'
        else:
            inplace_str = 'no_inplace'
        return '%s{%s}' % (self.__class__.__name__, inplace_str)

    def __setstate__(self, dct):
        """Restore the unpicklable setup_z_Nz_Sz attribute after unpickling."""
        self.__dict__.update(dct)
        if self.inplace:
            self.setup_z_Nz_Sz = self.setup_z_Nz_Sz_inplace
        else:
            self.setup_z_Nz_Sz = self.setup_z_Nz_Sz_outplace

        # Correctly reload older pickles where destroy_map were not
        # saved
        if 'destroy_map' not in self.__dict__ and self.inplace:
            self.destroy_map = {0: [0]}

    def __getstate__(self):
        rval = self.__dict__.copy()
        # Do not serialize the setup code, it will be restored in __setstate__
        # depending on the value of 'inplace'
        rval.pop('setup_z_Nz_Sz')
        return rval

    def make_node(self, *inputs):
        """Build an Apply for gemm(z, a, x, y, b); see the class docstring.

        Raises TypeError on wrong arity, rank, or dtype, and
        InconsistencyError when z may alias x or y.
        """
        inputs = list(map(T.as_tensor_variable, inputs))
        if len(inputs) != 5:
            raise TypeError(
                "Wrong number of inputs for %s (expected 5, got %s)" %
                (self, len(inputs)))
        z, a, x, y, b = inputs

        # For the consistency check we don't want z to be a cached constant.
        if getattr(z, 'cached', False):
            z = copy.copy(z)
        zr, xr, yr = [set(view_roots(i)) for i in (z, x, y)]

        # We want the gemm to be inplace. When this op is inplace, it
        # declare to be inplace only on z. So to make it safe, we
        # raise an error if z can be a view on x or y.

        # I don't know if Theano currently can support that case. As
        # this case don't happen in our code, I won't spent time
        # investigating this. So the assert is for safety. I also
        # think there is another mechanism that would prevent this,
        # but I don't what to modify old code and have chance to break
        # something.
        if zr.intersection(xr):
            raise InconsistencyError(Gemm.E_z_uniq, (z, x))
        if zr.intersection(yr):
            raise InconsistencyError(Gemm.E_z_uniq, (z, y))

        if z.ndim != 2:
            raise TypeError(Gemm.E_rank, z)
        if a.ndim != 0:
            raise TypeError(Gemm.E_scalar, a)
        if x.ndim != 2:
            raise TypeError(Gemm.E_rank, x)
        if y.ndim != 2:
            raise TypeError(Gemm.E_rank, y)
        if b.ndim != 0:
            raise TypeError(Gemm.E_scalar, b)

        if not (z.dtype == a.dtype == x.dtype == y.dtype == b.dtype):
            raise TypeError(Gemm.E_mixed,
                            (z.dtype, a.dtype, x.dtype, y.dtype, b.dtype))

        if (not z.dtype.startswith('float') and
                not z.dtype.startswith('complex')):
            raise TypeError(Gemm.E_float, (z.dtype))

        output = z.type()
        return Apply(self, inputs, [output])

    def perform(self, node, inp, out):
        """Python fallback: compute b*z + a*dot(x, y), in-place when allowed.

        Special-cases b in {0, 1} and a in {1, -1} to avoid needless
        scaling/copies.
        """
        z, a, x, y, b = inp
        zout, = out
        assert a.shape == ()
        assert b.shape == ()
        if not self.inplace:
            z = z.copy()  # the original z will not be changed
        if z.shape == ():
            z.itemset(z * a + b * numpy.dot(x, y))
            zout[0] = z
        else:
            if b == 0.0:
                if a == 1.0:
                    z[:] = numpy.dot(x, y)
                elif a == -1.0:
                    z[:] = -numpy.dot(x, y)
                else:
                    z[:] = a * numpy.dot(x, y)
            elif b == 1.0:
                if a == 1.0:
                    z += numpy.dot(x, y)
                elif a == -1.0:
                    z -= numpy.dot(x, y)
                else:
                    z += a * numpy.dot(x, y)
            else:
                z *= b  # broadcast Z before adding dot(A, B)
                z += a * numpy.dot(x, y)
            zout[0] = z

    def infer_shape(self, node, input_shapes):
        # Output has the shape of z (input 0).
        return [input_shapes[0]]

    # C fragment: reuse z's storage directly as the output buffer.
    setup_z_Nz_Sz_inplace = """
if (%(_zout)s != %(_z)s)
{
    if (%(_zout)s)
    {
        Py_DECREF(%(_zout)s);
    }
    %(_zout)s = %(_z)s;
    Py_INCREF(%(_zout)s);
}
Nz = PyArray_DIMS(%(_z)s);
Sz = PyArray_STRIDES(%(_z)s);
"""

    # C fragment: (re)allocate the output if needed, then copy z into it.
    setup_z_Nz_Sz_outplace = """
if ((NULL == %(_zout)s)
    || (PyArray_DIMS(%(_zout)s)[0] != PyArray_DIMS(%(_z)s)[0])
    || (PyArray_DIMS(%(_zout)s)[1] != PyArray_DIMS(%(_z)s)[1])
    || (PyArray_STRIDES(%(_zout)s)[0] <= 0)
    || (PyArray_STRIDES(%(_zout)s)[1] <= 0)
    || (PyArray_STRIDES(%(_zout)s)[0] MOD type_size)
    || (PyArray_STRIDES(%(_zout)s)[1] MOD type_size)
    || ((PyArray_STRIDES(%(_zout)s)[0] != type_size)
        && (PyArray_STRIDES(%(_zout)s)[1] != type_size)))
{
    Py_XDECREF(%(_zout)s);
    npy_intp dims[2];
    dims[0] = PyArray_DIMS(%(_z)s)[0];
    dims[1] = PyArray_DIMS(%(_z)s)[1];
    %(_zout)s = (PyArrayObject*)PyArray_SimpleNew(2, dims,
                                                  PyArray_TYPE(%(_z)s));
    //fprintf(stderr, "Gemm Allocating %%i %%i\\n", dims[0], dims[1]);
    if(!%(_zout)s) {
        PyErr_SetString(PyExc_MemoryError,
                        "failed to alloc gemm_no_inplace output");
        %(fail)s
    }
}
Nz = PyArray_DIMS(%(_zout)s);
Sz = PyArray_STRIDES(%(_zout)s);

if (PyArray_DESCR(%(_zout)s)->type_num == NPY_FLOAT)
{
    float * zoutdata = (float*)PyArray_DATA(%(_zout)s);
    int zoi = Sz[0] / sizeof(float);
    int zoj = Sz[1] / sizeof(float);
    const float * zdata = (float*)PyArray_DATA(%(_z)s);
    int zi = PyArray_STRIDES(%(_z)s)[0]/sizeof(float);
    int zj = PyArray_STRIDES(%(_z)s)[1]/sizeof(float);
    for (int i = 0; i < Nz[0]; ++i)
    {
        for (int j = 0; j < Nz[1]; ++j)
        {
            zoutdata[zoi*i + zoj*j] = zdata[zi*i + zj*j];
        }
    }
}
else if (PyArray_DESCR(%(_zout)s)->type_num == NPY_DOUBLE)
{
    double * zoutdata = (double*) PyArray_DATA(%(_zout)s);
    int zoi = Sz[0] / sizeof(double);
    int zoj = Sz[1] / sizeof(double);
    const double * zdata = (double*)PyArray_DATA(%(_z)s);
    int zi = PyArray_STRIDES(%(_z)s)[0]/sizeof(double);
    int zj = PyArray_STRIDES(%(_z)s)[1]/sizeof(double);
    for (int i = 0; i < Nz[0]; ++i)
    {
        for (int j = 0; j < Nz[1]; ++j)
        {
            zoutdata[zoi*i + zoj*j] = zdata[zi*i + zj*j];
        }
    }
}
else
{
    PyErr_SetString(PyExc_AssertionError,
                    "neither float nor double dtype");
    %(fail)s
}
"""

    # C fragments: read the scalar coefficients a and b, converting from
    # whichever of float/double they were passed as.
    case_float_ab_constants = """
#define REAL float
float a = (PyArray_DESCR(%(_a)s)->type_num == NPY_FLOAT)
? (REAL)(((float*)PyArray_DATA(%(_a)s))[0])
: (REAL)(((double*)PyArray_DATA(%(_a)s))[0]);
float b = (PyArray_DESCR(%(_b)s)->type_num == NPY_FLOAT) ?
(REAL)(((float*)PyArray_DATA(%(_b)s))[0])
: (REAL)(((double*)PyArray_DATA(%(_b)s))[0]);
#undef REAL
"""
    case_double_ab_constants = """
#define REAL double
double a = (PyArray_DESCR(%(_a)s)->type_num == NPY_FLOAT)
? (REAL)(((float*)PyArray_DATA(%(_a)s))[0])
: (REAL)(((double*)PyArray_DATA(%(_a)s))[0]);
double b = (PyArray_DESCR(%(_b)s)->type_num == NPY_FLOAT) ?
(REAL)(((float*)PyArray_DATA(%(_b)s))[0])
: (REAL)(((double*)PyArray_DATA(%(_b)s))[0]);
#undef REAL
"""

    def c_code(self, node, name, inp, out, sub):
        """Generate the C implementation; complex dtypes are not supported."""
        _z, _a, _x, _y, _b = inp
        _zout, = out
        if node.inputs[0].type.dtype.startswith('complex'):
            raise utils.MethodNotDefined('%s.c_code'
                                         % self.__class__.__name__)
        full_code = self.build_gemm_call() % dict(locals(), **sub)
        return full_code

    def c_code_cache_version(self):
        # Combine this class's version with the shared template version.
        gv = self.build_gemm_version()
        if gv:
            return (5,) + gv
        else:
            return gv
# Singleton instances; the in-place variant is only introduced by the
# inplace-substitution optimizations below.
gemm_inplace = Gemm(inplace=True)
gemm_no_inplace = Gemm(inplace=False)
# For the user interface. Theano optimization will make them inplace
gemm = gemm_no_inplace
# Register pretty-printers so graphs print readable names.
pprint.assign(gemm_inplace, FunctionPrinter('gemm_inplace'))
pprint.assign(gemm_no_inplace, FunctionPrinter('gemm_no_inplace'))
def res_is_a(node, op, maxclients=None):
    """Truthy iff `node` was produced by `op`, optionally requiring that it
    have at most `maxclients` clients."""
    if maxclients is None:
        clients_ok = True
    else:
        clients_ok = len(node.clients) <= maxclients
    return node.owner and node.owner.op == op and clients_ok
def _as_scalar(res, dtype=None):
    """Return None or a TensorVariable whose type is in T.float_scalar_types"""
    if dtype is None:
        dtype = config.floatX
    if not numpy.all(res.type.broadcastable):
        # Not scalar-like: reject.
        return None
    # Strip any chain of DimShuffles to reach the underlying variable.
    while res.owner and isinstance(res.owner.op, T.DimShuffle):
        res = res.owner.inputs[0]
    # may still have some number of True's
    rval = res.dimshuffle() if res.type.broadcastable else res
    if rval.type.dtype not in theano.tensor.integer_dtypes:
        return rval
    # We check that the upcast of res and dtype won't change dtype.
    # If dtype is float64, we will cast int64 to float64.
    # This is valid when res is a scalar used as input to a dot22
    # as the cast of the scalar can be done before or after the dot22
    # and this will give the same result.
    if theano.scalar.upcast(res.dtype, dtype) == dtype:
        return T.cast(rval, dtype)
    return None
def _is_real_matrix(res):
return (res.type.dtype in ('float16', 'float32', 'float64') and
res.type.ndim == 2 and
res.type.broadcastable[0] is False and
res.type.broadcastable[1] is False) # cope with tuple vs. list
def _is_real_vector(res):
return (res.type.dtype in ('float16', 'float32', 'float64') and
res.type.ndim == 1 and
res.type.broadcastable[0] is False)
def _beta_L_plus_alpha_M(beta, L, alpha, M, recurse_flip=True):
    # print 'BETA L + ALPHA M', beta, L, alpha, M, recurse_flip
    # EXPRESSION: (beta * L) + (alpha * M)
    #
    # Try to absorb the expression above into a single gemm.  On success the
    # reachable branches return a pair `(replacement_list, dot22_node)` where
    # `dot22_node` is the Dot22 result being folded in (the caller removes it
    # from the graph); on failure returns `(False, False)`.
    # NOTE(review): the disabled `False and res_is_a(...)` branch returns a
    # bare list instead of a pair; it is unreachable, so harmless today.

    # we've already checked the client counts, now just make the type check.
    # if res_is_a(M, _dot22, 1):
    if M.owner and M.owner.op == _dot22:
        Ml, Mr = M.owner.inputs
        rval = [gemm_no_inplace(L, alpha, Ml, Mr, beta)]
        # print 'GEMM 0', rval, beta, L, alpha, M
        return rval, M

    # it also might be the case that there is a dimshuffle between the +
    # and the dot22. local_dot_to_dot22 in particular will put in such things.
    if (M.owner and isinstance(M.owner.op, T.DimShuffle) and
            M.owner.inputs[0].owner and
            isinstance(M.owner.inputs[0].owner.op, Dot22)):
        MM = M.owner.inputs[0]
        if M.owner.op.new_order == (0,):
            # it is making a column MM into a vector
            MMl, MMr = MM.owner.inputs
            g = gemm_no_inplace(L.dimshuffle(0, 'x'),
                                alpha, MMl, MMr, beta)
            rval = [g.dimshuffle(0)]
            return rval, MM
        if M.owner.op.new_order == (1,):
            # it is making a row MM into a vector
            MMl, MMr = MM.owner.inputs
            g = gemm_no_inplace(L.dimshuffle('x', 0),
                                alpha, MMl, MMr, beta)
            rval = [g.dimshuffle(1)]
            return rval, MM
        if len(M.owner.op.new_order) == 0:
            # it is making a 1x1 MM into a scalar
            MMl, MMr = MM.owner.inputs
            g = gemm_no_inplace(L.dimshuffle('x', 'x'),
                                alpha, MMl, MMr, beta)
            rval = [g.dimshuffle()]
            return rval, MM

    # this is False'd out because of inadequate testing.
    # TODO see ticket #237
    if False and res_is_a(M, gemm_no_inplace, 1):
        # EXPRESSION: (beta * L) + (alpha * (gemm_no_inplace(G, a, u, v, b)))
        # EXPRESSION: (beta * L) + alpha * (b * G) + alpha * a * dot(u, v)
        G, a, u, v, b = M.owner.inputs
        # print 'GEMM', G, L

        if res_is_a(G, _dot22, 1):
            # EXPRESSION: (beta * L) +
            #     (alpha * (gemm_no_inplace(dot(x,y), a, u, v, b)))
            x, y = G.owner.inputs

            # EXPRESSION: (beta * L) + (alpha * ((b*dot(x,y) +
            #     (a * dot(u, v)))))
            # EXPRESSION: (beta * L) + (alpha*b*dot(x,y)) +
            #     (alpha * a * dot(u, v))
            rval = [gemm_no_inplace(gemm_no_inplace(L, alpha * b, x, y, beta),
                                    alpha * a, u, v, 1.0)]
            return rval
        if (G is L):
            # EXPRESSION: (beta * L) + (alpha*b*L) + (alpha * a * dot(u, v))
            rval = [gemm_no_inplace(L, alpha * a, u, v, alpha * b + beta)]
            return rval
        if (1.0 != alpha):
            # at the very least, move the alpha inside the gemm_no_inplace
            rval = [beta * L + gemm_no_inplace(G, alpha * a, u, v, alpha * b)]
            return rval

    if recurse_flip:
        # Try the symmetric reading (alpha * M) + (beta * L), once.
        return _beta_L_plus_alpha_M(alpha, M, beta, L, recurse_flip=False)
    else:
        return False, False
def _gemm_canonicalize(r, scale, rval, maxclients):
    # Tries to interpret node as a sum of scalars * (vectors or matrices)
    #
    # Recursively flattens the add/sub/neg/mul graph rooted at `r` into
    # `rval`, a list of `(scale, variable)` pairs (one legacy branch appends
    # a bare scaled variable instead).  `scale` is the factor accumulated so
    # far; `maxclients`, when nonzero, stops recursion at variables with more
    # than that many clients.  Returns `rval` (mutated in place), or None if
    # `r` has no usable type.

    def scaled(thing):
        # Apply the accumulated scale cheaply: skip *1, negate for -1.
        if scale == 1:
            return thing
        if scale == -1 and thing.type.dtype != 'bool':
            return -thing
        else:
            return scale * thing

    try:
        r.type.broadcastable
    except Exception:
        return None

    if ((r.type.ndim not in (1, 2)) or
            r.type.dtype not in ('float16', 'float32', 'float64',
                                 'complex64', 'complex128')):
        # Unsupported rank/dtype: keep as-is.  NOTE: appended as a bare
        # variable, not a (scale, variable) pair.
        rval.append(scaled(r))
        return rval

    if maxclients and len(getattr(r, 'clients', [])) > maxclients:
        rval.append((scale, r))
        return rval

    if r.owner and r.owner.op == T.sub:
        _gemm_canonicalize(r.owner.inputs[0], scale, rval, 1)
        _gemm_canonicalize(r.owner.inputs[1], -scale, rval, 1)

    elif r.owner and r.owner.op == T.add:
        for i in r.owner.inputs:
            _gemm_canonicalize(i, scale, rval, 1)

    elif r.owner and r.owner.op == T.neg:
        _gemm_canonicalize(r.owner.inputs[0], -scale, rval, 1)

    elif r.owner and r.owner.op == T.mul:
        scalars = []
        vectors = []
        matrices = []
        for i in r.owner.inputs:
            if numpy.all(i.type.broadcastable):
                # scalar-like factor: strip DimShuffles to the base variable
                while i.owner and isinstance(i.owner.op, T.DimShuffle):
                    i = i.owner.inputs[0]
                if i.type.broadcastable:
                    scalars.append(i.dimshuffle())
                else:
                    scalars.append(i)
            elif _is_real_vector(i):
                vectors.append(i)
            elif _is_real_matrix(i):
                matrices.append(i)
            else:
                # just put the original arguments as in the base case
                rval.append((scale, r))
                return rval
        if len(matrices) == 1:
            assert len(vectors) == 0
            m = matrices[0]
            if len(scalars) == 0:
                _gemm_canonicalize(m, scale, rval, 1)
            elif len(scalars) == 1:
                _gemm_canonicalize(m, scaled(scalars[0]), rval, 1)
            else:
                _gemm_canonicalize(m, T.mul(scaled(scalars[0]), *scalars[1:]),
                                   rval, 1)
        elif len(vectors) == 1:
            assert len(matrices) == 0
            v = vectors[0]
            if len(scalars) == 0:
                _gemm_canonicalize(v, scale, rval, 1)
            elif len(scalars) == 1:
                _gemm_canonicalize(v, scaled(scalars[0]), rval, 1)
            else:
                _gemm_canonicalize(v, T.mul(scaled(scalars[0]),
                                            *scalars[1:]), rval, 1)
        else:  # lets not open this up
            rval.append((scale, r))
    else:
        rval.append((scale, r))
    return rval
def _factor_canonicalized(lst):
# remove duplicates from canonicalized list
# we only delete out of the right end of the list,
# once i has touched a list element, it is permantent
lst = list(lst)
# print 'FACTOR', lst
# for t in lst:
# if not isinstance(t, (list, tuple)):
# t = (t,)
# for e in t:
# try:
# theano.printing.debugprint(e)
# except TypeError:
# print e, type(e)
i = 0
while i < len(lst) - 1:
try:
s_i, M_i = lst[i]
except Exception:
i += 1
continue
j = i + 1
while j < len(lst):
try:
s_j, M_j = lst[j]
except Exception:
j += 1
continue
if M_i is M_j:
s_i = s_i + s_j
lst[i] = (s_i, M_i)
del lst[j]
else:
j += 1
i += 1
return lst
def _gemm_from_factored_list(lst):
    """
    Returns None, or a (replacement_list, dot22_node) pair to replace
    node.outputs, given `lst` of (scale, variable) pairs from
    _factor_canonicalized.
    """
    lst2 = []
    # Remove the tuple that can't be cast correctly.
    # This can happen when we try to cast a complex to a real
    for sM in lst:
        # Make every pair in list have matching dtypes
        # sM can be a tuple of 2 elements or a theano variable.
        if isinstance(sM, tuple):
            sm0, sm1 = sM
            sm0 = T.as_tensor_variable(sm0)
            if theano.scalar.upcast(sm0.dtype, sm1.dtype) == sm1.dtype:
                lst2.append((T.cast(sm0, sm1.dtype), sM[1]))
    lst = lst2

    def item_to_var(t):
        # (scale, var) pair -> scaled variable; anything else unchanged.
        try:
            s, M = t
        except Exception:
            return t
        if s == 1:
            return M
        if s == -1:
            return -M
        return s * M

    # Try every pair in the sM_list, trying to turn it into a gemm operation
    for i in xrange(len(lst) - 1):
        s_i, M_i = lst[i]
        for j in xrange(i + 1, len(lst)):
            s_j, M_j = lst[j]
            if M_i.type != M_j.type:
                continue
            # print 'TRYING', (s_i, M_i, s_j, M_j)
            gemm_of_sM_list, old_dot22 = _beta_L_plus_alpha_M(s_i, M_i,
                                                              s_j, M_j)
            # print 'GOT IT', gemm_of_sM_list
            if gemm_of_sM_list:
                assert len(gemm_of_sM_list) == 1
                # Re-add all the untouched terms around the new gemm.
                add_inputs = [item_to_var(input)
                              for k, input in enumerate(lst) if k not in (i, j)]
                add_inputs.extend(gemm_of_sM_list)
                if len(add_inputs) > 1:
                    rval = [T.add(*add_inputs)]
                else:
                    rval = add_inputs
                # print "RETURNING GEMM THIGN", rval
                return rval, old_dot22
    # Implicitly returns None when no pair could become a gemm.
def _gemm_from_node2(node):
    """
    :todo: In many expressions, there are many ways to turn it into a
        gemm.  For example dot(a,b) + c + d.  This function should
        return all of them, so that if one version of gemm causes a
        cycle in the graph, then another application of gemm can be
        tried.

    Returns ``(replacement_or_None, t_canonicalize, t_factor, t_list)``.
    """
    canonicalized = []
    tic = time.time()
    _gemm_canonicalize(node.outputs[0], 1.0, canonicalized, 0)
    canon_done = time.time()
    if len(canonicalized) <= 1:
        return None, canon_done - tic, 0, 0
    canonicalized = _factor_canonicalized(canonicalized)
    factor_done = time.time()
    rval = _gemm_from_factored_list(canonicalized)
    list_done = time.time()
    # It can happen that _factor_canonicalized and
    # _gemm_from_factored_list return a node with an incorrect
    # type.  This happens in particular when one of the scalar
    # factors forces the upcast of the whole expression.  In that
    # case, we simply skip that candidate for Gemm.  This was
    # discussed in
    # http://groups.google.com/group/theano-dev/browse_thread/thread/a3096c82856e3ad5,
    # but never made it into a trac ticket.
    if rval and (rval[0][0].type == node.outputs[0].type):
        return (rval, canon_done - tic, factor_done - canon_done,
                list_done - factor_done)
    return None, canon_done - tic, 0, 0
class GemmOptimizer(Optimizer):
    """Graph optimizer for inserting Gemm operations."""

    def __init__(self):
        Optimizer.__init__(self)
        # Set once a replacement fails to remove its Dot22 (warning itself
        # is currently disabled in apply()).
        self.warned = False

    def add_requirements(self, fgraph):
        fgraph.attach_feature(toolbox.ReplaceValidate())

    def apply(self, fgraph):
        """Repeatedly sweep `fgraph`, replacing add/sub/neg/mul subgraphs by
        Gemm nodes until a fixed point.  Returns the profiling tuple that
        `print_profile` consumes."""
        did_something = True
        nb_iter = 0
        nb_replacement = 0
        nb_replacement_didn_t_remove = 0
        nb_inconsistency_make = 0
        nb_inconsistency_replace = 0
        time_canonicalize = 0
        time_factor_can = 0
        time_factor_list = 0
        time_toposort = 0
        if fgraph.profile:
            validate_before = fgraph.profile.validate_time
            callbacks_before = fgraph.execute_callbacks_times.copy()
            callback_before = fgraph.execute_callbacks_time

        def on_import(new_node):
            # Nodes imported during a sweep join the current worklist
            # (closes over `node` and `nodelist` from the loop below).
            if new_node is not node:
                nodelist.append(new_node)

        u = theano.gof.opt.Updater(on_import, None, None,
                                   name="GemmOptimizer")
        fgraph.attach_feature(u)
        while did_something:
            nb_iter += 1
            t0 = time.time()
            nodelist = theano.gof.graph.io_toposort(fgraph.inputs, fgraph.outputs)
            time_toposort += time.time() - t0
            did_something = False
            nodelist.reverse()
            for node in nodelist:
                if not (isinstance(node.op, T.Elemwise) and
                        isinstance(node.op.scalar_op,
                                   (theano.scalar.Add, theano.scalar.Sub,
                                    theano.scalar.Neg, theano.scalar.Mul))):
                    continue
                if node not in fgraph.apply_nodes:
                    # This mean that we already removed this node from
                    # the graph
                    continue
                try:
                    new_outputs, time1, time2, time3 = _gemm_from_node2(node)
                    time_canonicalize += time1
                    time_factor_can += time2
                    time_factor_list += time3
                except InconsistencyError:
                    nb_inconsistency_make += 1
                    continue
                if new_outputs:
                    new_outputs, old_dot22 = new_outputs
                    assert len(new_outputs) == len(node.outputs)
                    new_outputs[0].tag.values_eq_approx = values_eq_approx_remove_inf_nan
                    try:
                        fgraph.replace_all_validate_remove(
                            list(zip(node.outputs, new_outputs)),
                            [old_dot22],
                            reason='GemmOptimizer',
                            # For now we disable the warning as we know case
                            # that we need to fix.
                            warn=False,  # warn=not self.warned
                        )
                        did_something = True
                        nb_replacement += 1
                    except InconsistencyError:
                        # TODO: retry other applications of gemm (see comment
                        # in _gemm_from_node)
                        nb_inconsistency_replace += 1
                    except ReplacementDidntRemovedError:
                        nb_replacement_didn_t_remove += 1
                        self.warned = True
        fgraph.remove_feature(u)
        if fgraph.profile:
            validate_time = fgraph.profile.validate_time - validate_before
            callback_time = fgraph.execute_callbacks_time - callback_before
            callbacks_time = {}
            for k, v in iteritems(fgraph.execute_callbacks_times):
                if k in callbacks_before:
                    callbacks_time[k] = v - callbacks_before[k]
                else:
                    callbacks_time[k] = v
        else:
            validate_time = None
            callback_time = None
            callbacks_time = {}
        return (self, nb_iter, nb_replacement, nb_replacement_didn_t_remove,
                nb_inconsistency_make, nb_inconsistency_replace,
                time_canonicalize, time_factor_can,
                time_factor_list, time_toposort,
                validate_time, callback_time, callbacks_time,)

    @staticmethod
    def print_profile(stream, prof, level=0):
        # Pretty-print the tuple returned by apply(); indices match the
        # return statement above.
        blanc = (' ' * level)
        print(blanc, "GemmOptimizer", file=stream)
        print(blanc, " nb_iter", prof[1], file=stream)
        print(blanc, " nb_replacement", prof[2], file=stream)
        print(blanc, " nb_replacement_didn_t_remove", prof[3], file=stream)
        print(blanc, " nb_inconsistency_make", prof[4], file=stream)
        print(blanc, " nb_inconsistency_replace", prof[5], file=stream)
        print(blanc, " time_canonicalize", prof[6], file=stream)
        print(blanc, " time_factor_can", prof[7], file=stream)
        print(blanc, " time_factor_list", prof[8], file=stream)
        print(blanc, " time_toposort", prof[9], file=stream)
        print(blanc, " validate_time", prof[10], file=stream)
        print(blanc, " callback_time", prof[11], file=stream)
        if prof[11] > 1:
            print(blanc, " callbacks_time", file=stream)
            for i in sorted(iteritems(prof[12]), key=lambda a: a[1]):
                if i[1] > 0:
                    print(i)
class Dot22(GemmRelated):
    """Compute a matrix-matrix product.

    This is a specialization of the more general Dot().
    """

    def make_node(self, x, y):
        """Build an Apply for dot(x, y) on two rank-2 float/complex
        variables of the same dtype."""
        dtypes = ('float16', 'float32', 'float64', 'complex64', 'complex128')
        if x.type.ndim != 2 or x.type.dtype not in dtypes:
            raise TypeError(x)
        if y.type.ndim != 2 or y.type.dtype not in dtypes:
            raise TypeError(y)
        if y.type.dtype != x.type.dtype:
            raise TypeError('dtype mismatch to Dot22')
        # Output dim i is broadcastable iff the matching operand dim is.
        bz = (x.type.broadcastable[0], y.type.broadcastable[1])
        outputs = [T.tensor(x.type.dtype, bz)]
        return Apply(self, [x, y], outputs)

    def perform(self, node, inp, out):
        """Python fallback using numpy.dot."""
        x, y = inp
        z, = out
        try:
            z[0] = numpy.asarray(numpy.dot(x, y))
        except ValueError as e:
            # The error raised by numpy has no shape information, we mean to
            # add that
            e.args = e.args + (x.shape, y.shape)
            raise

    def infer_shape(self, node, input_shapes):
        # (rows of x, cols of y)
        return [[input_shapes[0][0], input_shapes[1][1]]]

    # C fragment: (re)allocate the output if its shape does not match.
    setup_z_Nz_Sz = """
if ((NULL == %(_zout)s)
    || (PyArray_DIMS(%(_zout)s)[0] != PyArray_DIMS(%(_x)s)[0])
    || (PyArray_DIMS(%(_zout)s)[1] != PyArray_DIMS(%(_y)s)[1]))
{
    if (NULL != %(_zout)s) Py_XDECREF(%(_zout)s);
    npy_intp dims[2];
    dims[0] = PyArray_DIMS(%(_x)s)[0];
    dims[1] = PyArray_DIMS(%(_y)s)[1];
    %(_zout)s = (PyArrayObject*)PyArray_SimpleNew(2, dims,
                                                  PyArray_TYPE(%(_x)s));
    //fprintf(stderr, "Dot Allocating %%i %%i\\n", dims[0], dims[1]);
    if(!%(_zout)s) {
        PyErr_SetString(PyExc_MemoryError,
                        "failed to alloc dot22 output");
        %(fail)s
    }
}
Nz = PyArray_DIMS(%(_zout)s);
Sz = PyArray_STRIDES(%(_zout)s);
"""
    # No scalar coefficients to check: a and b are the constants below.
    check_ab_double_or_float = ""
    case_float_ab_constants = """
float a = 1.0;
float b = 0.0;
"""
    case_double_ab_constants = """
double a = 1.0;
double b = 0.0;
"""

    def c_code(self, node, name, inp, out, sub):  # DEBUG
        """Generate C code; falls back to the default implementation when no
        BLAS library is linked, and is undefined for complex dtypes."""
        _x, _y = inp
        _zout, = out
        if node.inputs[0].type.dtype.startswith('complex'):
            raise utils.MethodNotDefined('%s.c_code'
                                         % self.__class__.__name__)
        if len(self.c_libraries()) <= 0:
            return super(Dot22, self).c_code(node, name, (_x, _y),
                                             (_zout, ), sub)
        full_code = self.build_gemm_call() % dict(locals(), **sub)
        return full_code

    def c_code_cache_version(self):
        # Combine this class's version with the shared template version.
        gv = self.build_gemm_version()
        if gv:
            return (2,) + gv
        else:
            return gv
# Module-level singleton used by the local optimizers below.
_dot22 = Dot22()
@local_optimizer([T.Dot])
def local_dot_to_dot22(node):
    """Rewrite Dot (any rank-1/2 combination) as a rank-2 _dot22, inserting
    dimshuffles to promote vectors to matrices and drop the extra dims."""
    # This works for tensor.outer too because basic.outer is a macro that
    # produces a dot(dimshuffle,dimshuffle) of form 4 below
    if not isinstance(node.op, T.Dot):
        return

    x, y = node.inputs
    if y.type.dtype != x.type.dtype:
        # TODO: upcast one so the types match
        _logger.info('Not optimizing dot with inputs %s %s %s %s',
                     x, y, x.type, y.type)
        return

    if y.type.dtype in ['float16', 'float32', 'float64', 'complex64', 'complex128']:
        ndims = (x.ndim, y.ndim)
        if ndims == (2, 2):
            # matrix @ matrix
            return [_dot22(*node.inputs)]
        if ndims == (2, 1):
            # matrix @ vector: promote y to a column matrix
            return [_dot22(x, y.dimshuffle(0, 'x')).dimshuffle(0)]
        if ndims == (1, 2):
            # vector @ matrix: promote x to a row matrix
            return [_dot22(x.dimshuffle('x', 0), y).dimshuffle(1)]
        if ndims == (1, 1):
            # vector @ vector: promote both and drop all dims of the result
            return [_dot22(x.dimshuffle('x', 0),
                           y.dimshuffle(0, 'x')).dimshuffle()]

    _logger.info('Not optimizing dot with inputs %s %s %s %s',
                 x, y, x.type, y.type)
@local_optimizer([gemm_no_inplace], inplace=True)
def local_inplace_gemm(node):
    """Replace gemm_no_inplace with its destructive (inplace) variant."""
    if node.op != gemm_no_inplace:
        return
    return [gemm_inplace(*node.inputs)]
@local_optimizer([gemv_no_inplace], inplace=True)
def local_inplace_gemv(node):
    """Replace gemv_no_inplace with its destructive (inplace) variant."""
    if node.op != gemv_no_inplace:
        return
    return [gemv_inplace(*node.inputs)]
@local_optimizer([ger], inplace=True)
def local_inplace_ger(node):
    """Replace ger with its destructive (inplace) variant."""
    if node.op != ger:
        return
    return [ger_destructive(*node.inputs)]
@local_optimizer([gemm_no_inplace])
def local_gemm_to_gemv(node):
    """GEMM acting on row or column matrices -> GEMV."""
    if node.op == gemm_no_inplace:
        z, a, x, y, b = node.inputs
        if z.broadcastable == x.broadcastable == (True, False):
            # z and x are row matrices (shape (1, n)): drop the unit axis
            # and compute the row as y.T @ x_vec via gemv.
            r = gemv_no_inplace(z.dimshuffle(1), a, y.T, x.dimshuffle(1), b)
            return [r.dimshuffle('x', 0)]
        if z.broadcastable == y.broadcastable == (False, True):
            # z and y are column matrices (shape (m, 1)): drop the unit
            # axis and compute the column as x @ y_vec via gemv.
            r = gemv_no_inplace(z.dimshuffle(0), a, x, y.dimshuffle(0), b)
            return [r.dimshuffle(0, 'x')]
@local_optimizer([gemm_no_inplace])
def local_gemm_to_ger(node):
    """GEMM computing an outer-product -> GER."""
    if node.op == gemm_no_inplace:
        z, a, x, y, b = node.inputs
        if x.broadcastable[1] and y.broadcastable[0]:
            # x and y are both vectors so this might qualifies for a GER
            xv = x.dimshuffle(0)
            yv = y.dimshuffle(1)
            try:
                bval = T.get_scalar_constant_value(b)
            except T.NotScalarConstantError:
                # b isn't a constant, GEMM is doing useful pre-scaling
                return

            if bval == 1:  # best case a natural GER
                rval = ger(z, a, xv, yv)
                return [rval]
            elif bval == 0:  # GER on zeros_like should be faster than GEMM
                zeros = T.zeros([x.shape[0], y.shape[1]], x.dtype)
                rval = ger(zeros, a, xv, yv)
                return [rval]
            else:
                # if bval is another constant, then z is being usefully
                # pre-scaled and GER isn't really the right tool for the job.
                return
# TODO: delete this optimization when we have the proper dot->gemm->ger pipeline
#       working
@local_optimizer([_dot22])
def local_dot22_to_ger_or_gemv(node):
    """dot22 computing an outer-product -> GER.

    Also rewrites dot22 with a vector operand (detected via broadcastable
    flags) as a gemv, and vector-vector products as a length-1 gemv.
    """
    if node.op == _dot22:
        x, y = node.inputs
        xb = x.broadcastable
        yb = y.broadcastable
        one = T.as_tensor_variable(numpy.asarray(1, dtype=x.dtype))
        zero = T.as_tensor_variable(numpy.asarray(0, dtype=x.dtype))
        if xb[1] and yb[0]:
            # x and y are both vectors so this might qualifies for a GER
            xv = x.dimshuffle(0)
            yv = y.dimshuffle(1)
            zeros = T.zeros([x.shape[0], y.shape[1]], dtype=x.dtype)
            rval = ger(zeros, one, xv, yv)
            return [rval]
        if xb[0] and yb[1]:
            # x and y are both vectors so this qualifies for a sdot / ddot
            # TODO: Theano doesn't have a sdot, but gemv is better than _dot22
            xv = x.dimshuffle(1)
            zeros = T.AllocEmpty(x.dtype)(1)
            rval = gemv_no_inplace(zeros, one, y.T, xv, zero)
            return [rval.dimshuffle('x', 0)]
        if xb[0] and not yb[0] and not yb[1]:
            # x is vector, y is matrix so try gemv
            xv = x.dimshuffle(1)
            zeros = T.AllocEmpty(x.dtype)(y.shape[1])
            rval = gemv_no_inplace(zeros, one, y.T, xv, zero)
            return [rval.dimshuffle('x', 0)]
        if not xb[0] and not xb[1] and yb[1]:
            # x is matrix, y is vector, try gemv
            yv = y.dimshuffle(0)
            zeros = T.AllocEmpty(x.dtype)(x.shape[0])
            rval = gemv_no_inplace(zeros, one, x, yv, zero)
            return [rval.dimshuffle(0, 'x')]
#################################
#
# Set up the BlasOpt optimizer
#
#################################
blas_optdb = SequenceDB()

# run after numerical stability optimizations (1.5)
optdb.register('BlasOpt', blas_optdb, 1.7, 'fast_run', 'fast_compile')
# run before specialize (2.0) because specialize is basically a
# free-for-all that makes the graph crazy.

# fast_compile is needed to have GpuDot22 created.
blas_optdb.register('local_dot_to_dot22',
                    in2out(local_dot_to_dot22),
                    0, 'fast_run', 'fast_compile')
blas_optdb.register('gemm_optimizer',
                    GemmOptimizer(),
                    10, 'fast_run')
blas_optdb.register('local_gemm_to_gemv',
                    EquilibriumOptimizer([local_gemm_to_gemv,
                                          local_gemm_to_ger,
                                          local_dot22_to_ger_or_gemv,
                                          local_dimshuffle_lift],
                                         max_use_ratio=5,
                                         ignore_newtrees=False),
                    15, 'fast_run')

# After destroyhandler(49.5) but before we try to make elemwise things
# inplace (75)
blas_opt_inplace = in2out(local_inplace_gemm,
                          local_inplace_gemv,
                          local_inplace_ger,
                          name="blas_opt_inplace")
optdb.register('InplaceBlasOpt',
               blas_opt_inplace,
               70.0, 'fast_run', 'inplace', 'blas_opt_inplace')
class Dot22Scalar(GemmRelated):
    """Compute a scaled matrix-matrix product: scalar * dot(x, y).

    This is a specialization of the more general Dot().
    Used to call an optimized gemm implementation.
    Also used to generate a gemm later.
    """

    def make_node(self, x, y, a):
        """Validate ranks and dtypes, then build the Apply node.

        x, y must be rank-2; a must be a 0-d scalar of the same (float or
        complex) dtype as x and y.
        """
        if a.ndim != 0:
            raise TypeError(Gemm.E_scalar, a)
        if x.ndim != 2:
            raise TypeError(Gemm.E_rank, x)
        if y.ndim != 2:
            raise TypeError(Gemm.E_rank, y)

        if not (a.dtype == x.dtype == y.dtype):
            raise TypeError('Dot22Scalar requires matching dtypes',
                            (a.dtype, x.dtype, y.dtype))

        if (not a.dtype.startswith('float') and
                not a.dtype.startswith('complex')):
            raise TypeError('Dot22Scalar requires float or complex args',
                            a.dtype)

        # Output broadcastability follows the (rows of x, columns of y).
        bz = [x.type.broadcastable[0], y.type.broadcastable[1]]
        outputs = [T.tensor(x.type.dtype, bz)]
        return Apply(self, [x, y, a], outputs)

    def perform(self, node, inp, out):
        """Python fallback: scalar * numpy.dot(x, y)."""
        x, y, scalar = inp
        z, = out
        try:
            z[0] = numpy.asarray(scalar * numpy.dot(x, y))
        except ValueError as e:
            # The error raised by numpy has no shape information, we
            # mean to add that
            e.args = e.args + (x.shape, y.shape)
            raise

    def infer_shape(self, node, input_shapes):
        # Output shape is (rows of x, columns of y).
        return [[input_shapes[0][0], input_shapes[1][1]]]

    # Reuse the output-allocation C snippet from Dot22.
    setup_z_Nz_Sz = Dot22.setup_z_Nz_Sz

    # Unlike Dot22, the scaling factor comes in at runtime, so its dtype
    # must be checked in the generated C code.
    check_ab_double_or_float = """
        if ((PyArray_DESCR(%(_a)s)->type_num != NPY_DOUBLE)
            && (PyArray_DESCR(%(_a)s)->type_num != NPY_FLOAT))
        {PyErr_SetString(PyExc_NotImplementedError,
                         "type(a) is not double or float"); %(fail)s;}

        """
    # Load alpha from the runtime scalar (converting from float or double
    # as needed); beta stays 0.
    case_float_ab_constants = """
        #define REAL float
        float a = (PyArray_DESCR(%(_a)s)->type_num == NPY_FLOAT)
        ? (REAL)(((float*)PyArray_DATA(%(_a)s))[0])
        : (REAL)(((double*)PyArray_DATA(%(_a)s))[0]);
        #undef REAL
        float b = 0.0;
        """

    case_double_ab_constants = """
        #define REAL double
        double a = (PyArray_DESCR(%(_a)s)->type_num == NPY_FLOAT)
        ? (REAL)(((float*)PyArray_DATA(%(_a)s))[0])
        : (REAL)(((double*)PyArray_DATA(%(_a)s))[0]);
        #undef REAL
        double b = 0.0;
        """

    def c_code(self, node, name, inp, out, sub):
        """Return C code for a * dot(x, y) through the gemm template.

        Refuses complex dtypes and falls back to the parent class's C code
        when no BLAS library is linked.
        """
        _x, _y, _a = inp
        _zout, = out
        if node.inputs[0].type.dtype.startswith('complex'):
            raise utils.MethodNotDefined('%s.c_code'
                                         % self.__class__.__name__)
        if len(self.c_libraries()) <= 0:
            return super(Dot22Scalar, self).c_code(node, name, (_x, _y),
                                                   (_zout, ), sub)
        full_code = self.build_gemm_call() % dict(locals(), **sub)
        return full_code

    def c_code_cache_version(self):
        """Cache version: (2,) prepended to the gemm build version tuple."""
        gv = self.build_gemm_version()
        if gv:
            return (2,) + gv
        else:
            return gv
# Singleton Dot22Scalar instance used by the optimizations below.
_dot22scalar = Dot22Scalar()
@local_optimizer([T.mul])
def local_dot22_to_dot22scalar(node):
    """Merge a scalar multiplication into a dot22, yielding dot22scalar.

    Notes
    -----
    Previous attempts to alter this optimization to replace dot22 with
    gemm instead of dot22scalar resulted in some Scan nodes being
    duplicated and the ScanSaveMem optimization never running on them,
    resulting in highly increased memory usage. Until this issue is
    resolved, this optimization should keep using dot22scalar instead of
    gemm.

    We upcast the scalar if, after the multiplication with the dot, this
    gives the same type.

    We execute this optimizer after the gemm optimizer. This
    allows giving more priority to gemm, which gives more speed up
    than this optimizer, while allowing the gemm optimizer to ignore
    this op.

    TODO: support when we can reorder the mul to generate a
    dot22scalar or fix the canonizer to merge them (1 mul with multiple
    inputs).
    """
    if node.op != T.mul:
        return False
    i_dot22 = [x.owner and x.owner.op == _dot22 for x in node.inputs]
    if not any(i_dot22):
        return False  # no dot22
    if i_dot22.count(True) > 1:
        # TODO: try each of them.
        pass
        # return False #TODO fix
    dot22_idx = i_dot22.index(True)
    d = node.inputs[dot22_idx]
    i_scalar = [_as_scalar(x, dtype=d.dtype) for x in node.inputs]
    if not any(i_scalar):
        # Check if we can reorder the graph as this mul have a mul in inputs.
        # We support only 1 additional level of mul.
        # The canonizer should have merged those mul together.
        i_mul = [x.owner and x.owner.op == T.mul and
                 any([_as_scalar(x_i, dtype=d.dtype)
                      for x_i in x.owner.inputs])
                 for x in node.inputs]
        if not any(i_mul):
            # no scalar in input and no multiplication
            # if there was a multiplication we could reorder the graph
            # by the associativity of the graph.
            return False

        mul_idx = i_mul.index(True)  # The first one should always work
        m = node.inputs[mul_idx]

        scalar_idx = -1
        for i, x in enumerate(m.owner.inputs):
            if _as_scalar(x, dtype=d.dtype) and (theano.scalar.upcast(
                    x.type.dtype, d.type.dtype) == d.type.dtype):
                scalar_idx = i
                break

        if scalar_idx < 0:
            _logger.info('Not optimizing dot22 with inputs %s %s, as the'
                         ' type of the scalar cannot be upcasted to the'
                         ' matrix type',
                         node.inputs, [x.type for x in node.inputs])
            return False
        a = T.cast(_as_scalar(m.owner.inputs[scalar_idx],
                              dtype=d.dtype), d.type.dtype)
        assert not a.type.ndim
        dot = _dot22scalar(d.owner.inputs[0], d.owner.inputs[1], a)

        # The other inputs to the original node that were
        # neither part of the dot22 or this mul should be
        # factors in the returned "mul" node.
        assert dot22_idx != mul_idx
        other_factors = [inpt
                         for i, inpt in enumerate(node.inputs)
                         if i not in (dot22_idx, mul_idx)]
        other_m_inputs = [inpt
                          for i, inpt in enumerate(m.owner.inputs)
                          if i != scalar_idx]

        return [T.mul(dot, *(other_factors + other_m_inputs))]

    scalar_idx = -1
    for i, x in enumerate(node.inputs):
        if (i != dot22_idx and i_scalar[i] is not None and
                (theano.scalar.upcast(x.type.dtype, d.type.dtype) ==
                 d.type.dtype)):
            scalar_idx = i
            break
    if scalar_idx < 0:
        _logger.info('Not optimizing dot22 with inputs %s %s, as the type '
                     'of the scalar cannot be upcasted to the matrix type',
                     node.inputs, [x.type for x in node.inputs])
        return False
    assert scalar_idx < len(node.inputs)
    s = node.inputs[scalar_idx]
    o = copy.copy(node.inputs)
    o.remove(d)
    o.remove(s)

    a = T.cast(i_scalar[scalar_idx], d.type.dtype)
    assert not a.type.ndim
    if len(o) == 0:
        return [_dot22scalar(d.owner.inputs[0], d.owner.inputs[1], a)]
    else:
        return [T.mul(_dot22scalar(d.owner.inputs[0],
                                   d.owner.inputs[1], a), *o)]
# must happen after gemm as the gemm optimizer doesn't understand
# dot22scalar and gemm gives more speed up than dot22scalar
blas_optdb.register('local_dot22_to_dot22scalar',
                    in2out(local_dot22_to_dot22scalar),
                    11, 'fast_run')
class BatchedDot(Op):
    """
    Computes the batched dot product of two variables:

        batched_dot(a, b)[i] = dot(a[i], b[i])
    """
    __props__ = ()

    def make_node(self, *inputs):
        """Build the Apply node for a batched dot.

        Both inputs must be rank 2 or 3 (axis 0 is the batch axis); they
        are upcast to a common dtype if necessary.
        """
        inputs = list(map(T.as_tensor_variable, inputs))

        if len(inputs) != 2:
            raise TypeError("theano.tensor.blas.BatchedDot: 2 arguments"
                            " required, %d given " % len(inputs))
        if inputs[0].ndim not in (2, 3):
            raise TypeError("theano.tensor.blas.BatchedDot: input 0 (0-indexed)"
                            " must have ndim of 2 or 3, %d given. Consider"
                            " calling theano.tensor.batched_dot instead."
                            % inputs[0].ndim)
        if inputs[1].ndim not in (2, 3):
            raise TypeError("theano.tensor.blas.BatchedDot: input 1 (0-indexed)"
                            " must have ndim of 2 or 3, %d given. Consider"
                            " calling theano.tensor.batched_dot instead."
                            % inputs[1].ndim)

        dtype = theano.scalar.upcast(*[input.type.dtype for input in inputs])
        # upcast inputs to common dtype if needed
        upcasted_inputs = [T.cast(input, dtype) for input in inputs]
        # Batch axis is broadcastable only if it is in both inputs; the
        # remaining axes come from x (all but its last) and y (after its
        # summation axis).
        broadcastable = ((inputs[0].type.broadcastable[0] or
                          inputs[1].type.broadcastable[0],) +
                         inputs[0].type.broadcastable[1:-1] +
                         inputs[1].type.broadcastable[2:])
        return Apply(self, upcasted_inputs, [T.tensor(dtype, broadcastable)])

    def perform(self, node, inp, out):
        """Python fallback: loop over the batch axis calling numpy.dot."""
        x, y = inp
        z, = out

        if x.shape[0] != y.shape[0]:
            raise TypeError(
                "theano.tensor.blas.BatchedDot: inputs [%s] must have the"
                " same size in axis 0, but have sizes [%s]." %
                (", ".join(map(str, inp)),
                 ", ".join([str(i.shape[0]) for i in inp])))

        shape = self.infer_shape(node, [i.shape for i in inp])[0]
        dtype = node.outputs[0].dtype
        z0 = z[0] = numpy.empty(shape, dtype=dtype)
        for i in xrange(z0.shape[0]):
            z0[i] = numpy.dot(x[i], y[i])

    def c_support_code(self):
        # C++ template looping over the batch axis and dispatching each
        # slice to a single gemm call, handling all stride layouts.
        batch_gemm_defn = """
        template<typename dtype, typename function>
        bool batch_gemm(function gemm, int type_size,
                        PyArrayObject* xs, PyArrayObject* ys, PyArrayObject* zs) {
            npy_intp *Nx = PyArray_DIMS(xs), *Sx = PyArray_STRIDES(xs);
            npy_intp *Ny = PyArray_DIMS(ys), *Sy = PyArray_STRIDES(ys);
            npy_intp *Nz = PyArray_DIMS(zs), *Sz = PyArray_STRIDES(zs);

            if (Nx[0] != Ny[0]) {
                PyErr_Format(PyExc_ValueError,
                             "Shape mismatch: batch sizes unequal."
                             " x.shape is (%d, %d, %d),"
                             " y.shape is (%d, %d, %d).",
                             Nx[0], Nx[1], Nx[2],
                             Ny[0], Ny[1], Ny[2]);
                return 1;
            }

            if (Nx[2] != Ny[1]) {
                PyErr_Format(PyExc_ValueError,
                             "Shape mismatch: summation axis sizes unequal."
                             " x.shape is (%d, %d, %d),"
                             " y.shape is (%d, %d, %d).",
                             Nx[0], Nx[1], Nx[2],
                             Ny[0], Ny[1], Ny[2]);
                return 1;
            }

            /* encode the stride structure of _x,_y,_z into a single integer. */
            int unit = 0;
            unit |= ((Sx[2] == type_size || Nx[2] == 1) ? 0x0 : (Sx[1] == type_size || Nx[1]==1) ? 0x1 : 0x2) << 8;
            unit |= ((Sy[2] == type_size || Ny[2] == 1) ? 0x0 : (Sy[1] == type_size || Ny[1]==1) ? 0x1 : 0x2) << 4;
            unit |= ((Sz[2] == type_size || Nz[2] == 1) ? 0x0 : (Sz[1] == type_size || Nz[1]==1) ? 0x1 : 0x2) << 0;

            /* create appropriate strides for malformed matrices that are row or column
             * vectors, or empty matrices.
             * In that case, the value of the stride does not really matter, but
             * some versions of BLAS insist that:
             *  - they are not smaller than the number of elements in the array,
             *  - they are not 0.
             */
            int sx_1 = (Nx[1] > 1) ? Sx[1]/type_size : (Nx[2] + 1);
            int sx_2 = (Nx[2] > 1) ? Sx[2]/type_size : (Nx[1] + 1);
            int sy_1 = (Ny[1] > 1) ? Sy[1]/type_size : (Ny[2] + 1);
            int sy_2 = (Ny[2] > 1) ? Sy[2]/type_size : (Ny[1] + 1);
            int sz_1 = (Nz[1] > 1) ? Sz[1]/type_size : (Nz[2] + 1);
            int sz_2 = (Nz[2] > 1) ? Sz[2]/type_size : (Nz[1] + 1);

            dtype* x = (dtype*)PyArray_DATA(xs);
            dtype* y = (dtype*)PyArray_DATA(ys);
            dtype* z = (dtype*)PyArray_DATA(zs);

            dtype a = 1.0;
            dtype b = 0.0;
            char N = 'N';
            char T = 'T';
            int Nz1 = Nz[1], Nz2 = Nz[2], Nx2 = Nx[2];

            // loop over batch axis
            for (int i = 0; i < Nz[0]; i++) {
                switch(unit)
                {
                    case 0x000: gemm(&N, &N, &Nz2, &Nz1, &Nx2, &a, y, &sy_1, x, &sx_1, &b, z, &sz_1); break;
                    case 0x100: gemm(&N, &T, &Nz2, &Nz1, &Nx2, &a, y, &sy_1, x, &sx_2, &b, z, &sz_1); break;
                    case 0x010: gemm(&T, &N, &Nz2, &Nz1, &Nx2, &a, y, &sy_2, x, &sx_1, &b, z, &sz_1); break;
                    case 0x110: gemm(&T, &T, &Nz2, &Nz1, &Nx2, &a, y, &sy_2, x, &sx_2, &b, z, &sz_1); break;
                    case 0x001: gemm(&T, &T, &Nz1, &Nz2, &Nx2, &a, x, &sx_1, y, &sy_1, &b, z, &sz_2); break;
                    case 0x101: gemm(&N, &T, &Nz1, &Nz2, &Nx2, &a, x, &sx_2, y, &sy_1, &b, z, &sz_2); break;
                    case 0x011: gemm(&T, &N, &Nz1, &Nz2, &Nx2, &a, x, &sx_1, y, &sy_2, &b, z, &sz_2); break;
                    case 0x111: gemm(&N, &N, &Nz1, &Nz2, &Nx2, &a, x, &sx_2, y, &sy_2, &b, z, &sz_2); break;
                    default: PyErr_SetString(PyExc_ValueError, "some matrix has no unit stride"); return 1;
                };
                x += Sx[0] / type_size;
                y += Sy[0] / type_size;
                z += Sz[0] / type_size;
            }

            return 0;
        }
        """
        return blas_header_text() + batch_gemm_defn

    def c_libraries(self):
        return ldflags()

    def c_compile_args(self):
        return ldflags(libs=False, flags=True)

    def c_lib_dirs(self):
        return ldflags(libs=False, libs_dir=True)

    def c_header_dirs(self):
        return ldflags(libs=False, include_dir=True)

    def c_code_cleanup(self, node, name, inputs, outputs, sub):
        # Release the tensor3 views created by c_code.
        return """
        // clean up views
        Py_XDECREF(xs); xs = 0;
        Py_XDECREF(ys); ys = 0;
        Py_XDECREF(zs); zs = 0;
        """

    def c_code(self, node, name, inp, out, sub):
        """Generate C code that dispatches to batch_gemm (see c_support_code).

        Inputs/outputs of rank 2 are reshaped to tensor3 views so a single
        tensor3-tensor3 code path suffices.
        """
        _x, _y = inp
        _z, = out
        fail = sub["fail"]

        # generate contiguity condition
        def contiguous(var, ndim):
            strides = "PyArray_STRIDES(%s)" % var
            if ndim == 1:
                return "{strides}[0] == type_size".format(strides=strides)
            return " && ".join([
                " && ".join("{strides}[{i}] > 0 && {strides}[{i}] % type_size == 0"
                            .format(strides=strides, i=i) for i in range(1, ndim)),
                "(%s)" % " || ".join("{strides}[{i}] == type_size"
                                     .format(strides=strides, i=i) for i in range(1, ndim)),
            ])

        x_ndim, y_ndim, z_ndim = node.inputs[0].ndim, node.inputs[1].ndim, node.outputs[0].ndim

        # generate code to allocate output based on runtime input shapes
        z_dims = ["PyArray_DIMS(%s)[0]" % _x]
        if x_ndim == 3:
            z_dims.append("PyArray_DIMS(%s)[1]" % _x)
        if y_ndim == 3:
            z_dims.append("PyArray_DIMS(%s)[2]" % _y)
        assert len(z_dims) == z_ndim

        z_shape_correct = " && ".join("PyArray_DIMS(%s)[%i] == %s"
                                      % (_z, i, dim) for i, dim in enumerate(z_dims))
        z_shape = ", ".join(z_dims)
        z_contiguous = contiguous(_z, z_ndim)
        allocate = """
            if (NULL == %(_z)s || !(%(z_shape_correct)s)  || !(%(z_contiguous)s))
            {
                npy_intp dims[%(z_ndim)s] = {%(z_shape)s};
                Py_XDECREF(%(_z)s);
                %(_z)s = (PyArrayObject*)PyArray_SimpleNew(
                    %(z_ndim)s, dims, PyArray_TYPE(%(_x)s));
                if(!%(_z)s) {
                    PyErr_SetString(PyExc_MemoryError,
                                    "failed to alloc BatchedDot output");
                    %(fail)s
                }
            }
        """ % locals()

        # code to reallocate inputs contiguously if necessary
        contiguate = []
        for var, ndim in [(_x, x_ndim), (_y, y_ndim)]:
            _contiguous = contiguous(var, ndim)
            contiguate.append("""
                if (!(%(_contiguous)s)) {
                    PyArrayObject * _copy = (PyArrayObject *) PyArray_Copy(%(var)s);
                    if (!_copy)
                        %(fail)s
                    Py_XDECREF(%(var)s);
                    %(var)s = _copy;
                }
            """ % locals())
        contiguate = "\n".join(contiguate)

        def c_dimshuffle(newname, oldname, shape):
            # Build a no-copy reshape (adding unit axes) of oldname into
            # newname; `shape` entries are source axes or None for new axes.
            _fail = fail
            _shape = ", ".join("1" if axis is None else "PyArray_DIMS(%s)[%i]" % (oldname, axis)
                               for axis in shape)
            return """{
                npy_intp dims[3] = {%(_shape)s};
                PyArray_Dims newshape = {dims, 3};
                %(newname)s = (PyArrayObject*)PyArray_Newshape(%(oldname)s, &newshape, NPY_ANYORDER);
                if (!%(newname)s)
                    %(_fail)s
                // make sure we didn't accidentally copy
                assert(PyArray_DATA(%(oldname)s) == PyArray_DATA(%(newname)s));
            }""" % locals()

        # create tensor3 views for any of x, y, z that are not tensor3, so that
        # we only need to implement the tensor3-tensor3 batched dot product.
        # xs, ys and zs will point to these views, or to the original array if
        # it was already tensor3.
        # in the latter case, we artificially increase the reference count of
        # the original array so that the c_code_cleanup method can decref them
        # all indiscriminately.
        upcast = []
        if x_ndim == 3:
            upcast.append("xs = %(_x)s; Py_XINCREF(xs);")
        elif x_ndim == 2:
            upcast.append(c_dimshuffle("xs", _x, (0, None, 1)))
        if y_ndim == 3:
            upcast.append("ys = %(_y)s; Py_XINCREF(ys);")
        elif y_ndim == 2:
            upcast.append(c_dimshuffle("ys", _y, (0, 1, None)))
        if z_ndim == 3:
            upcast.append("zs = %(_z)s; Py_XINCREF(zs);")
        else:
            upcast.append(c_dimshuffle(
                "zs", _z, (0,
                           None if x_ndim == 2 else 1,
                           None if y_ndim == 2 else 1)))
        upcast = "\n".join(upcast) % locals()

        return """
        int type_num = PyArray_DESCR(%(_x)s)->type_num;
        int type_size = PyArray_DESCR(%(_x)s)->elsize; // in bytes

        // xs, ys, zs will point to views onto %(_x)s, %(_y)s, %(_z)s
        PyArrayObject *xs = 0, *ys = 0, *zs = 0;

        if (PyArray_NDIM(%(_x)s) != %(x_ndim)s) {
            PyErr_Format(PyExc_NotImplementedError,
                         "rank(x) != %(x_ndim)s. rank(x) is %%d.",
                         PyArray_NDIM(%(_x)s));
            %(fail)s;
        }
        if (PyArray_NDIM(%(_y)s) != %(y_ndim)s) {
            PyErr_Format(PyExc_NotImplementedError,
                         "rank(y) != %(y_ndim)s. rank(y) is %%d.",
                         PyArray_NDIM(%(_y)s));
            %(fail)s;
        }
        if (%(_z)s && PyArray_NDIM(%(_z)s) != %(z_ndim)s) {
            PyErr_Format(PyExc_NotImplementedError,
                         "rank(z) != %(z_ndim)s. rank(z) is %%d.",
                         PyArray_NDIM(%(_z)s));
            %(fail)s;
        }

        // allocate output
        %(allocate)s
        // reallocate any noncontiguous arrays or arrays with invalid strides
        %(contiguate)s
        // add dims to make sure everything is tensor3
        %(upcast)s
        // from here on, use xs, ys and zs as they are tensor3 and share memory
        // with the original %(_x)s, %(_y)s and %(_z)s arrays.

        if ((PyArray_DESCR(xs)->type_num != NPY_DOUBLE)
            && (PyArray_DESCR(xs)->type_num != NPY_FLOAT))
        {PyErr_SetString(PyExc_NotImplementedError, "type(x) is not double or float"); %(fail)s;}

        if ((PyArray_DESCR(ys)->type_num != NPY_DOUBLE)
            && (PyArray_DESCR(ys)->type_num != NPY_FLOAT))
        {PyErr_SetString(PyExc_NotImplementedError, "type(y) is not double or float"); %(fail)s;}

        if ((PyArray_DESCR(zs)->type_num != NPY_DOUBLE)
            && (PyArray_DESCR(zs)->type_num != NPY_FLOAT))
        {PyErr_SetString(PyExc_NotImplementedError, "type(z) is not double or float"); %(fail)s;}

        if ((PyArray_DESCR(xs)->type_num != PyArray_DESCR(ys)->type_num)
            ||(PyArray_DESCR(xs)->type_num != PyArray_DESCR(zs)->type_num))
        { PyErr_SetString(PyExc_NotImplementedError, "type(x), type(y), type(z) are not all the same"); %(fail)s; }

        switch (type_num)
        {
            case NPY_FLOAT:
            if (batch_gemm<float>(sgemm_, type_size, xs, ys, zs)) {
                %(fail)s;
            }
            break;
            case NPY_DOUBLE:
            if (batch_gemm<double>(dgemm_, type_size, xs, ys, zs)) {
                %(fail)s;
            }
            break;
        }
        """ % locals()

    def c_code_cache_version(self):
        from theano.tensor.blas_headers import blas_header_version
        return (3, blas_header_version())

    def grad(self, inp, grads):
        """Gradients for the batched dot, one case per input-rank pair."""
        x, y = inp
        gz, = grads
        xdim, ydim, gdim = x.type.ndim, y.type.ndim, gz.type.ndim

        # grad is a vector, so x is a matrix and y is a matrix
        if gdim == 1:
            xgrad = gz.dimshuffle(0, 'x') * y
            ygrad = gz.dimshuffle(0, 'x') * x

        # x is a matrix, y is a tensor3, grad is a matrix
        elif xdim == 2 and ydim == 3:
            xgrad = T.batched_dot(gz, y.dimshuffle(0, 2, 1))
            ygrad = x.dimshuffle(0, 1, 'x') * gz.dimshuffle(0, 'x', 1)

        # x is a tensor3, y is a matrix, grad is a matrix
        elif xdim == 3 and ydim == 2:
            xgrad = gz.dimshuffle(0, 1, 'x') * y.dimshuffle(0, 'x', 1)
            ygrad = T.batched_dot(x.dimshuffle(0, 2, 1), gz)

        # x is a tensor3, y is a tensor3, grad is a tensor3
        elif xdim == ydim == 3:
            xgrad = T.batched_dot(gz, y.dimshuffle(0, 2, 1))
            ygrad = T.batched_dot(x.dimshuffle(0, 2, 1), gz)

        # If x or y contain broadcastable dimensions but only one of
        # them know that a matching dimensions is broadcastable, the
        # above code don't always return the right broadcast pattern.
        # This cause problem down the road. See gh-1461.
        if xgrad.broadcastable != x.broadcastable:
            xgrad = T.patternbroadcast(xgrad, x.broadcastable)
        if ygrad.broadcastable != y.broadcastable:
            ygrad = T.patternbroadcast(ygrad, y.broadcastable)

        return xgrad, ygrad

    def R_op(self, inputs, eval_points):
        # R_op for batched_dot(a, b) evaluated at c for a and d for b is
        # simply batched_dot(c, b) + batched_dot(a, d)
        assert len(inputs) == 2
        assert len(eval_points) == 2
        if eval_points[0] is None and eval_points[1] is None:
            return [None]

        # When the debugger (compute_test_value) is active, verify that the
        # test values of inputs and eval points have matching shapes.
        debugger_available = config.compute_test_value != 'off'

        if debugger_available:
            try:
                iv0 = theano.gof.op.get_test_value(inputs[0])
            except AttributeError:
                theano.gof.op.missing_test_message(
                    'first input passed to BatchedDot.R_op has no test value')
                debugger_available = False

            try:
                iv1 = theano.gof.op.get_test_value(inputs[1])
            except AttributeError:
                theano.gof.op.missing_test_message(
                    'second input passed to BatchedDot.R_op has no test value')
                debugger_available = False

            if eval_points[0]:
                try:
                    ev0 = theano.gof.op.get_test_value(eval_points[0])
                except AttributeError:
                    theano.gof.op.missing_test_message(
                        'first eval point passed to BatchedDot.R_op '
                        'has no test value')
                    debugger_available = False
            if eval_points[1]:
                try:
                    ev1 = theano.gof.op.get_test_value(eval_points[1])
                except AttributeError:
                    theano.gof.op.missing_test_message(
                        'second eval point passed to BatchedDot.R_op '
                        'has no test value')
                    debugger_available = False

        if debugger_available:
            input_values = [iv0, iv1]
            eval_point_values = [ev0, ev1]

            for i in xrange(2):
                if eval_point_values[i] is not None and \
                   input_values[i].shape != eval_point_values[i].shape:
                    raise ValueError(
                        'input ' + str(i) + ' and eval_point ' + str(i) +
                        ' to BatchedDot.R_op should have the same shape, but '
                        'their shapes are %s and %s, respectively' % (
                            str(input_values[i].shape),
                            str(eval_point_values[i].shape)))

        if eval_points[0]:
            t1 = self(eval_points[0], inputs[1])
        if eval_points[1]:
            t2 = self(inputs[0], eval_points[1])

        if eval_points[0] and eval_points[1]:
            return [t1 + t2]
        elif eval_points[0]:
            return [t1]
        else:
            return [t2]

    def infer_shape(self, node, shapes):
        # Output shape: batch axis + x's middle axes + y's trailing axes.
        for shape_ in shapes:
            if len(shape_) not in (2, 3):
                raise NotImplementedError()
        xshp, yshp = shapes
        return [xshp[:-1] + yshp[2:]]
# Singleton BatchedDot instance.
batched_dot = BatchedDot()
# from opt import register_specialize, register_canonicalize
# @register_specialize
@local_optimizer([T.sub, T.add])
def local_print_as_we_go_along(node):
    # Debugging helper: prints the graph of every sub/add node it visits.
    # Not registered by default (see commented decorator above).
    if node.op in (T.sub, T.add):
        debugprint(node)
|
Weihonghao/ECM
|
Vpy34/lib/python3.5/site-packages/theano/tensor/blas.py
|
Python
|
agpl-3.0
| 92,128
|
[
"BLAST"
] |
52edf62ed62c78561bf06be2dc27bb8e2ef6e9a4f4f20ece0ba6c3bd1aac545c
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Handles function calls, by generating compiled function names and calls.
Note: this transformer does not rename the top level object being converted;
that is the caller's responsibility.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import gast
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import ast_util
from tensorflow.python.autograph.pyct import inspect_utils
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import templates
from tensorflow.python.util import tf_inspect
# Record describing a known function; currently only carries the dtype
# string used when wrapping the function with py_func.
class FunctionInfo(namedtuple('FunctionInfo', ('dtype',))):
  pass
# TODO(mdan): Move this to config.py.
# Maps fully qualified function names (as tuples) to the FunctionInfo used
# when wrapping them via py_func.
KNOWN_NUMPY_FUNCTIONS = {
    ('numpy', 'random', 'binomial'): FunctionInfo(dtype='tf.int64'),
}
# TODO(mdan): Get rid of these interfaces. Can now depend directly on Namer.


class FunctionNamer(object):
  """Describes the interface for CallTreeTransformer's namer."""

  def compiled_function_name(self,
                             original_fqn,
                             live_entity=None,
                             owner_type=None):
    """Generate the name corresponding to the compiled version of a function.

    Args:
      original_fqn: string or tuple(string)
      live_entity: Callable, the actual target function, if known.
      owner_type: Optional object. If present, it indicates that the function is
          a member of the given type.
    Returns:
      string, bool
    """
    raise NotImplementedError()

  def compiled_class_name(self, original_fqn, live_entity=None):
    """Generate the name corresponding to the compiled version of a class.

    Args:
      original_fqn: string or tuple(string)
      live_entity: The actual target class, if known.
    Returns:
      string
    """
    raise NotImplementedError()
# TODO(mdan): Rename to CallsTransformer.
class CallTreeTransformer(converter.Base):
"""Transforms the call tree by renaming transformed symbols."""
  def _resolve_decorator_name(self, node):
    """Used to resolve decorator info.

    Recursively unwraps Call/Attribute nodes to find the live Python object
    the decorator expression refers to, via the conversion namespace.
    """
    if isinstance(node, gast.Call):
      # Parameterized decorator, e.g. @deco(args): resolve the callee.
      return self._resolve_decorator_name(node.func)
    if isinstance(node, gast.Name):
      # TODO(mdan): Add test coverage for this branch.
      return self.ctx.info.namespace.get(node.id)
    if isinstance(node, gast.Attribute):
      parent = self._resolve_decorator_name(node.value)
      if parent is not None:
        return getattr(parent, node.attr)
      return None
    raise ValueError(node)
def _try_resolve_target(self, node):
"""Works for methods of objects of known type."""
if anno.hasanno(node, 'live_val'):
return anno.getanno(node, 'live_val')
if isinstance(node, gast.Attribute) and anno.hasanno(node, 'type'):
owner_type = anno.getanno(node, 'type')
if hasattr(owner_type, node.attr):
return getattr(owner_type, node.attr)
else:
raise ValueError('Type "%s" has not attribute "%s". Is it dynamic?' %
(owner_type, node.attr))
return None
  def _function_is_compilable(self, target_entity):
    """Determines whether an entity can be compiled at all."""
    # TODO(mdan): This is just a placeholder. Implement.
    # Builtins have no accessible Python source, hence cannot be converted.
    return not inspect_utils.isbuiltin(target_entity)
  def _should_compile(self, node, fqn):
    """Determines whether an entity should be compiled in the context.

    Checks, in order: whitelisted (uncompiled) modules, local graph_ready
    annotations, the resolved target's module and decorator attributes, and
    finally the target's own decorator list.
    """
    # TODO(mdan): Needs cleanup. We should remove the use of fqn altogether.
    module_name = fqn[0]
    for mod in self.ctx.program.uncompiled_modules:
      if module_name.startswith(mod[0] + '.'):
        return False

    for i in range(1, len(fqn)):
      if fqn[:i] in self.ctx.program.uncompiled_modules:
        return False

    # Check for local decorations
    if anno.hasanno(node, 'graph_ready'):
      return False

    # The decorators themselves are not to be converted.
    # If present, the decorators should appear as static functions.
    target_entity = self._try_resolve_target(node.func)

    if target_entity is not None:
      # This may be reached when "calling" a callable attribute of an object.
      # For example:
      #
      #   self.fc = tf.keras.layers.Dense()
      #   self.fc()
      #
      for mod in self.ctx.program.uncompiled_modules:
        if target_entity.__module__.startswith(mod[0] + '.'):
          return False

      # This attribute is set by the decorator itself.
      # TODO(mdan): This may not play nicely with other wrapping decorators.
      if hasattr(target_entity, '__pyct_is_compile_decorator'):
        return False

      if target_entity in self.ctx.program.options.strip_decorators:
        return False

      # Inspect the target function decorators. If any include a @convert
      # or @graph_ready annotation, then they must be called as they are.
      # TODO(mdan): This may be quite heavy.
      # To parse and re-analyze each function for every call site could be quite
      # wasteful. Maybe we could cache the parsed AST?
      try:
        target_node, _ = parser.parse_entity(target_entity)
        target_node = target_node.body[0]
      except TypeError:
        # Functions whose source we cannot access are compilable (e.g. wrapped
        # to py_func).
        return True

      for dec in target_node.decorator_list:
        decorator_fn = self._resolve_decorator_name(dec)
        if (decorator_fn is not None and
            decorator_fn in self.ctx.program.options.strip_decorators):
          return False

    return True
  def _rename_compilable_function(self, node):
    """Rewrite a call to use the compiled name of its target function.

    For constructors, uses the compiled class name; for methods, prepends
    the receiver as the first positional argument since the renamed target
    is a plain function.
    """
    assert anno.hasanno(node.func, 'live_val')
    assert anno.hasanno(node.func, 'fqn')
    target_entity = anno.getanno(node.func, 'live_val')
    target_fqn = anno.getanno(node.func, 'fqn')

    if not self._should_compile(node, target_fqn):
      return node

    if anno.hasanno(node, 'is_constructor'):
      new_name = self.ctx.namer.compiled_class_name(
          target_fqn, live_entity=target_entity)
      do_rename = True
    else:
      if anno.hasanno(node.func, 'parent_type'):
        owner_type = anno.getanno(node.func, 'parent_type')
      else:
        # Fallback - not reliable.
        owner_type = inspect_utils.getmethodclass(target_entity)
      new_name, do_rename = self.ctx.namer.compiled_function_name(
          target_fqn, live_entity=target_entity, owner_type=owner_type)

    if do_rename:
      if target_entity is not None:
        if tf_inspect.ismethod(target_entity):
          # The renaming process will transform it into a regular function.
          # TODO(mdan): Is this complete? How does it work with nested members?
          node.args = [node.func.value] + node.args
      node.func = templates.replace('func_name', func_name=new_name)[0]
    return node
  def _wrap_to_py_func_no_return(self, node):
    """Wrap a call with no used return value in ag__.utils.wrap_py_func."""
    # TODO(mdan): Properly handle varargs, etc.
    template = """
      ag__.utils.wrap_py_func(func, None, (args,), kwargs, True)
    """
    return templates.replace(
        template,
        func=node.func,
        args=node.args,
        kwargs=ast_util.keywords_to_dict(node.keywords))
  def _wrap_to_py_func_single_return(self, node, dtype):
    """Wrap a call returning a single value of `dtype` in wrap_py_func."""
    # TODO(mdan): Properly handle varargs, etc.
    template = """
      ag__.utils.wrap_py_func(func, dtype, (args,), kwargs, False)
    """
    return templates.replace_as_expression(
        template,
        func=node.func,
        dtype=parser.parse_expression(dtype),
        args=node.args,
        kwargs=ast_util.keywords_to_dict(node.keywords))
  def _insert_dynamic_conversion(self, node):
    """Inlines a dynamic conversion for a dynamic function."""
    # TODO(mdan): Pass information on the statically compiled functions.
    # Having access to the statically compiled functions can help avoid
    # unnecessary compilation.
    # For example, this would lead to function `a` being compiled twice:
    #
    #   def a():
    #     v = b
    #     b()
    #   def b():
    #     a()
    #
    # This is really a problem with recursive calls, which currently can
    # only be gated by a static condition, and should be rare.
    # TODO(mdan): It probably makes sense to use dynamic conversion every time.
    # Before we could convert all the time though, we'd need a reasonable
    # caching mechanism.
    template = """
      ag__.converted_call(func, owner, options, args)
    """
    if isinstance(node.func, gast.Attribute):
      # Pass the attribute name as a string and its owner separately, so
      # converted_call can perform the attribute lookup itself.
      func = gast.Str(node.func.attr)
      owner = node.func.value
    else:
      func = node.func
      owner = parser.parse_expression('None')
    call_expr = templates.replace(
        template,
        func=func,
        owner=owner,
        options=self.ctx.program.options.to_ast(self.ctx.info.namespace),
        args=node.args)
    new_call = call_expr[0].value
    # TODO(mdan): Improve the template mechanism to better support this.
    new_call.keywords = node.keywords
    return new_call
def visit_Expr(self, node):
    """Handles calls used as standalone expression statements.

    A call whose return value is discarded and whose target is known but not
    compilable is wrapped into a no-return py_func here. Every other call is
    delegated to visit_Call via self.visit.
    """
    if isinstance(node.value, gast.Call):
        if anno.hasanno(node.value.func, 'live_val'):
            target_entity = anno.getanno(node.value.func, 'live_val')
            if not self._function_is_compilable(target_entity):
                if anno.hasanno(node.value.func, 'fqn'):
                    target_fqn = anno.getanno(node.value.func, 'fqn')
                    if not self._should_compile(node.value, target_fqn):
                        return node
                    node = self._wrap_to_py_func_no_return(node.value)
                    return node
        # Only the case of py_func with no return value is special.
        # Everything else is processed by visit_Call.
        self.visit(node.value)
    else:
        self.generic_visit(node)
    return node
def visit_Call(self, node):
    """Rewrites a Call node into a graph-compatible counterpart.

    Depending on what is statically known about the target, the call is
    either renamed to its compiled version, wrapped into a py_func (known
    numpy functions), left unchanged (builtins, super(), stripped
    decorators), or wrapped into a dynamic converted_call (recursive mode).
    """
    # If the function call is wrapped by one of the marker decorators,
    # consider it graph ready.
    if anno.hasanno(node.func, 'live_val'):
        target_entity = anno.getanno(node.func, 'live_val')
        if target_entity in self.ctx.program.options.strip_decorators:
            if len(node.args) < 1:
                raise ValueError(
                    'Found call to decorator function "%s", but it had no arguments. '
                    'A decorator needs at least one positional argument.' %
                    target_entity)
            # Mark the decorated function (first positional argument) as
            # already graph ready so it is not converted again.
            anno.setanno(node.args[0], 'graph_ready', True)
    self.generic_visit(node)
    if anno.hasanno(node.func, 'live_val'):
        target_entity = anno.getanno(node.func, 'live_val')
        if anno.hasanno(node.func, 'fqn'):
            target_fqn = anno.getanno(node.func, 'fqn')
        else:
            target_fqn = None
        if self._function_is_compilable(target_entity):
            node = self._rename_compilable_function(node)
        elif target_fqn and target_fqn in KNOWN_NUMPY_FUNCTIONS:
            # TODO(mdan): Should we replace these with equivalent TF ops instead?
            node = self._wrap_to_py_func_single_return(
                node, KNOWN_NUMPY_FUNCTIONS[target_fqn].dtype)
        elif inspect_utils.isbuiltin(target_entity):
            # Note: Any builtin that passed the builtins converter is assumed to be
            # safe for graph mode.
            return node
        else:
            raise NotImplementedError(
                'py_func with return values (unknown function)')
    else:
        if ast_util.matches(node, 'super(_)'):
            # super() calls are preserved. The class conversion mechanism will
            # ensure that they return the correct value.
            return node
        if self.ctx.program.options.recursive:
            node = self._insert_dynamic_conversion(node)
    return node
def transform(node, ctx):
    """Transform function calls to their compiled counterparts.

    Args:
      node: AST to transform.
      ctx: EntityContext carrying the program options and transformer state.

    Returns:
      The transformed AST node. NOTE(review): the previous docstring
      advertised a (node, new_names) tuple, but the code returns only the
      result of visit().
    """
    return CallTreeTransformer(ctx).visit(node)
|
alshedivat/tensorflow
|
tensorflow/python/autograph/converters/call_trees.py
|
Python
|
apache-2.0
| 12,664
|
[
"VisIt"
] |
d508d098e8fbd650a4af0c50ca56cfbcc28e3f717b8c1dd3662d06ceb18da9c1
|
''' Convienence methods on VTK routines only '''
import ddapp.vtkAll as vtk
import ddapp.vtkNumpy as vnp
from ddapp.shallowCopy import shallowCopy
import numpy as np
def thresholdPoints(polyData, arrayName, thresholdRange):
    '''Return a new polyData containing only the points whose value in the
    point-data array arrayName lies within thresholdRange (inclusive).'''
    assert(polyData.GetPointData().GetArray(arrayName))
    filt = vtk.vtkThresholdPoints()
    filt.SetInput(polyData)
    filt.SetInputArrayToProcess(0, 0, 0, vtk.vtkDataObject.FIELD_ASSOCIATION_POINTS, arrayName)
    filt.ThresholdBetween(thresholdRange[0], thresholdRange[1])
    filt.Update()
    return shallowCopy(filt.GetOutput())
def transformPolyData(polyData, transform):
    '''Return a copy of polyData with the given vtkTransform applied.'''
    filt = vtk.vtkTransformPolyDataFilter()
    filt.SetInput(shallowCopy(polyData))
    filt.SetTransform(transform)
    filt.Update()
    return shallowCopy(filt.GetOutput())
def computeDelaunay3D(polyData):
    '''Compute a 3D Delaunay triangulation of the input points and return its
    surface as polyData.

    Pipeline: vtkDelaunay3D (unstructured grid) -> vtkGeometryFilter (extract
    the outer surface as polyData) -> vtkCleanPolyData (merge duplicates).
    '''
    f = vtk.vtkDelaunay3D()
    f.SetInput(polyData)
    # Offset controls the multiplier of the initial bounding triangulation;
    # 100.0 is well above the VTK default -- presumably chosen for robustness
    # with scattered point clouds (TODO confirm).
    f.SetOffset(100.0)
    f.Update()
    surface = vtk.vtkGeometryFilter()
    surface.SetInput(f.GetOutput())
    surface.Update()
    clean = vtk.vtkCleanPolyData()
    clean.SetInput(surface.GetOutput())
    clean.Update()
    return shallowCopy(clean.GetOutput())
def computeDelaunay2D(polyData):
    '''Return a 2D Delaunay triangulation of the input points as polyData.'''
    delaunay = vtk.vtkDelaunay2D()
    delaunay.SetInput(polyData)
    delaunay.Update()
    return shallowCopy(delaunay.GetOutput())
def computeCentroid(polyData):
    '''Return the mean of the polyData points as a numpy array.'''
    points = vnp.getNumpyFromVtk(polyData, 'Points')
    return np.average(points, axis=0)
def appendPolyData(polyDataList):
    '''Combine a non-empty list of vtkPolyData objects into a single polyData.'''
    assert len(polyDataList)
    combiner = vtk.vtkAppendPolyData()
    for dataset in polyDataList:
        combiner.AddInput(dataset)
    combiner.Update()
    return shallowCopy(combiner.GetOutput())
def computeNormals(polyData, featureAngle=45):
    '''Return a copy of polyData with surface normals computed.
    featureAngle (degrees) controls sharp-edge splitting in vtkPolyDataNormals.'''
    generator = vtk.vtkPolyDataNormals()
    generator.SetInput(polyData)
    generator.SetFeatureAngle(featureAngle)
    generator.Update()
    return shallowCopy(generator.GetOutput())
def cleanPolyData(polyData):
    '''Return polyData passed through vtkCleanPolyData (merges duplicate points).'''
    cleaner = vtk.vtkCleanPolyData()
    cleaner.SetInput(polyData)
    cleaner.Update()
    return shallowCopy(cleaner.GetOutput())
def hasNonFinitePoints(polyData, arrayName='Points'):
    '''Return True if any value in the given point array is non-finite
    (nan or +/- inf).

    Bug fix: the previous implementation returned np.isfinite(pts).any(),
    which is True whenever ANY value is finite -- the opposite of what the
    function name promises (and inconsistent with labelNonFinitePoints,
    which uses logical_not(isfinite)).
    '''
    pts = vnp.getNumpyFromVtk(polyData, arrayName)
    return not np.isfinite(pts).all()
def labelNonFinitePoints(polyData, arrayName='Points'):
    '''
    adds is_nonfinite label to polyData. non finite includes nan and +/- inf.
    '''
    values = vnp.getNumpyFromVtk(polyData, arrayName)
    # a point is flagged when any of its components is nan or +/- inf
    nonfinite = np.logical_not(np.isfinite(values))
    flags = nonfinite.any(axis=1)
    vnp.addNumpyToVtk(polyData, np.array(flags, dtype=np.int32), 'is_nonfinite')
def removeNonFinitePoints(polyData, arrayName='Points'):
    '''Return a copy of polyData with every non-finite point removed.'''
    result = shallowCopy(polyData)
    labelNonFinitePoints(result, arrayName)
    # keep only points whose is_nonfinite label is 0
    return thresholdPoints(result, 'is_nonfinite', [0, 0])
|
gizatt/director
|
src/python/ddapp/filterUtils.py
|
Python
|
bsd-3-clause
| 2,663
|
[
"VTK"
] |
af3f72c340253b8a2d0136b3a12b2f40f14dd030d8a5b44140b6c91a1dd9235c
|
# Copyright (C) 2014 Sereina Riniker
#
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" Torsion Fingerprints (Deviation) (TFD)
According to a paper from Schulz-Gasch et al., JCIM, 52, 1499-1512 (2012).
"""
from rdkit import rdBase
from rdkit import RDConfig
from rdkit import Geometry
from rdkit import Chem
from rdkit.Chem import rdMolDescriptors
import math, os
def _doMatch(inv, atoms):
""" Helper function to check if all atoms in the list are the same
Arguments:
- inv: atom invariants (used to define equivalence of atoms)
- atoms: list of atoms to check
Return: boolean
"""
match = True
for i in range(len(atoms)-1):
for j in range(i+1, len(atoms)):
if (inv[atoms[i].GetIdx()] != inv[atoms[j].GetIdx()]):
match = False
return match
return match
def _doNotMatch(inv, atoms):
""" Helper function to check if all atoms in the list are NOT the same
Arguments:
- inv: atom invariants (used to define equivalence of atoms)
- atoms: list of atoms to check
Return: boolean
"""
match = True
for i in range(len(atoms)-1):
for j in range(i+1, len(atoms)):
if (inv[atoms[i].GetIdx()] == inv[atoms[j].GetIdx()]):
match = False
return match
return match
def _doMatchExcept1(inv, atoms):
""" Helper function to check if two atoms in the list are the same,
and one not
Note: Works only for three atoms
Arguments:
- inv: atom invariants (used to define equivalence of atoms)
- atoms: list of atoms to check
Return: atom that is different
"""
if len(atoms) != 3:
raise ValueError("Number of atoms must be three")
a1 = atoms[0].GetIdx()
a2 = atoms[1].GetIdx()
a3 = atoms[2].GetIdx()
if (inv[a1] == inv[a2] and inv[a1] != inv[a3] and inv[a2] != inv[a3]):
return atoms[2]
elif (inv[a1] != inv[a2] and inv[a1] == inv[a3] and inv[a2] != inv[a3]):
return atoms[1]
elif (inv[a1] != inv[a2] and inv[a1] != inv[a3] and inv[a2] == inv[a3]):
return atoms[0]
return None
def _getAtomInvariantsWithRadius(mol, radius):
    """ Helper function to calculate the atom invariants for each atom
        with a given radius

        Arguments:
        - mol:    the molecule of interest
        - radius: the radius for the Morgan fingerprint

        Return: list of atom invariants
    """
    inv = []
    for i in range(mol.GetNumAtoms()):
        info = {}
        # compute the Morgan environment rooted at atom i only
        fp = rdMolDescriptors.GetMorganFingerprint(mol, radius, fromAtoms=[i], bitInfo=info)
        # keep the environment key(s) whose recorded radius equals `radius`;
        # presumably exactly one such key exists per atom so the result list
        # aligns with atom indices -- TODO confirm
        for k in info.keys():
            if info[k][0][1] == radius:
                inv.append(k)
    return inv
def _getHeavyAtomNeighbors(atom1, aid2=-1):
""" Helper function to calculate the number of heavy atom neighbors.
Arguments:
- atom1: the atom of interest
- aid2: atom index that should be excluded from neighbors (default: none)
Return: a list of heavy atom neighbors of the given atom
"""
if aid2 < 0:
return [n for n in atom1.GetNeighbors() if n.GetSymbol()!='H']
else:
return [n for n in atom1.GetNeighbors() if (n.GetSymbol()!='H' and n.GetIdx()!=aid2)]
def _getIndexforTorsion(neighbors, inv):
    """ Helper function to determine the reference atom(s) used to define
        a torsion around a bond.

        Arguments:
        - neighbors: list of the neighbors of the atom
        - inv:       atom invariants

        Return: list of atom(s) to use as torsion reference
    """
    if len(neighbors) == 1:
        # only one neighbor: no choice to make
        return [neighbors[0]]
    if _doMatch(inv, neighbors):
        # all neighbors symmetric: keep them all
        return neighbors
    if _doNotMatch(inv, neighbors):
        # all neighbors different: the first one is as good as any
        return [neighbors[0]]
    # two neighbors equivalent, one distinct: use the distinct one
    odd_one = _doMatchExcept1(inv, neighbors)
    if odd_one is None:
        raise ValueError("Atom neighbors are either all the same or all different")
    return [odd_one]
def CalculateTorsionLists(mol, maxDev='equal', symmRadius=2):
    """ Calculate a list of torsions for a given molecule. For each torsion
        the four atom indices are determined and stored in a set.

        Arguments:
        - mol:    the molecule of interest
        - maxDev: maximal deviation used for normalization
                  'equal': all torsions are normalized using 180.0 (default)
                  'spec':  each torsion is normalized using its specific
                           maximal deviation as given in the paper
        - symmRadius: radius used for calculating the atom invariants
                      (default: 2)

        Return: two lists of (torsion-index-tuples, max deviation) pairs:
                non-ring and ring torsions
    """
    if maxDev not in ['equal', 'spec']:
        raise ValueError("maxDev must be either equal or spec")
    # get non-terminal, non-cyclic bonds
    bonds = []
    for b in mol.GetBonds():
        if b.IsInRing(): continue
        a1 = b.GetBeginAtomIdx()
        a2 = b.GetEndAtomIdx()
        # heavy-atom neighbors of each bond atom, excluding the other bond atom
        nb1 = _getHeavyAtomNeighbors(b.GetBeginAtom(), a2)
        nb2 = _getHeavyAtomNeighbors(b.GetEndAtom(), a1)
        if nb1 and nb2:  # no terminal bonds
            bonds.append((b, a1, a2, nb1, nb2))
    # get atom invariants (define which neighbor atoms count as equivalent)
    if symmRadius > 0:
        inv = _getAtomInvariantsWithRadius(mol, symmRadius)
    else:
        inv = rdMolDescriptors.GetConnectivityInvariants(mol)
    # get the torsions; the "case" numbers below presumably refer to the
    # symmetry cases enumerated in the TFD paper -- TODO confirm
    tors_list = []  # to store the atom indices of the torsions
    for b, a1, a2, nb1, nb2 in bonds:
        d1 = _getIndexforTorsion(nb1, inv)
        d2 = _getIndexforTorsion(nb2, inv)
        if len(d1) == 1 and len(d2) == 1:  # case 1, 2, 4, 5, 7, 10, 16, 12, 17, 19
            tors_list.append(([(d1[0].GetIdx(), a1, a2, d2[0].GetIdx())], 180.0))
        elif len(d1) == 1:  # case 3, 6, 8, 13, 20
            if len(nb2) == 2:  # two neighbors
                tors_list.append(([(d1[0].GetIdx(), a1, a2, nb.GetIdx()) for nb in d2], 90.0))
            else:  # three neighbors
                tors_list.append(([(d1[0].GetIdx(), a1, a2, nb.GetIdx()) for nb in d2], 60.0))
        elif len(d2) == 1:  # case 3, 6, 8, 13, 20
            if len(nb1) == 2:
                tors_list.append(([(nb.GetIdx(), a1, a2, d2[0].GetIdx()) for nb in d1], 90.0))
            else:  # three neighbors
                tors_list.append(([(nb.GetIdx(), a1, a2, d2[0].GetIdx()) for nb in d1], 60.0))
        else:  # both symmetric
            tmp = []
            for n1 in d1:
                for n2 in d2:
                    tmp.append((n1.GetIdx(), a1, a2, n2.GetIdx()))
            if len(nb1) == 2 and len(nb2) == 2:  # case 9
                tors_list.append((tmp, 90.0))
            elif len(nb1) == 3 and len(nb2) == 3:  # case 21
                tors_list.append((tmp, 60.0))
            else:  # case 15
                tors_list.append((tmp, 30.0))
    # maximal possible deviation for non-cyclic bonds:
    # with maxDev='equal' the per-torsion deviations above are overridden by 180.0
    if maxDev == 'equal':
        tors_list = [(t, 180.0) for t, d in tors_list]
    # rings: one torsion per consecutive quadruple of ring atoms
    rings = Chem.GetSymmSSSR(mol)
    tors_list_rings = []
    for r in rings:
        # get the torsions
        tmp = []
        num = len(r)
        # ring maximal deviation: 180 * exp(-0.025*(N-14)^2) for ring size N
        maxdev = 180.0 * math.exp(-0.025*(num-14)*(num-14))
        for i in range(len(r)):
            tmp.append((r[i], r[(i+1)%num], r[(i+2)%num], r[(i+3)%num]))
        tors_list_rings.append((tmp, maxdev))
    return tors_list, tors_list_rings
def _getTorsionAtomPositions(atoms, conf):
""" Helper function to retrieve the coordinates of the four atoms
in a torsion
Arguments:
- atoms: list with the four atoms
- conf: conformation of the molecule
Return: Point3D objects of the four atoms
"""
if len(atoms) != 4:
raise ValueError("List must contain exactly four atoms")
p1 = conf.GetAtomPosition(atoms[0])
p2 = conf.GetAtomPosition(atoms[1])
p3 = conf.GetAtomPosition(atoms[2])
p4 = conf.GetAtomPosition(atoms[3])
return p1, p2, p3, p4
def CalculateTorsionAngles(mol, tors_list, tors_list_rings, confId=-1):
    """ Calculate the torsion angles for a list of non-ring and
        a list of ring torsions.

        Arguments:
        - mol:             the molecule of interest
        - tors_list:       list of non-ring torsions
        - tors_list_rings: list of ring torsions
        - confId:          index of the conformation (default: first conformer)

        Return: list of (torsion angle in degrees, maximal deviation) tuples
    """
    torsions = []
    conf = mol.GetConformer(confId)
    for t, maxdev in tors_list:
        if len(t) == 1:
            # single torsion: measure it directly
            t = t[0]
            p1, p2, p3, p4 = _getTorsionAtomPositions(t, conf)
            tors = (Geometry.ComputeSignedDihedralAngle(p1, p2, p3, p4)/math.pi)*180.0
            if tors < 0: tors += 360.0  # angle between 0 and 360
        else:
            # several equivalent torsions (symmetric neighbors):
            # loop over torsions and take minimum
            tors = 360.0
            for t2 in t:
                p1, p2, p3, p4 = _getTorsionAtomPositions(t2, conf)
                tmp = (Geometry.ComputeSignedDihedralAngle(p1, p2, p3, p4)/math.pi)*180.0
                if tmp < 0: tmp += 360.0  # angle between 0 and 360
                if tmp < tors: tors = tmp
        torsions.append((tors, maxdev))
    # rings: average of the absolute torsion angles around the ring
    for t, maxdev in tors_list_rings:
        num = len(t)
        # loop over torsions and sum them up
        tors = 0
        for t2 in t:
            p1, p2, p3, p4 = _getTorsionAtomPositions(t2, conf)
            tmp = abs((Geometry.ComputeSignedDihedralAngle(p1, p2, p3, p4)/math.pi)*180.0)
            tors += tmp
        tors /= num
        torsions.append((tors, maxdev))
    return torsions
def _findCentralBond(mol, distmat):
    """ Helper function to identify the atoms of the most central bond.

        Arguments:
        - mol:     the molecule of interest
        - distmat: distance matrix of the molecule

        Return: atom indices of the two most central atoms (in order)
    """
    from numpy import std
    # get the most central atom = atom with the least STD of shortest distances
    stds = []
    for i in range(mol.GetNumAtoms()):
        # only consider non-terminal atoms
        if len(_getHeavyAtomNeighbors(mol.GetAtomWithIdx(i))) < 2: continue
        tmp = [d for d in distmat[i]]
        tmp.pop(i)  # drop the zero self-distance
        stds.append((std(tmp), i))
    stds.sort()
    aid1 = stds[0][1]
    # find the next most central atom that is bonded to aid1
    # NOTE(review): assumes at least one later stds entry is bonded to aid1;
    # otherwise the loop below indexes past the end of the list -- confirm
    # callers only pass molecules where this holds.
    i = 1
    while 1:
        if mol.GetBondBetweenAtoms(aid1, stds[i][1]) is None:
            i += 1
        else:
            aid2 = stds[i][1]
            break
    return aid1, aid2  # most central atom comes first
def _calculateBeta(mol, distmat, aid1):
    """ Helper function to calculate the beta for torsion weights
        according to the formula in the paper: w(dmax/2) = 0.1

        Arguments:
        - mol:     the molecule of interest
        - distmat: distance matrix of the molecule
        - aid1:    atom index of the most central atom

        Return: value of beta (float)
    """
    # get all non-terminal bonds (both ends must have >1 heavy-atom neighbor).
    # Bug fix: the original tested len(nb2) twice ("len(nb2) > 1 and
    # len(nb2) > 1"), so bonds with a terminal *begin* atom were wrongly kept.
    bonds = []
    for b in mol.GetBonds():
        nb1 = _getHeavyAtomNeighbors(b.GetBeginAtom())
        nb2 = _getHeavyAtomNeighbors(b.GetEndAtom())
        if len(nb1) > 1 and len(nb2) > 1:
            bonds.append(b)
    # largest topological distance from aid1 to any non-terminal bond
    dmax = 0
    for b in bonds:
        bid1 = b.GetBeginAtom().GetIdx()
        bid2 = b.GetEndAtom().GetIdx()
        d = max([distmat[aid1][bid1], distmat[aid1][bid2]])
        if (d > dmax): dmax = d
    # solve w(dmax/2) = exp(-beta*(dmax/2)^2) = 0.1 for beta
    # NOTE(review): dmax == 0 (no qualifying bonds) raises ZeroDivisionError.
    dmax2 = dmax/2.0
    beta = -math.log(0.1)/(dmax2*dmax2)
    return beta
def CalculateTorsionWeights(mol, aid1=-1, aid2=-1):
    """ Calculate the weights for the torsions in a molecule.
        By default, the highest weight is given to the bond
        connecting the two most central atoms.
        If desired, two alternate atoms can be specified (must
        be connected by a bond).

        Arguments:
        - mol:  the molecule of interest
        - aid1: index of the first atom (default: most central)
        - aid2: index of the second atom (default: second most central)

        Return: list of torsion weights (both non-ring and ring)
    """
    # get distance matrix (topological bond-count distances)
    distmat = Chem.GetDistanceMatrix(mol)
    if aid1 < 0 and aid2 < 0:
        aid1, aid2 = _findCentralBond(mol, distmat)
    else:
        # NOTE(review): if only one of aid1/aid2 is given, this branch calls
        # GetBondBetweenAtoms with a negative index -- confirm callers always
        # pass both or neither.
        b = mol.GetBondBetweenAtoms(aid1, aid2)
        if b is None:
            raise ValueError("Specified atoms must be connected by a bond.")
    # calculate beta according to the formula in the paper
    beta = _calculateBeta(mol, distmat, aid1)
    # get non-terminal, non-cyclic bonds
    bonds = []
    for b in mol.GetBonds():
        if b.IsInRing(): continue
        nb1 = _getHeavyAtomNeighbors(b.GetBeginAtom())
        nb2 = _getHeavyAtomNeighbors(b.GetEndAtom())
        if len(nb1) > 1 and len(nb2) > 1:
            bonds.append(b)
    # get shortest paths and calculate weights
    weights = []
    for b in bonds:
        bid1 = b.GetBeginAtom().GetIdx()
        bid2 = b.GetEndAtom().GetIdx()
        if ((bid1, bid2) == (aid1, aid2)
                or (bid2, bid1) == (aid1, aid2)):  # if it's the most central bond itself
            d = 0
        else:
            # get shortest distance between the 4 atoms and add 1 to get bond distance
            d = min(distmat[aid1][bid1], distmat[aid1][bid2], distmat[aid2][bid1], distmat[aid2][bid2])+1
        w = math.exp(-beta*(d*d))
        weights.append(w)
    ## RINGS
    rings = mol.GetRingInfo()
    for r in rings.BondRings():
        # get shortest distances for every bond in the ring
        tmp = []
        num = len(r)
        for bidx in r:
            b = mol.GetBondWithIdx(bidx)
            bid1 = b.GetBeginAtomIdx()
            bid2 = b.GetEndAtomIdx()
            # get shortest distance between the 4 atoms and add 1 to get bond distance
            d = min(distmat[aid1][bid1], distmat[aid1][bid2], distmat[aid2][bid1], distmat[aid2][bid2])+1
            tmp.append(d)
        # calculate weights and append to list
        # Note: the description in the paper is not very clear, the following
        # formula was found to give the same weights as shown in Fig. 1
        # For a ring of size N: w = N/2 * exp(-beta*(sum(d of each bond in ring)/N)^2)
        w = sum(tmp)/float(num)
        w = math.exp(-beta*(w*w))
        weights.append(w*(num/2.0))
    return weights
def CalculateTFD(torsions1, torsions2, weights=None):
    """ Calculate the torsion deviation fingerprint (TFD) given two lists of
        torsion angles.

        Arguments:
        - torsions1: torsion angles of conformation 1 as (angle, maxdev) pairs
        - torsions2: torsion angles of conformation 2 as (angle, maxdev) pairs
        - weights:   list of torsion weights (default: None)

        Return: TFD value (float)
    """
    if len(torsions1) != len(torsions2):
        raise ValueError("List of torsions angles must have the same size.")
    # deviation per torsion, normalized by its maximal possible deviation
    deviations = []
    for (angle1, maxdev), (angle2, _) in zip(torsions1, torsions2):
        diff = abs(angle1 - angle2)
        # direction does not matter: use the smaller way around the circle
        diff = min(diff, 360.0 - diff)
        deviations.append(diff / maxdev)
    if weights is not None:
        if len(weights) != len(torsions1):
            raise ValueError("List of torsions angles and weights must have the same size.")
        deviations = [d * w for d, w in zip(deviations, weights)]
        sum_weights = sum(weights)
    else:
        sum_weights = len(deviations)
    tfd = sum(deviations)
    if sum_weights != 0:  # avoid division by zero
        tfd /= sum_weights
    return tfd
# some wrapper functions
def GetTFDBetweenConformers(mol, confIds1, confIds2, useWeights=True, maxDev='equal', symmRadius=2):
    """ Wrapper to calculate the TFD between two lists of conformers
        of a molecule

        Arguments:
        - mol:        the molecule of interest
        - confIds1:   first list of conformer indices
        - confIds2:   second list of conformer indices
        - useWeights: flag for using torsion weights in the TFD calculation
        - maxDev:     maximal deviation used for normalization
                      'equal': all torsions are normalized using 180.0 (default)
                      'spec':  each torsion is normalized using its specific
                               maximal deviation as given in the paper
        - symmRadius: radius used for calculating the atom invariants
                      (default: 2)

        Return: list of TFD values (all pairs, confIds1-major order)
    """
    tl, tlr = CalculateTorsionLists(mol, maxDev=maxDev, symmRadius=symmRadius)
    angles1 = [CalculateTorsionAngles(mol, tl, tlr, confId=cid) for cid in confIds1]
    angles2 = [CalculateTorsionAngles(mol, tl, tlr, confId=cid) for cid in confIds2]
    weights = CalculateTorsionWeights(mol) if useWeights else None
    return [CalculateTFD(a1, a2, weights=weights)
            for a1 in angles1 for a2 in angles2]
def GetTFDBetweenMolecules(mol1, mol2, confIds1=-1, confIds2=-1, useWeights=True, maxDev='equal', symmRadius=2):
    """ Wrapper to calculate the TFD between two lists of conformers
        of two molecules.
        Important: The two molecules must be instances of the same molecule

        Arguments:
        - mol1:       first instance of the molecule of interest
        - mol2:       second instance the molecule of interest
        - confIds1:   list of conformer indices from mol1 (default: first conformer)
        - confIds2:   list of conformer indices from mol2 (default: first conformer)
        - useWeights: flag for using torsion weights in the TFD calculation
        - maxDev:     maximal deviation used for normalization
                      'equal': all torsions are normalized using 180.0 (default)
                      'spec':  each torsion is normalized using its specific
                               maximal deviation as given in the paper
        - symmRadius: radius used for calculating the atom invariants
                      (default: 2)

        Return: list of TFD values

        NOTE(review): confIds1/confIds2 are either the sentinel int -1 or a
        list of conformer ids; the `< 0` comparison below relies on that
        duality (a list compared to 0 fails on Python 3) -- confirm callers.
    """
    if (Chem.MolToSmiles(mol1) != Chem.MolToSmiles(mol2)):
        raise ValueError("The two molecules must be instances of the same molecule!")
    # torsion lists are computed once from mol1 and reused for mol2
    tl, tlr = CalculateTorsionLists(mol1, maxDev=maxDev, symmRadius=symmRadius)
    # first molecule
    if confIds1 < 0:
        torsions1 = [CalculateTorsionAngles(mol1, tl, tlr)]
    else:
        torsions1 = [CalculateTorsionAngles(mol1, tl, tlr, confId=cid) for cid in confIds1]
    # second molecule
    if confIds2 < 0:
        torsions2 = [CalculateTorsionAngles(mol2, tl, tlr)]
    else:
        torsions2 = [CalculateTorsionAngles(mol2, tl, tlr, confId=cid) for cid in confIds2]
    tfd = []
    if useWeights:
        # weights are derived from mol1 only (same topology as mol2)
        weights = CalculateTorsionWeights(mol1)
        for t1 in torsions1:
            for t2 in torsions2:
                tfd.append(CalculateTFD(t1, t2, weights=weights))
    else:
        for t1 in torsions1:
            for t2 in torsions2:
                tfd.append(CalculateTFD(t1, t2))
    return tfd
def GetTFDMatrix(mol, useWeights=True, maxDev='equal', symmRadius=2):
    """ Wrapper to calculate the matrix of TFD values for the
        conformers of a molecule.

        Arguments:
        - mol:        the molecule of interest
        - useWeights: flag for using torsion weights in the TFD calculation
        - maxDev:     maximal deviation used for normalization
                      'equal': all torsions are normalized using 180.0 (default)
                      'spec':  each torsion is normalized using its specific
                               maximal deviation as given in the paper
        - symmRadius: radius used for calculating the atom invariants
                      (default: 2)

        Return: matrix of TFD values
        Note that the returned matrix is symmetrical, i.e. it is the
        lower half of the matrix, e.g. for 5 conformers:
        matrix = [ a,
                   b, c,
                   d, e, f,
                   g, h, i, j]
    """
    tl, tlr = CalculateTorsionLists(mol, maxDev=maxDev, symmRadius=symmRadius)
    torsions = [CalculateTorsionAngles(mol, tl, tlr, confId=conf.GetId())
                for conf in mol.GetConformers()]
    weights = CalculateTorsionWeights(mol) if useWeights else None
    tfdmat = []
    for row in range(mol.GetNumConformers()):
        for col in range(row):
            tfdmat.append(CalculateTFD(torsions[row], torsions[col], weights=weights))
    return tfdmat
|
AlexanderSavelyev/rdkit
|
rdkit/Chem/TorsionFingerprints.py
|
Python
|
bsd-3-clause
| 19,695
|
[
"RDKit"
] |
10c10c8ca90ddf7d28555ca209f49ab32dd5c4dcb985f2722d28ef6fcd20e401
|
import FFPopSim as h
import numpy as np
from matplotlib import pyplot as plt
import random as rd
from Bio import Phylo
# Describe the experiment to the user (Python 2 print statement).
print "This script is meant to illustrate and explore the effect of\n\
positive selection on genealogies in asexual and sexual populations. \n\n\
Simulations are performed using an infinite sites model with L segregating\n\
sites at which mutations with identical beneficial effect are injected.\n\n"

# suggested values
# neutral asexual:  N=100    s=0.00001  r=0.0
# selected asexual: N=10000  s=0.01     r=0.0
# selected sexual:  N=1000   s=0.01     r=1.0
L = 1000           #number of segregating sites
s = 1e-2           #single site effect
N = 10000          #population size
r = 0.0            #outcrossing rate
sample_size=30     #number of individuals whose genealogy is looked at
nsamples = 3       #number of trees
burnin = 2000      #either ~5*N or 5/s, depending on whether coalescence is dominated by drift or draft
dt = 1000          #time between samples

#set up population, switch on infinite sites mode
pop=h.haploid_highd(L, all_polymorphic=True)

#set the population size via the carrying capacity
pop.carrying_capacity= N

#set the crossover rate, outcrossing_rate and recombination model
pop.outcrossing_rate = r
pop.recombination_model = h.CROSSOVERS
pop.crossover_rate = 1.0/pop.L

#set the effect sizes of the mutations that are injected (the same at each site in this case)
pop.set_fitness_additive(np.ones(L)*s)

#track the genealogy at a central locus L/2 (which one doesn't matter in the asexual case)
pop.track_locus_genealogy([L/2])

#initialize the populations
pop.set_wildtype(pop.carrying_capacity)

print "Population parameters:"
pop.status()

#burn in: evolve in chunks of 100 generations until the burn-in is reached
print "\nEquilibrate:"
while pop.generation<burnin:
    print "Burn in: at", pop.generation, "out of", burnin, "generations"
    pop.evolve(100)

print "\nPlot coalescent trees:"
fig=plt.figure(figsize=(7,10))
fig.suptitle("".join(map(str,['N=',N,' r=',r,' L=',L, ' s=',s])), fontsize=18)
for si in xrange(nsamples):
    print "sample",si,"out of",nsamples
    #evolve a while before sampling the next tree
    pop.evolve(dt)
    #draw a sample from the population, convert its genealogy to a BioPython tree object and plot
    tree = pop.genealogy.get_tree(L/2)
    subtree = tree.create_subtree_from_keys(rd.sample(tree.leafs,sample_size)).to_Biopython_tree()
    subtree.ladderize()
    plt.subplot(3,1,si+1)
    Phylo.draw(subtree,label_func=lambda x:"")
    plt.draw()
#save the figure; the file name encodes the simulation parameters
plt.savefig("".join(map(str,['tree_', 'N=',N,'_r=',r,'_L=',L, '_s=',s,'.pdf'])))
|
wrshoemaker/ffpopsim
|
examples/genealogies_with_selection.py
|
Python
|
gpl-3.0
| 2,468
|
[
"Biopython"
] |
d875232d65972c6a23545a1ab7b77a316c736e518e85a3869cb5cd21e3f553ac
|
# -*- mode: python; coding: utf-8 -*-
#
# Copyright (C) 2017 <contact@redhat.com>
#
# Author: Loic Dachary <loic@dachary.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import division
import argparse
import collections
import copy
import json
import logging
import re
import struct
import textwrap
from crush import main
from crush import analyze
from crush.ceph import convert
from crush import Crush, LibCrush
log = logging.getLogger(__name__)
class MappingError(Exception):
    """Raised when a CRUSH mapping computed locally disagrees with the pgmap
    reported by the cluster."""
    pass


class UnsupportedError(Exception):
    """Raised when the report uses a feature this converter does not support."""
    pass


class HealthError(Exception):
    """Raised when the cluster health is not HEALTH_OK."""
    pass
class CephReport(object):
    """Converts the JSON output of `ceph report` into a python-crush crushmap,
    verifying along the way that the converted map reproduces the cluster's
    actual PG mappings."""

    def parse_report(self, report):
        """Parse a `ceph report` dump and return the converted crushmap.

        Raises HealthError when the cluster is not HEALTH_OK, UnsupportedError
        for pool/osd features the converter cannot handle, and MappingError
        when the recomputed mappings disagree with the reported pgmap.
        """
        if report['health']['overall_status'] != 'HEALTH_OK':
            raise HealthError("expected health overall_status == HEALTH_OK but got " +
                              report['health']['overall_status'] + "instead")
        # Map the numeric ceph version to a single release letter
        # (0.94 -> 'h', 0.87 -> 'g', 0.80 -> 'f'; >= 1.0 uses the major number).
        # NOTE(review): a 0.x minor other than 94/87/80 leaves `version` unset
        # and the next use raises NameError -- confirm that is intended.
        v = report['version'].split('.')
        if v[0] == "0":
            if v[1] == '94':
                version = 'h'
            elif v[1] == '87':
                version = 'g'
            elif v[1] == '80':
                version = 'f'
        else:
            version = chr(ord('a') + int(v[0]) - 1)
        # First conversion without choose_args recovery: used only to verify
        # the mappings below.
        crushmap = CephCrushmapConverter().parse_ceph(report['crushmap'],
                                                      version=version,
                                                      recover_choose_args=False)
        # pgid -> acting osd list, as reported by the cluster
        mappings = collections.defaultdict(lambda: {})
        for pg_stat in report['pgmap']['pg_stats']:
            mappings[pg_stat['pgid']] = pg_stat['acting']
        ruleset2name = {}
        for rule in crushmap['private']['rules']:
            ruleset2name[rule['ruleset']] = rule['rule_name']
        c = LibCrush(backward_compatibility=True)
        c.parse(crushmap)
        # item name -> numeric id, gathered from the converted trees
        name2id = {}

        def collect_items(children):
            for child in children:
                if 'id' in child:
                    name2id[child['name']] = child['id']
                collect_items(child.get('children', []))
        collect_items(crushmap['trees'])
        weights = Crush.parse_osdmap_weights(report['osdmap'])
        for osd in report['osdmap']["osds"]:
            if osd["primary_affinity"] != 1.0:
                raise UnsupportedError(
                    "osd." + str(osd["osd"]) + " primary affinity is != 1.0")
        failed_mapping = False
        for pool in report['osdmap']['pools']:
            # only plain replicated pools with rjenkins hashing and the
            # hashpspool flag are supported
            if pool['type'] != 1:
                raise UnsupportedError(
                    "pool " + pool['pool_name'] + " is type " + str(pool['type']) +
                    " is not supported, only type == 1 (replicated)")
            if pool['object_hash'] != 2:
                raise UnsupportedError(
                    "pool " + pool['pool_name'] + " object_hash " + str(pool['object_hash']) +
                    " is not supported, only object_hash == 2 (rjenkins)")
            if pool['flags_names'] != 'hashpspool':
                raise UnsupportedError(
                    "pool " + pool['pool_name'] + " has flags_names " +
                    "'" + str(pool['flags_names']) + "'" +
                    " is no supported, only hashpspool")
            ruleset = pool['crush_ruleset']
            if str(ruleset) in crushmap.get('choose_args', {}):
                choose_args = str(ruleset)
            else:
                choose_args = None
            rule = ruleset2name[ruleset]
            size = pool['size']
            log.info("verifying pool {} pg_num {} pgp_num {}".format(
                pool['pool'], pool['pg_num'], pool['pg_placement_num']))
            # pg name -> placement seed (pps) for every pg of the pool
            values = LibCrush().ceph_pool_pps(pool['pool'],
                                              pool['pg_num'],
                                              pool['pg_placement_num'])
            kwargs = {
                "rule": str(rule),
                "replication_count": size,
            }
            if choose_args:
                kwargs["choose_args"] = choose_args
            if weights:
                kwargs["weights"] = weights
            # recompute every pg mapping and compare with the reported one
            for (name, pps) in values.items():
                if name not in mappings:
                    failed_mapping = True
                    log.error(name + " is not in pgmap")
                    continue
                kwargs["value"] = pps
                mapped = c.map(**kwargs)
                osds = [name2id[x] for x in mapped]
                if osds != mappings[name]:
                    failed_mapping = True
                    log.error("{} map to {} instead of {}".format(
                        name, osds, mappings[name]))
                    continue
        if failed_mapping:
            raise MappingError("some mapping failed, please file a bug at "
                               "http://libcrush.org/main/python-crush/issues/new")
        # Verification passed: convert again, this time recovering choose_args
        # from any *-target-weight shadow trees.
        crushmap = CephCrushmapConverter().parse_ceph(report['crushmap'],
                                                      version=version,
                                                      recover_choose_args=True)
        crushmap['private']['pools'] = report['osdmap']['pools']
        crushmap['private']['version'] = version
        return crushmap
class CephTunablesConverter(object):
    """Filters and rewrites ceph CRUSH tunables.

    `known` is the set of tunables recognized for every release;
    'chooseleaf_stable' is additionally recognized from release 'j' on.
    """

    known = set([
        'choose_local_tries',
        'choose_local_fallback_tries',
        'chooseleaf_vary_r',
        'chooseleaf_descend_once',
        'straw_calc_version',
        'choose_total_tries',
    ])

    @staticmethod
    def read_tunables(tunables, version):
        """Return the subset of `tunables` recognized for `version`.

        Before release 'j', chooseleaf_stable is forced to 0."""
        recognized = copy.copy(CephTunablesConverter.known)
        out = {}
        if version >= 'j':
            recognized.add('chooseleaf_stable')
        else:
            out['chooseleaf_stable'] = 0
        out.update((k, v) for (k, v) in tunables.items() if k in recognized)
        return out

    @staticmethod
    def rewrite_tunables_txt(tunables, path, version):
        """Rewrite the `tunable ...` lines of the crushmap text file at `path`,
        keeping every non-tunable line, using the recognized subset of
        `tunables` for `version`."""
        recognized = copy.copy(CephTunablesConverter.known)
        if version >= 'j':
            recognized.add('chooseleaf_stable')
        # drop existing tunable lines, keep everything else
        kept = [l for l in open(path).readlines() if not re.match('^tunable ', l)]
        # prepend the recognized tunables (sorted for determinism)
        for k in sorted(tunables.keys()):
            if k in recognized:
                kept.insert(0, 'tunable ' + k + ' ' + str(tunables[k]) + '\n')
        open(path, 'w').write("".join(kept))
class CephCrushmapConverter(object):
def convert_item(self, item, ceph):
    """Convert one bucket item: devices (id >= 0) get their name resolved,
    nested buckets (id < 0) become a reference to be dereferenced later."""
    converted = {"weight": item['weight']}
    if item['id'] >= 0:
        converted["id"] = item['id']
        converted["name"] = ceph['id2name'][item['id']]
    else:
        converted["reference_id"] = item['id']
    return converted
def convert_bucket(self, bucket, ceph):
    """Convert one ceph bucket into the python-crush representation,
    converting each of its items in order."""
    children = []
    previous_pos = -1
    for item in bucket.get('items', []):
        # the actual pos value does not change the mapping
        # when there is an empty item (we do not store pos)
        # but the order of the items is important and we
        # need to assert that the list is ordered
        assert previous_pos < item['pos']
        previous_pos = item['pos']
        children.append(self.convert_item(item, ceph))
    return {
        "weight": bucket['weight'],
        "id": bucket['id'],
        "name": bucket['name'],
        "algorithm": bucket['alg'],
        "type": bucket['type_name'],
        "children": children,
    }
def collect_items(self, children, ceph):
    """Recursively index into ceph['id2item'] every descendant carrying an 'id'."""
    for entry in children:
        if 'id' in entry:
            ceph['id2item'][entry['id']] = entry
        self.collect_items(entry.get('children', []), ceph)
def dereference(self, children, ceph):
for i in range(len(children)):
child = children[i]
if 'reference_id' in child:
id = child['reference_id']
new_child = copy.copy(ceph['id2item'][id])
new_child['weight'] = child['weight']
ceph['is_child'].add(id)
children[i] = new_child
self.dereference(child.get('children', []), ceph)
@staticmethod
def recover_choose_args(ceph):
name2id = {}
id2bucket = {}
for bucket in ceph['buckets']:
name2id[bucket['name']] = bucket['id']
id2bucket[bucket['id']] = bucket
buckets = []
name2target_weights = {}
has_target_weight = False
for bucket in ceph['buckets']:
log.debug(str(bucket))
if bucket['name'].endswith('-target-weight'):
has_target_weight = True
name = bucket['name'][:-14]
target_weights = {}
for i in bucket['items']:
if i['id'] < 0:
c = id2bucket[i['id']]
assert c['name'].endswith('-target-weight')
child_name = c['name'][:-14]
id = name2id[child_name]
else:
id = i['id']
target_weights[id] = i['weight']
name2target_weights[name] = target_weights
else:
buckets.append(bucket)
if not has_target_weight:
return
choose_args = []
for bucket in buckets:
if bucket['name'] in name2target_weights:
target_weights = name2target_weights[bucket['name']]
weight_set = []
for child in bucket['items']:
if child['id'] in target_weights:
weight_set.append(child['weight'] / 0x10000)
child['weight'] = target_weights[child['id']]
else:
weight_set.append(0)
choose_args.append({
'bucket_id': bucket['id'],
'weight_set': [weight_set],
})
ceph['buckets'] = buckets
ceph['choose_args'] = {" placeholder ": choose_args}
def convert_buckets(self, ceph, recover_choose_args):
ceph['is_child'] = set()
ceph['id2item'] = {}
if recover_choose_args:
self.recover_choose_args(ceph)
converted = [self.convert_bucket(r, ceph) for r in ceph['buckets']]
self.collect_items(converted, ceph)
self.dereference(converted, ceph)
return list(filter(lambda c: c['id'] not in ceph['is_child'], converted))
def convert_rule(self, ceph_rule, ceph):
name = ceph_rule['rule_name']
rule = []
for ceph_step in ceph_rule['steps']:
if 'opcode' in ceph_step:
if ceph_step['opcode'] in (10, 11, 12, 13):
id2name = {
10: 'set_choose_local_tries',
11: 'set_choose_local_fallback_tries',
12: 'set_chooseleaf_vary_r',
13: 'set_chooseleaf_stable',
}
step = [id2name[ceph_step['opcode']], ceph_step['arg1']]
else:
assert 0, "unexpected rule opcode " + str(ceph_step['opcode'])
elif 'op' in ceph_step:
if ceph_step['op'] == 'take':
step = [ceph_step['op'], ceph_step['item_name']]
elif ceph_step['op'] in ('chooseleaf_firstn',
'choose_firstn',
'chooseleaf_indep',
'choose_indep'):
(choose, how) = ceph_step['op'].split('_')
if ceph['type2id'][ceph_step['type']] == 0:
type = 0
else:
type = ceph_step['type']
step = [choose, how, ceph_step['num'], 'type', type]
elif ceph_step['op'] in ('set_choose_local_tries',
'set_choose_local_fallback_tries',
'set_chooseleaf_vary_r',
'set_chooseleaf_stable',
'set_choose_tries',
'set_chooseleaf_tries'):
step = [ceph_step['op'], ceph_step['num']]
elif ceph_step['op'] == 'emit':
step = ['emit']
elif ceph_step['op'] == 'noop':
pass
else:
assert 0, "unexpected rule op " + str(ceph_step['op'])
else:
assert 0, "no op or opcode found"
rule.append(step)
return (name, rule)
def convert_choose_args(self, ceph):
choose_args_map = copy.deepcopy(ceph['choose_args'])
for (name, choose_args) in choose_args_map.items():
for choose_arg in choose_args:
if 'weight_set' in choose_arg:
choose_arg['weight_set'] = [
[int(x * 0x10000) for x in weights]
for weights in choose_arg['weight_set']
]
return choose_args_map
def parse_ceph(self, ceph, version, recover_choose_args):
ceph['id2name'] = {d['id']: d['name'] for d in ceph['devices']}
ceph['type2id'] = {t['name']: t['type_id'] for t in ceph['types']}
j = {
'private': {},
}
j['types'] = ceph['types']
j['trees'] = self.convert_buckets(ceph, recover_choose_args)
j['rules'] = {}
for ceph_rule in ceph['rules']:
(name, rule) = self.convert_rule(ceph_rule, ceph)
j['rules'][name] = rule
j['private']['rules'] = ceph['rules']
j['tunables'] = CephTunablesConverter.read_tunables(ceph['tunables'], version)
j['private']['tunables'] = ceph['tunables']
if 'choose_args' in ceph:
j['choose_args'] = self.convert_choose_args(ceph)
return j
class CephCrush(Crush):
    """Crush subclass that reads and writes Ceph specific crushmap formats."""

    #
    # reading a crushmap from a file
    #
    @staticmethod
    def _is_ceph_file(something):
        # Return True if *something* is a binary or a text Ceph crushmap.
        fmt = "I"
        crush_magic = 0x00010000
        # a binary crushmap starts with the CRUSH magic number;
        # use 'with' so the handles are closed (the original leaked them)
        with open(something, mode='rb') as f:
            head = f.read(struct.calcsize(fmt))
        if struct.unpack(fmt, head)[0] == crush_magic:
            return True
        # a text crushmap declares devices and types
        with open(something) as f:
            content = f.read()
        if (re.search("^device ", content, re.MULTILINE) and
                re.search("^type ", content, re.MULTILINE)):
            return True
        return False

    @staticmethod
    def _convert_from_file(something):
        # Return (crushmap dict, format name) for the file at *something*.
        if CephCrush._is_ceph_file(something):
            crushmap = LibCrush().ceph_read(something)
            return (json.loads(crushmap), 'ceph-json')
        else:
            with open(something) as f_json:
                crushmap = json.load(f_json)
                log.debug("_detect_file_format: valid json file")
            if 'devices' in crushmap:  # Ceph json format
                return (crushmap, 'ceph-json')
            elif 'cluster_fingerprint' in crushmap:
                return (crushmap, 'ceph-report')
            return (crushmap, 'python-crush-json')

    @staticmethod
    def _convert_to_dict(something):
        # Accept either an already parsed dict or a path to a file.
        if type(something) in (dict, collections.OrderedDict):
            if 'devices' in something:
                return (something, 'ceph-json')
            elif 'cluster_fingerprint' in something:
                return (something, 'ceph-report')
            return (something, 'python-crush-json')
        else:
            return CephCrush._convert_from_file(something)

    def _convert_to_crushmap(self, something):
        """Normalize *something* (path or dict) to a python-crush crushmap."""
        (something, format) = CephCrush._convert_to_dict(something)
        if format == 'ceph-json':
            # default to luminous when the version is not recorded
            version = something.get('private', {}).get('version', 'l')
            crushmap = CephCrushmapConverter().parse_ceph(something,
                                                          version=version,
                                                          recover_choose_args=True)
        elif format == 'ceph-report':
            crushmap = CephReport().parse_report(something)
        else:
            crushmap = something
        return crushmap

    #
    # writing a crushmap to a file
    #
    @staticmethod
    def choose_args_int_index(crushmap):
        # Ceph expects choose_args keys (pool ids) to be integers.
        if 'choose_args' not in crushmap:
            return crushmap
        crushmap['choose_args'] = {
            int(k): v for (k, v) in crushmap['choose_args'].items()
        }
        return crushmap

    def ceph_version_compat(self):
        """Encode choose_args as '-target-weight' shadow trees so the map
        can be written for pre-luminous versions (the inverse of
        CephCrushmapConverter.recover_choose_args)."""
        #
        # sanity checks
        #
        if self.c.ceph_incompat():
            raise Exception("choose_args cannot be encoded for a version lower than luminous")
        self._merge_choose_args()
        #
        # create the shadow trees with the target weights
        #
        self.max_bucket_id = min(self._id2item.keys())

        def rename(bucket):
            # give every shadow bucket a fresh id and a suffixed name
            if 'children' not in bucket:
                return
            self.max_bucket_id -= 1
            bucket['id'] = self.max_bucket_id
            bucket['name'] += '-target-weight'
            if 'choose_args' in bucket:
                del bucket['choose_args']
            for child in bucket.get('children', []):
                rename(child)

        shadow_trees = copy.deepcopy(self.crushmap['trees'])
        for tree in shadow_trees:
            rename(tree)
        #
        # override the target weights with the weight set
        #
        def reweight(bucket):
            if 'children' not in bucket:
                return
            children = bucket['children']
            if 'choose_args' in bucket:
                choose_arg = next(iter(bucket['choose_args'].values()))
                weight_set = choose_arg['weight_set'][0]
                for i in range(len(children)):
                    children[i]['weight'] = weight_set[i]
                del bucket['choose_args']
            for child in children:
                reweight(child)

        for tree in self.crushmap['trees']:
            reweight(tree)
        self.crushmap['trees'].extend(shadow_trees)

    def transform_to_write(self, version):
        """Prepare the crushmap for writing; return True if it was changed."""
        if 'choose_args' not in self.crushmap:
            return False
        self.choose_args_int_index(self.crushmap)
        self.parse(self.crushmap)
        if version >= 'luminous':
            return True
        # older versions cannot encode choose_args natively
        self.ceph_version_compat()
        self.parse(self.crushmap)
        return True

    def to_file(self, path, format, version):
        """Write the crushmap to *path* in the requested *format*."""
        if format == 'python-json':
            super(CephCrush, self).to_file(path)
        else:
            self.transform_to_write(version)
            info = self.crushmap.get('private')
            self.c.ceph_write(path, format, info)
            # the text format needs its tunable lines rewritten by hand
            if info and info.get('tunables') and format == 'txt':
                CephTunablesConverter.rewrite_tunables_txt(info['tunables'], path, version)
class Ceph(main.Main):
    """Ceph flavored command line front-end: wires Ceph specific options,
    file formats and crushmap conversions into the generic python-crush
    main program."""

    def __init__(self):
        super(Ceph, self).__init__()
        self.parser.add_argument(
            '--no-backward-compatibility',
            dest='backward_compatibility',
            action='store_false', default=True,
            help='do not allow backward compatibility tunables (default: allowed)')
        convert.Convert.set_parser(self.subparsers, self.hook_convert_args)

    def create_parser(self):
        # override the generic parser with a Ceph specific description
        self.parser = argparse.ArgumentParser(
            formatter_class=argparse.RawDescriptionHelpFormatter,
            description=textwrap.dedent("""\
            Ceph crush compare and analyze
            """),
            epilog=textwrap.dedent("""
            """),
        )

    def clone(self):
        # fresh instance for sub-commands that need their own Main
        return Ceph()

    def hook_common_args(self, parser):
        # pool related options shared by analyze/compare/optimize
        parser.add_argument(
            '--pool',
            help='pool',
            type=int)
        parser.add_argument(
            '--pg-num',
            help='pg-num',
            type=int)
        parser.add_argument(
            '--pgp-num',
            help='pgp-num',
            type=int)

    def hook_common_post_sanity_check_args(self, args):
        # --pool requires the PG counts and excludes --values-count
        if self.args.pool and self.args.values_count != analyze.Analyze.DEFAULT_VALUES_COUNT:
            raise Exception("--pool and --values-count are mutually exclusive")
        if self.args.pool:
            if not self.args.pg_num:
                raise Exception("--pg-num is required with --pool")
            if not self.args.pgp_num:
                raise Exception("--pgp-num is required with --pool")

    # file formats understood for input and output
    formats = ('txt', 'json', 'python-json', 'crush')

    def in_args(self, parser):
        parser.add_argument(
            '--in-path',
            help='path of the input file')
        parser.add_argument(
            '--in-format',
            choices=Ceph.formats,
            help='format of the input file')

    def hook_in_args_pre_sanity_check(self, args):
        if not args.in_path:
            raise Exception("missing --in-path")

    def out_args(self, parser):
        parser.add_argument(
            '--out-format',
            choices=Ceph.formats,
            default='crush',
            help='format of the output file')
        # one letter aliases and release names of the supported Ceph versions
        versions = ('f', 'firefly',
                    'g', 'giant',
                    'h', 'hammer',
                    'i', 'infernalis',
                    'j', 'jewel',
                    'k', 'kraken',
                    'l', 'luminous',
                    'm',
                    'n')
        parser.add_argument(
            '--out-version',
            choices=versions,
            default='luminous',
            help='version of the output file (default luminous)')

    def hook_convert_args(self, parser):
        self.in_args(parser)
        self.out_args(parser)

    def hook_convert_pre_sanity_check_args(self, args):
        self.hook_in_args_pre_sanity_check(args)

    def hook_convert_post_sanity_check_args(self, args):
        pass

    def hook_analyze_args(self, parser):
        self.hook_common_args(parser)

    def hook_analyze_pre_sanity_check_args(self, args):
        super(Ceph, self).hook_analyze_pre_sanity_check_args(args)

    def hook_analyze_post_sanity_check_args(self, args):
        super(Ceph, self).hook_analyze_post_sanity_check_args(args)
        self.hook_common_post_sanity_check_args(args)

    def hook_compare_args(self, parser):
        self.hook_common_args(parser)

    def hook_compare_pre_sanity_check_args(self, args):
        super(Ceph, self).hook_compare_pre_sanity_check_args(args)

    def hook_compare_post_sanity_check_args(self, args):
        super(Ceph, self).hook_compare_post_sanity_check_args(args)
        self.hook_common_post_sanity_check_args(args)

    def hook_optimize_args(self, parser):
        self.hook_common_args(parser)
        self.out_args(parser)

    def hook_optimize_pre_sanity_check_args(self, args):
        super(Ceph, self).hook_optimize_pre_sanity_check_args(args)

    def hook_optimize_post_sanity_check_args(self, args):
        super(Ceph, self).hook_optimize_post_sanity_check_args(args)
        self.hook_common_post_sanity_check_args(args)

    def hook_create_values(self):
        # with --pool the values are the PPS of that pool instead of the
        # generic values created by the base class
        if self.args.pool is not None:
            return LibCrush().ceph_pool_pps(self.args.pool, self.args.pg_num, self.args.pgp_num)
        else:
            return super(Ceph, self).hook_create_values()

    def value_name(self):
        return 'PGs'

    def get_ceph_version(self, crushmap):
        # default to luminous ('l') when the crushmap carries no version
        if 'version' in crushmap['private']:
            return crushmap['private']['version']
        return 'l'

    def has_compat_crushmap(self, crushmap):
        # a " placeholder " choose_args entry is left behind by
        # CephCrushmapConverter.recover_choose_args for pre-luminous maps
        return crushmap.get('choose_args', {}).get(" placeholder ") is not None

    def get_compat_choose_args(self, crushmap):
        """Return the pool id (as a string) that the compat choose_args
        entry should be renamed to, or None if there is none."""
        #
        # if converting from a pre-Luminous encoded crushmap
        #
        if not self.has_compat_crushmap(crushmap):
            return None
        elif crushmap.get('private', {}).get('pools', []):
            if not hasattr(self.args, 'pool') or self.args.pool is None:
                # without --pool there must be exactly one pool to pick
                if len(crushmap['private']['pools']) != 1:
                    raise Exception('--pool is required')
                pool = crushmap['private']['pools'][0]
                return str(pool['pool'])
            else:
                pools = []
                for pool in crushmap['private']['pools']:
                    if self.args.pool == pool['pool']:
                        return pool['pool']
                    pools.append(pool['pool'])
                raise Exception(str(self.args.pool) + " is not a known pool " + str(pools))
        else:
            return "0"

    def set_analyze_args(self, crushmap):
        """Derive --pool, --replication-count, --pg-num, --pgp-num, --rule
        and --choose-args from the pools recorded in the crushmap, updating
        both self.args and self.argv."""
        if 'private' not in crushmap:
            return self.args.choose_args
        if 'pools' not in crushmap['private']:
            return self.args.choose_args
        compat_pool = self.get_compat_choose_args(crushmap)
        if (compat_pool is not None and
                (self.args.pool is None or self.args.pool == int(compat_pool))):
            self.args.pool = int(compat_pool)
            self.argv.append('--pool=' + str(self.args.pool))
            self.args.choose_args = str(self.args.pool)
            self.argv.append('--choose-args=' + self.args.choose_args)
        for pool in crushmap['private']['pools']:
            if pool['pool'] == self.args.pool:
                self.args.replication_count = pool['size']
                self.argv.append('--replication-count=' + str(pool['size']))
                self.args.pg_num = pool['pg_num']
                self.argv.append('--pg-num=' + str(pool['pg_num']))
                self.args.pgp_num = pool['pg_placement_num']
                self.argv.append('--pgp-num=' + str(pool['pg_placement_num']))
                # the rule is found via the pool's ruleset
                for rule in crushmap['private']['rules']:
                    if rule['ruleset'] == pool['crush_ruleset']:
                        self.args.rule = str(rule['rule_name'])
                        self.argv.append('--rule=' + str(rule['rule_name']))
                if crushmap.get('choose_args', {}).get(str(self.args.pool)):
                    self.args.choose_args = str(self.args.pool)
                    self.argv.append('--choose-args=' + self.args.choose_args)
        log.info('argv = ' + " ".join(self.argv))
        return self.args.choose_args

    def set_optimize_args(self, crushmap):
        """Derive --out-version and --choose-args for the optimize command."""
        # NOTE(review): 'private' is accessed here before the
        # 'private' not in crushmap guard below -- a crushmap without
        # 'private' would raise KeyError on this first line; confirm the
        # callers always provide it
        if 'version' not in crushmap['private']:
            return self.args.choose_args
        self.args.out_version = self.get_ceph_version(crushmap)
        self.argv.append('--out-version=' + self.args.out_version)
        if self.args.out_version < 'luminous':
            # pre-luminous encodings cannot carry per-position weights
            self.args.with_positions = False
            self.argv.append('--no-positions')
        if not hasattr(self.args, 'pool'):
            return self.args.choose_args
        if 'private' not in crushmap:
            return self.args.choose_args
        if 'pools' not in crushmap['private']:
            return self.args.choose_args
        if self.args.choose_args is None:
            self.args.choose_args = str(self.args.pool)
            self.argv.append('--choose-args=' + self.args.choose_args)
        log.warning('argv = ' + " ".join(self.argv))
        return self.args.choose_args

    def set_compat_choose_args(self, c, crushmap, choose_args_name):
        # rename the " placeholder " choose_args entry to its final name
        # and re-parse so the Crush object picks it up
        if not self.has_compat_crushmap(crushmap):
            return
        assert choose_args_name
        choose_args = crushmap['choose_args']
        choose_args[choose_args_name] = choose_args[' placeholder ']
        del choose_args[' placeholder ']
        c.parse(crushmap)

    def convert_to_crushmap(self, crushmap):
        """Parse and normalize *crushmap*, then adjust the command line
        arguments according to the sub-command being run."""
        c = CephCrush(verbose=self.args.debug,
                      backward_compatibility=self.args.backward_compatibility)
        c.parse(crushmap)
        crushmap = c.get_crushmap()
        if self.args.func.__name__ == 'Analyze':
            choose_args_name = self.set_analyze_args(crushmap)
        elif self.args.func.__name__ == 'Optimize':
            self.set_analyze_args(crushmap)
            choose_args_name = self.set_optimize_args(crushmap)
        elif self.args.func.__name__ == 'Convert':
            choose_args_name = self.get_compat_choose_args(crushmap)
        elif self.args.func.__name__ == 'Compare':
            choose_args_name = self.set_analyze_args(crushmap)
        else:
            raise Exception('Unexpected func=' + str(self.args.func.__name__))
        self.set_compat_choose_args(c, crushmap, choose_args_name)
        return crushmap

    def crushmap_to_file(self, crushmap):
        # write the crushmap honoring --out-path/--out-format/--out-version
        c = CephCrush(verbose=self.args.debug,
                      backward_compatibility=self.args.backward_compatibility)
        c.parse(crushmap)
        c.to_file(self.args.out_path, self.args.out_format, self.args.out_version)
|
SpandanKumarSahu/python-crush
|
crush/ceph/__init__.py
|
Python
|
gpl-3.0
| 29,537
|
[
"Firefly"
] |
4bcb175add4419ddb18ef99c77635ab7c85423024e222d6b5da7375b7f0c5996
|
class IOSystem(object):
    """Load a molecular geometry and its basis set from files."""

    def __init__(self):
        pass

    @classmethod
    def from_file(cls, geofile, basisfile):
        """Build (molecule, basis_set) from a .xyz geometry file and a
        .nwchem basis set file; any other extension raises ValueError."""
        if not geofile.endswith('.xyz'):
            raise ValueError('Unknown file format')
        from moha.io.iogeometry import load_xyz
        molecule = load_xyz(geofile)
        # collect the per-atom data needed to resolve the basis set
        symbols = [atom.symbol for atom in molecule.atoms]
        coordinates = [atom.coordinate for atom in molecule.atoms]
        if not basisfile.endswith('.nwchem'):
            raise ValueError('Unknown file format')
        from moha.io.iobasis import load_nwchem
        basis_set = load_nwchem(symbols, coordinates, basisfile)
        return molecule, basis_set
|
fhqgfss/MoHa
|
moha/io/iosystem.py
|
Python
|
mit
| 764
|
[
"NWChem"
] |
3f487411dddc33ea5077ded5356c9b1b7e6070616857b40bc0124c75f73988e9
|
__all__ = ['weingarten_image_curvature']
import numpy as np
from .. import core
from ..core import ants_image as iio
from .. import utils
def weingarten_image_curvature(image, sigma=1.0, opt='mean'):
    """
    Uses the weingarten map to estimate image mean or gaussian curvature

    ANTsR function: `weingartenImageCurvature`

    Arguments
    ---------
    image : ANTsImage
        image from which curvature is calculated

    sigma : scalar
        smoothing parameter

    opt : string
        mean by default, otherwise `gaussian` or `characterize`

    Returns
    -------
    ANTsImage

    Example
    -------
    >>> import ants
    >>> image = ants.image_read(ants.get_ants_data('mni')).resample_image((3,3,3))
    >>> imagecurv = ants.weingarten_image_curvature(image)
    """
    if image.dimension not in {2,3}:
        raise ValueError('image must be 2D or 3D')
    if image.dimension == 2:
        # Promote the 2D image to a thin 3D volume so the 3D curvature
        # backend can run: the 2D data is replicated into slices 1..6.
        # NOTE(review): the buffer has 10 slices but only 1..6 are filled
        # and slice 4 is extracted below -- presumably deliberate padding
        # for the native filter; confirm against the ANTsR implementation.
        d = image.shape
        temp = np.zeros(list(d)+[10])
        for k in range(1,7):
            voxvals = image[:d[0],:d[1]]
            temp[:d[0],:d[1],k] = voxvals
        temp = core.from_numpy(temp)
        myspc = image.spacing
        # third axis spacing: reuse the smallest in-plane spacing
        myspc = list(myspc) + [min(myspc)]
        temp.set_spacing(myspc)
        temp = temp.clone('float')
    else:
        temp = image.clone('float')
    # option code passed to the native function: 0 (default) for mean,
    # 6 for gaussian, 5 for characterize
    optnum = 0
    if opt == 'gaussian':
        optnum = 6
    if opt == 'characterize':
        optnum = 5
    libfn = utils.get_lib_fn('weingartenImageCurvature')
    mykout = libfn(temp.pointer, sigma, optnum)
    mykout = iio.ANTsImage(pixeltype=image.pixeltype, dimension=3,
                           components=image.components, pointer=mykout)
    if image.dimension == 3:
        return mykout
    elif image.dimension == 2:
        # extract the slice corresponding to the original 2D image and
        # copy its spatial metadata back onto the result
        subarr = core.from_numpy(mykout.numpy()[:,:,4])
        return core.copy_image_info(image, subarr)
|
ANTsX/ANTsPy
|
ants/utils/weingarten_image_curvature.py
|
Python
|
apache-2.0
| 1,875
|
[
"Gaussian"
] |
994c8a89f16e3f6ebea67e15b8b940daf8dfc3d744badff0bef0291ffac28d79
|
''' Provide a base class for all objects (called Bokeh Models) that can go in
a Bokeh |Document|.
'''
from __future__ import absolute_import, print_function
import logging
logger = logging.getLogger(__file__)
from json import loads
from operator import itemgetter
from six import iteritems
from .core.json_encoder import serialize_json
from .core.properties import Any, Dict, Instance, List, String
from .core.has_props import HasProps, MetaHasProps
from .core.query import find
from .themes import default as default_theme
from .util.callback_manager import PropertyCallbackManager, EventCallbackManager
from .util.future import with_metaclass
from .util.serialization import make_id
from .util.deprecation import deprecated
from .events import Event
def collect_models(*input_values):
    ''' Collect a duplicate-free list of all other Bokeh models referred to by
    this model, or by any of its references, etc.

    Iterate over ``input_values`` and descend through their structure
    collecting all nested ``Models`` on the go. The resulting list is
    duplicate-free based on objects' identifiers.

    Args:
        *input_values (Model)
            Bokeh models to collect other models from

    Returns:
        list[Model] : all models reachable from this one.

    '''
    seen_ids = set([])
    accumulated = []
    pending = []

    def enqueue(obj):
        # only schedule models we have not already collected
        if obj._id not in seen_ids:
            pending.append(obj)

    for value in input_values:
        _visit_value_and_its_immediate_references(value, enqueue)

    # breadth-first walk over the reference graph, deduplicating by id
    while pending:
        candidate = pending.pop(0)
        if candidate._id in seen_ids:
            continue
        seen_ids.add(candidate._id)
        accumulated.append(candidate)
        _visit_immediate_value_references(candidate, enqueue)

    return accumulated
def get_class(view_model_name):
    ''' Look up a Bokeh model class, given its view model name.

    Args:
        view_model_name (str) :
            A view model name for a Bokeh model to look up

    Returns:
        Model: the model class corresponding to ``view_model_name``

    Raises:
        KeyError, if the model cannot be found

    Example:

        .. code-block:: python

            >>> from bokeh.model import get_class
            >>> get_class("Range1d")
            <class 'bokeh.models.ranges.Range1d'>

    '''

    # in order to look up from the model catalog that MetaModel maintains, it
    # has to be creates first. These imports ensure that all built-in Bokeh
    # models are represented in the catalog.
    from . import models; models
    from .plotting import Figure; Figure

    catalog = MetaModel.model_class_reverse_map
    if view_model_name not in catalog:
        raise KeyError("View model name '%s' not found" % view_model_name)
    return catalog[view_model_name]
class MetaModel(MetaHasProps):
    ''' Specialize the construction of |Model| classes.

    This class is a `metaclass`_ for |Model| that is responsible for
    automatically cataloging all Bokeh models that get defined, so that the
    serialization machinery between Bokeh and BokehJS can function properly.

    .. note::
        It is worth pointing out explicitly that this relies on the rules
        for Metaclass inheritance in Python.

    Bokeh works by replicating Python model objects (e.g. plots, ranges,
    data sources, which are all |HasProps| subclasses) into BokehJS. In the
    case of using a Bokeh server, the Bokeh model objects can also be
    synchronized bidirectionally. This is accomplished by serializing the
    models to and from a JSON format, that includes the name of the model type
    as part of the payload, as well as a unique ID, and all the attributes:

    .. code-block:: javascript

        {
            type: "Plot",
            id: 100032,
            attributes: { ... }
        }

    Typically the type name is inferred automatically from the Python class
    name, and is set as the ``__view_model__`` class attribute on the Model
    class that is create. But it is also possible to override this value
    explicitly:

    .. code-block:: python

        class Foo(Model): pass

        class Bar(Model):
            __view_model__ == "Quux"

    This metaclass will raise an error if two Bokeh models are created that
    attempt to have the same view model name. The only exception made is if
    one of the models has a custom ``__implementation__`` in its class
    definition.

    This metaclass also handles subtype relationships between Bokeh models.
    Occasionally it may be necessary for multiple class types on the Python
    side to resolve to the same type on the BokehJS side. This is called
    subtyping, and is expressed through a ``__subtype__`` class attribute on
    a model:

    .. code-block:: python

        class Foo(Model): pass

        class Bar(Foo):
            __view_model__ = "Foo"
            __subtype__ = "Bar"

    In this case, python instances of ``Foo`` and ``Bar`` will both resolve to
    ``Foo`` models in BokehJS. In the context of a Bokeh server application,
    the original python types will be faithfully round-tripped. (Without the
    ``__subtype__`` specified, the above code would raise an error due to
    duplicate view model names.)

    .. _metaclass: https://docs.python.org/3/reference/datamodel.html#metaclasses

    '''

    # catalog of every Bokeh model class created so far, keyed by its
    # __subtype__ when present, otherwise by its __view_model__ name
    model_class_reverse_map = {}

    def __new__(meta_cls, class_name, bases, class_dict):
        '''
        Raises:
            Warning
        '''

        # use an explicitly provided view model name if there is one
        if "__view_model__" not in class_dict:
            class_dict["__view_model__"] = class_name

        # call the parent metaclass to create the new model type
        newcls = super(MetaModel, meta_cls).__new__(meta_cls, class_name, bases, class_dict)

        # update the mapping of view model names to classes, checking for any duplicates
        # and handling any subtype relationships or custom implementations
        entry = class_dict.get("__subtype__", class_dict["__view_model__"])
        # a duplicate name is allowed only when the new class ships its own
        # BokehJS __implementation__
        if entry in MetaModel.model_class_reverse_map and not hasattr(newcls, "__implementation__"):
            raise Warning("Duplicate __view_model__ or __subtype__ declaration of '%s' for " \
                          "class %s. Previous definition: %s" % \
                          (entry, class_name,
                           MetaModel.model_class_reverse_map[entry]))

        MetaModel.model_class_reverse_map[entry] = newcls

        return newcls
_HTML_REPR = """
<script>
(function() {
var expanded = false;
var ellipsis = document.getElementById("%(ellipsis_id)s");
ellipsis.addEventListener("click", function() {
var rows = document.getElementsByClassName("%(cls_name)s");
for (var i = 0; i < rows.length; i++) {
var el = rows[i];
el.style.display = expanded ? "none" : "table-row";
}
ellipsis.innerHTML = expanded ? "…)" : "‹‹‹";
expanded = !expanded;
});
})();
</script>
"""
class Model(with_metaclass(MetaModel, HasProps, PropertyCallbackManager, EventCallbackManager)):
''' Base class for all objects stored in Bokeh |Document| instances.
'''
def __init__(self, **kwargs):
self._id = kwargs.pop("id", make_id())
self._document = None
super(Model, self).__init__(**kwargs)
default_theme.apply_to_model(self)
def __str__(self):
return "%s(id=%r, ...)" % (self.__class__.__name__, getattr(self, "_id", None))
__repr__ = __str__
name = String(help="""
An arbitrary, user-supplied name for this model.
This name can be useful when querying the document to retrieve specific
Bokeh models.
.. code:: python
>>> plot.circle([1,2,3], [4,5,6], name="temp")
>>> plot.select(name="temp")
[GlyphRenderer(id='399d53f5-73e9-44d9-9527-544b761c7705', ...)]
.. note::
No uniqueness guarantees or other conditions are enforced on any names
that are provided, nor is the name used directly by Bokeh for any
reason.
""")
tags = List(Any, help="""
An optional list of arbitrary, user-supplied values to attach to this
model.
This data can be useful when querying the document to retrieve specific
Bokeh models:
.. code:: python
>>> r = plot.circle([1,2,3], [4,5,6])
>>> r.tags = ["foo", 10]
>>> plot.select(tags=['foo', 10])
[GlyphRenderer(id='1de4c3df-a83d-480a-899b-fb263d3d5dd9', ...)]
Or simply a convenient way to attach any necessary metadata to a model
that can be accessed by CustomJS callbacks, etc.
.. note::
No uniqueness guarantees or other conditions are enforced on any tags
that are provided, nor are the tags used directly by Bokeh for any
reason.
""")
js_event_callbacks = Dict(String, List(Instance("bokeh.models.callbacks.CustomJS")),
help="""
A mapping of event names to lists of CustomJS callbacks.
Typically, rather then modifying this property directly, callbacks should be
added using the ``Model.js_on_event`` method:
.. code:: python
callback = CustomJS(code="console.log('tap event occured')")
plot.js_on_event('tap', callback)
""")
subscribed_events = List(String, help="""
List of events that are subscribed to by Python callbacks. This is
the set of events that will be communicated from BokehJS back to
Python for this model.
""")
js_property_callbacks = Dict(String, List(Instance("bokeh.models.callbacks.CustomJS")), help="""
A mapping of attribute names to lists of CustomJS callbacks, to be set up on
BokehJS side when the document is created.
Typically, rather then modifying this property directly, callbacks should be
added using the ``Model.js_on_change`` method:
.. code:: python
callback = CustomJS(code="console.log('stuff')")
plot.x_range.js_on_change('start', callback)
""")
@property
def document(self):
''' The |Document| this model is attached to (can be ``None``)
'''
return self._document
@property
def ref(self):
''' A Bokeh protocol "reference" to this model, i.e. a dict of the
form:
.. code-block:: python
{
'type' : << view model name >>
'id' : << unique model id >>
}
Additionally there may be a `subtype` field if this model is a subtype.
'''
if "__subtype__" in self.__class__.__dict__:
return {
'type' : self.__view_model__,
'subtype' : self.__subtype__,
'id' : self._id,
}
else:
return {
'type' : self.__view_model__,
'id' : self._id,
}
def js_on_event(self, event, *callbacks):
if not isinstance(event, str) and issubclass(event, Event):
event = event.event_name
if event not in self.js_event_callbacks:
self.js_event_callbacks[event] = []
for callback in callbacks:
if callback in self.js_event_callbacks[event]:
continue
self.js_event_callbacks[event].append(callback)
def js_on_change(self, event, *callbacks):
''' Attach a CustomJS callback to an arbitrary BokehJS model event.
On the BokehJS side, change events for model properties have the
form ``"change:property_name"``. As a convenience, if the event name
passed to this method is also the name of a property on the model,
then it will be prefixed with ``"change:"`` automatically:
.. code:: python
# these two are equivalent
source.js_on_change('data', callback)
source.js_on_change('change:data', callback)
However, there are other kinds of events that can be useful to respond
to, in addition to property change events. For example to run a
callback whenever data is streamed to a ``ColumnDataSource``, use the
``"stream"`` event on the source:
.. code:: python
source.js_on_change('streaming', callback)
'''
if len(callbacks) == 0:
raise ValueError("js_on_change takes an event name and one or more callbacks, got only one parameter")
# handle any CustomJS callbacks here
from bokeh.models.callbacks import CustomJS
if not all(isinstance(x, CustomJS) for x in callbacks):
raise ValueError("not all callback values are CustomJS instances")
if event in self.properties():
event = "change:%s" % event
from bokeh.models.sources import ColumnarDataSource
if isinstance(self, ColumnarDataSource):
if event == 'stream':
deprecated((0, 12, 6), "ColumnarDataSource.js_on_change('stream', ...)", "'streaming'")
event = 'streaming'
elif event == 'patch':
event = 'patching'
deprecated((0, 12, 6), "ColumnarDataSource.js_on_change('patch', ...)", "'patching'")
if event not in self.js_property_callbacks:
self.js_property_callbacks[event] = []
for callback in callbacks:
if callback in self.js_property_callbacks[event]:
continue
self.js_property_callbacks[event].append(callback)
def layout(self, side, plot):
'''
'''
try:
return self in getattr(plot, side)
except:
return []
def on_change(self, attr, *callbacks):
''' Add a callback on this object to trigger when ``attr`` changes.
Args:
attr (str) : an attribute name on this object
callback (callable) : a callback function to register
Returns:
None
'''
if attr not in self.properties():
raise ValueError("attempted to add a callback on nonexistent %s.%s property" % (self.__class__.__name__, attr))
super(Model, self).on_change(attr, *callbacks)
def references(self):
''' Returns all ``Models`` that this object has references to.
'''
return set(collect_models(self))
def select(self, selector):
''' Query this object and all of its references for objects that
match the given selector.
Args:
selector (JSON-like) :
Returns:
seq[Model]
'''
return find(self.references(), selector)
def select_one(self, selector):
''' Query this object and all of its references for objects that
match the given selector. Raises an error if more than one object
is found. Returns single matching object, or None if nothing is found
Args:
selector (JSON-like) :
Returns:
Model
'''
result = list(self.select(selector))
if len(result) > 1:
raise ValueError("Found more than one object matching %s: %r" % (selector, result))
if len(result) == 0:
return None
return result[0]
def set_select(self, selector, updates):
''' Update objects that match a given selector with the specified
attribute/value updates.
Args:
selector (JSON-like) :
updates (dict) :
Returns:
None
'''
for obj in self.select(selector):
for key, val in updates.items():
setattr(obj, key, val)
def to_json(self, include_defaults):
''' Returns a dictionary of the attributes of this object,
containing only "JSON types" (string, number, boolean,
none, dict, list).
References to other objects are serialized as "refs" (just
the object ID and type info), so the deserializer will
need to separately have the full attributes of those
other objects.
There's no corresponding from_json() because to
deserialize an object is normally done in the context of a
Document (since the Document can resolve references).
For most purposes it's best to serialize and deserialize
entire documents.
Args:
include_defaults (bool) : whether to include attributes
that haven't been changed from the default
'''
return loads(self.to_json_string(include_defaults=include_defaults))
def to_json_string(self, include_defaults):
    ''' Returns a JSON string encoding the attributes of this object.

    References to other objects are serialized as references
    (just the object ID and type info), so the deserializer
    will need to separately have the full attributes of those
    other objects.

    There's no corresponding from_json_string() because to
    deserialize an object is normally done in the context of a
    Document (since the Document can resolve references).

    For most purposes it's best to serialize and deserialize
    entire documents.

    Args:
        include_defaults (bool) : whether to include attributes
            that haven't been changed from the default
    '''
    json_like = self._to_json_like(include_defaults=include_defaults)
    # The id is not a property, so it is added to the payload separately.
    json_like['id'] = self._id
    # serialize_json "fixes" the JSON from _to_json_like by converting
    # all types into plain JSON types # (it converts Model into refs,
    # for example).
    return serialize_json(json_like)
def trigger(self, attr, old, new, hint=None, setter=None):
    ''' Notify the owning document (if any) of a property change and then
    invoke any registered change callbacks.

    Args:
        attr (str) : name of the property that changed
        old, new : previous and current values of the property
        hint : optional event hint that bypasses document invalidation
        setter : optional originator of the change, forwarded to callbacks
    '''
    # The explicit assumption here is that hinted events do not need to
    # go through all the same invalidation steps. Currently this is the
    # case for ColumnsStreamedEvent and ColumnsPatchedEvent. However,
    # this may need to be further refined in the future, if the
    # assumption does not hold for future hinted events (e.g. the hint
    # could specify explicitly whether to do normal invalidation or not)
    if not hint:
        # Mutable dict so the closure below can count visits.
        dirty = { 'count' : 0 }
        def mark_dirty(obj):
            dirty['count'] += 1
        if self._document is not None:
            # If either the old or the new value (or anything they
            # immediately reference) involves Models, the document's
            # model caches must be rebuilt.
            _visit_value_and_its_immediate_references(new, mark_dirty)
            _visit_value_and_its_immediate_references(old, mark_dirty)
            if dirty['count'] > 0:
                self._document._invalidate_all_models()
    # chain up to invoke callbacks
    super(Model, self).trigger(attr, old, new, hint, setter)
def _attach_document(self, doc):
    ''' Attach a model to a Bokeh |Document|.

    This private interface should only ever called by the Document
    implementation to set the private ._document field properly

    Raises:
        RuntimeError : if the model already belongs to a different document
    '''
    if self._document is not None and self._document is not doc:
        raise RuntimeError("Models must be owned by only a single document, %r is already in a doc" % (self))
    # Apply the document's theme, record ownership, then re-register any
    # event callbacks with the new document.
    doc.theme.apply_to_model(self)
    self._document = doc
    self._update_event_callbacks()
def _detach_document(self):
    ''' Detach a model from a Bokeh |Document|.

    This private interface should only ever called by the Document
    implementation to unset the private ._document field properly
    '''
    self._document = None
    # Revert any document-specific theming back to the default theme.
    default_theme.apply_to_model(self)
def _to_json_like(self, include_defaults):
    ''' Returns a dictionary of the attributes of this object, in
    a layout corresponding to what BokehJS expects at unmarshalling time.

    This method does not convert "Bokeh types" into "plain JSON types,"
    for example each child Model will still be a Model, rather
    than turning into a reference, numpy isn't handled, etc.
    That's what "json like" means.

    This method should be considered "private" or "protected",
    for use internal to Bokeh; use to_json() instead because
    it gives you only plain JSON-compatible types.

    Args:
        include_defaults (bool) : whether to include attributes
            that haven't been changed from the default.
    '''
    all_attrs = self.properties_with_values(include_defaults=include_defaults)
    # If __subtype__ is defined, then this model may introduce properties
    # that don't exist on __view_model__ in bokehjs. Don't serialize such
    # properties.
    subtype = getattr(self.__class__, "__subtype__", None)
    if subtype is not None and subtype != self.__class__.__view_model__:
        attrs = {}
        for attr, value in all_attrs.items():
            # Properties declared directly on the subtype class are the
            # ones unknown to bokehjs; drop them.
            if attr in self.__class__.__dict__:
                continue
            else:
                attrs[attr] = value
    else:
        attrs = all_attrs
    for (k, v) in attrs.items():
        # we can't serialize Infinity, we send it as None and
        # the other side has to fix it up. This transformation
        # can't be in our json_encoder because the json
        # module checks for inf before it calls the custom
        # encoder.
        # NOTE(review): only +inf is mapped; -inf and NaN fall through
        # unchanged -- confirm whether that is intended.
        if isinstance(v, float) and v == float('inf'):
            attrs[k] = None
    return attrs
def _repr_html_(self):
    ''' Build a collapsible HTML representation of this model for
    display in Jupyter notebooks: the id is shown, and the remaining
    properties are hidden behind a clickable ellipsis.
    '''
    module = self.__class__.__module__
    name = self.__class__.__name__
    _id = getattr(self, "_id", None)
    # Unique CSS class name used to tag the initially-hidden rows.
    cls_name = make_id()
    def row(c):
        return '<div style="display: table-row;">' + c + '</div>'
    def hidden_row(c):
        return '<div class="%s" style="display: none;">%s</div>' % (cls_name, c)
    def cell(c):
        return '<div style="display: table-cell;">' + c + '</div>'
    html = ''
    html += '<div style="display: table;">'
    # The ellipsis toggles visibility of the hidden property rows
    # (wired up by the _HTML_REPR script fragment appended below).
    ellipsis_id = make_id()
    ellipsis = '<span id="%s" style="cursor: pointer;">…)</span>' % ellipsis_id
    prefix = cell('<b title="%s.%s">%s</b>(' % (module, name, name))
    html += row(prefix + cell('id' + ' = ' + repr(_id) + ', ' + ellipsis))
    props = self.properties_with_values().items()
    sorted_props = sorted(props, key=itemgetter(0))
    all_props = sorted_props
    for i, (prop, value) in enumerate(all_props):
        # Close the paren on the final property row.
        end = ')' if i == len(all_props)-1 else ','
        html += hidden_row(cell("") + cell(prop + ' = ' + repr(value) + end))
    html += '</div>'
    html += _HTML_REPR % dict(ellipsis_id=ellipsis_id, cls_name=cls_name)
    return html
def _repr_pretty(self, p, cycle):
    ''' Render this model with an IPython pretty-printer *p*.

    NOTE(review): IPython's hook method is normally spelled
    ``_repr_pretty_`` (trailing underscore) -- confirm how this
    method is invoked.
    '''
    name = "%s.%s" % (self.__class__.__module__, self.__class__.__name__)
    _id = getattr(self, "_id", None)
    if cycle:
        # Reference cycle detected: print an abbreviated form only.
        p.text(name)
        p.text('(id=')
        p.pretty(_id)
        p.text(', ...)')
    else:
        with p.group(4, '%s(' % name, ')'):
            props = self.properties_with_values().items()
            sorted_props = sorted(props, key=itemgetter(0))
            # id is always printed first, before the sorted properties.
            all_props = [('id', _id)] + sorted_props
            for i, (prop, value) in enumerate(all_props):
                if i == 0:
                    p.breakable('')
                else:
                    p.text(',')
                    p.breakable()
                p.text(prop)
                p.text('=')
                p.pretty(value)
def _visit_immediate_value_references(value, visitor):
    ''' Visit all references to another Model without recursing into any
    of the child Model; may visit the same Model more than once if
    it's referenced more than once. Does not visit the passed-in value.
    '''
    if isinstance(value, HasProps):
        # Only walk properties that are declared capable of holding
        # Model references.
        for attr in value.properties_with_refs():
            child = getattr(value, attr)
            _visit_value_and_its_immediate_references(child, visitor)
    else:
        _visit_value_and_its_immediate_references(value, visitor)
# Types that can never hold Model references; used as a fast-path check
# in _visit_value_and_its_immediate_references below.
_common_types = {int, float, str}

def _visit_value_and_its_immediate_references(obj, visitor):
    ''' Recurse down Models, HasProps, and Python containers

    The ordering in this function is to optimize performance. We check the
    most common types (int, float, str) first so that we can quickly return in
    the common case. We avoid isinstance and issubclass checks in a couple
    places with `type` checks because isinstance checks can be slow.
    '''
    typ = type(obj)
    if typ in _common_types: # short circuit on common base types
        return
    if typ is list or issubclass(typ, (list, tuple)): # check common containers
        for item in obj:
            _visit_value_and_its_immediate_references(item, visitor)
    elif issubclass(typ, dict):
        # Both keys and values may reference Models.
        # NOTE(review): iteritems presumably comes from bokeh's py2/3
        # compat layer -- confirm it is imported at file top.
        for key, value in iteritems(obj):
            _visit_value_and_its_immediate_references(key, visitor)
            _visit_value_and_its_immediate_references(value, visitor)
    elif issubclass(typ, HasProps):
        if issubclass(typ, Model):
            # A Model is a reference boundary: report it, don't recurse.
            visitor(obj)
        else:
            # this isn't a Model, so recurse into it
            _visit_immediate_value_references(obj, visitor)
|
DuCorey/bokeh
|
bokeh/model.py
|
Python
|
bsd-3-clause
| 25,355
|
[
"VisIt"
] |
244c855f4d74fcf1ba8dcd98218b3adea16e6f33bc3499f75996c038ca2a9e25
|
import sys
sys.path.insert(1, "../../../")
import h2o, tests
def link_correct_default(ip, port):
    """Verify that fitting a GLM with its canonical link specified explicitly
    produces exactly the same coefficients as leaving the link unspecified.

    Covers the gaussian/identity, binomial/logit, poisson/log and
    gamma/inverse family/link pairs on the prostate dataset.

    Args:
        ip, port: H2O cluster connection info (supplied by the test runner).
    """
    print("Reading in original prostate data.")
    h2o_data = h2o.upload_file(path=h2o.locate("smalldata/prostate/prostate.csv.zip"))
    print("Compare models with link unspecified and canonical link specified.")
    # (label, family, canonical link, predictors, response) -- replaces four
    # copy-pasted stanzas with one data-driven loop.
    cases = [
        ("GAUSSIAN", "gaussian", "identity", h2o_data[1:8], h2o_data[8]),
        ("BINOMIAL", "binomial", "logit", h2o_data[2:9], h2o_data[1]),
        ("POISSON", "poisson", "log", h2o_data[2:9], h2o_data[1]),
        ("GAMMA", "gamma", "inverse", h2o_data[3:9], h2o_data[2]),
    ]
    for label, family, link, x, y in cases:
        print("%s: " % label)
        h2o_model_unspecified = h2o.glm(x=x, y=y, family=family)
        h2o_model_specified = h2o.glm(x=x, y=y, family=family, link=link)
        assert h2o_model_specified._model_json['output']['coefficients_table'].cell_values == \
            h2o_model_unspecified._model_json['output']['coefficients_table'].cell_values, "coefficient should be equal"
if __name__ == "__main__":
    # Delegate to the shared pyunit runner, which parses sys.argv for the
    # cluster ip/port before invoking the test function.
    tests.run_test(sys.argv, link_correct_default)
|
bospetersen/h2o-3
|
h2o-py/tests/testdir_algos/glm/pyunit_link_correct_default_largeGLM.py
|
Python
|
apache-2.0
| 1,995
|
[
"Gaussian"
] |
864668a50556700f0467935b9a7db1f541942a5ede07357e8ee633d28e413b71
|
from pyv8 import PyV8
import datetime
class Console(PyV8.JSClass):
    """Bridge object exposed to JavaScript as ``console``."""
    def log(self, text):
        # Mirror JS console.log output onto Python stdout with a marker.
        print "[JAVASCRIPT LOG] %s" % text
class Unknown(PyV8.JSClass):
    """Sentinel value representing an unknown ghost value.

    Attribute access and calls both yield a fresh ``Unknown`` so that
    unknown-ness propagates through arbitrary expression chains.
    """
    def __init__(self):
        # Flag checked from Python (is_unknown) and from JS (isUnknown()).
        self.isUnknown = True
    def __getattr__(self, nm):
        # Any missing attribute of an unknown is itself unknown.
        return Unknown()
    def __call__(self, v):
        # Calling an unknown yields an unknown result.
        return Unknown()
def is_unknown(g):
    """Return True when *g* is the Unknown sentinel."""
    return isinstance(g, Unknown)
def getFunctions():
    """Return the JavaScript prelude defining isUnknown() and the
    _unknowncmp* binary-operator helpers.

    Each helper propagates an Unknown operand unchanged; otherwise it
    applies the underlying JS operator. The rewriter (Visitor) routes
    compare/binary operations through these helpers.
    """
    return '''
function isUnknown(ghost) {
    return (typeof ghost['isUnknown'] !== "undefined") && ghost['isUnknown'];
}
function _unknowncmpEQ(a, b) {
    if (isUnknown(a))
        return a;
    else if (isUnknown(b))
        return b;
    else
        return a == b;
}
function _unknowncmpADD(a, b) {
    if (isUnknown(a))
        return a;
    else if (isUnknown(b))
        return b;
    else
        return a + b;
}
function _unknowncmpSUB(a, b) {
    if (isUnknown(a))
        return a;
    else if (isUnknown(b))
        return b;
    else
        return a - b;
}
function _unknowncmpGTE(a, b) {
    if (isUnknown(a))
        return a;
    else if (isUnknown(b))
        return b;
    else
        return a >= b;
}
function _unknowncmpLTE(a, b) {
    if (isUnknown(a))
        return a;
    else if (isUnknown(b))
        return b;
    else
        return a <= b;
}
function _unknowncmpGT(a, b) {
    if (isUnknown(a))
        return a;
    else if (isUnknown(b))
        return b;
    else
        return a > b;
}
function _unknowncmpLT(a, b) {
    if (isUnknown(a))
        return a;
    else if (isUnknown(b))
        return b;
    else
        return a < b;
}
function _unknowncmpAND(a, b) {
    if (isUnknown(a))
        return a;
    else if (isUnknown(b))
        return b;
    else
        return a && b;
}
function _unknowncmpOR(a, b) {
    if (isUnknown(a))
        return a;
    else if (isUnknown(b))
        return b;
    else
        return a || b;
}
'''
class Global(PyV8.JSClass):
    """Global object installed into each JS context.

    Exposes ``console`` plus helpers for creating and probing ghost
    values. The ``_unknowncmp*`` comparison helpers now live in JS (see
    getFunctions()); the superseded commented-out Python implementations
    that used to follow were removed as dead code.
    """
    console = Console()
    def unknown(self):
        # Factory callable from JS: produce a fresh Unknown sentinel.
        return Unknown()
    def isFresh(self, ghost):
        # Assumes ghost objects carry a 'fresh' attribute -- TODO confirm.
        return ghost.fresh
def unwrap(v):
    """Recursively convert Python containers into V8-friendly values.

    Lists become PyV8.JSArray instances (elements unwrapped first);
    dicts are rebuilt with unwrapped values; all other values pass
    through unchanged.
    """
    if isinstance(v, list):
        v2 = []
        for item in v:
            v2.append(unwrap(item))
        return PyV8.JSArray(v2)
    elif isinstance(v, dict):
        v2 = {}
        for k,val in v.items():
            v2[k] = unwrap(val)
        return v2
    else:
        return v
# Module-level V8 locker shared by (possibly nested) eval_javascript calls.
lock = None
# Re-entrancy depth for eval_javascript; lock is released at depth zero.
entered = 0

class AssertionFailure:
    """Raised when a JS-level assert() or assert_strict() fails."""
    pass
def eval_javascript(env, js):
    """Evaluate *js* in a V8 context whose globals are seeded from the
    dict *env* (values converted with unwrap()).

    A module-level JSLocker is shared across nested calls; it is entered
    on first use and released when the outermost call completes.

    NOTE(review): if ctxt.eval raises, ``entered`` is never decremented
    and the lock is never released -- confirm whether that leak matters.
    """
    global lock, entered
    def assert_f(v):
        # JS-visible assert(): any falsy value is a failure.
        if not v:
            print "ASSERTION FAILED"
            raise AssertionFailure()
        return True
    def assert_strict(v):
        # Strict variant: an Unknown value is itself a failure.
        if hasattr(v, 'isUnknown') and v.isUnknown:
            print "STRICT ASSERTION IS UNKNOWN"
            raise AssertionFailure()
        else:
            return assert_f(v)
    if not lock:
        lock = PyV8.JSLocker()
        lock.enter()
    entered += 1
    with PyV8.JSContext(Global()) as ctxt:
        ctxt.locals['assert'] = assert_f
        ctxt.locals['assert_strict'] = assert_strict
        # Seed the JS global scope from the supplied environment.
        for k, v in env.items():
            ctxt.locals[k] = unwrap(v)
            #if k == 'flight':
            #    print ctxt.locals[k]
        pres = PyV8.convert(ctxt.eval(js))
    entered -= 1
    if entered == 0:
        lock.leave()
        lock = None
    return pres
def js_lookup(val, name, lookup_str):
    """Bind *val* as global *name* in a fresh JS context and return the
    result of evaluating *lookup_str* there."""
    with PyV8.JSLocker():
        with PyV8.JSContext(Global()) as ctxt:
            ctxt.locals[name] = val
            return ctxt.eval(lookup_str)
class Visitor(object):
    """AST visitor that rewrites JavaScript source so binary/compare
    operators go through the _unknowncmp* helpers and control flow
    bails out (returns unknown()) when a condition is Unknown.

    Each on* handler leaves its rewritten source text in ``self.str``;
    onProgram accumulates the final program text in ``self.finalstr``.
    """
    def __init__(self, e):
        # e: the PyV8.JSEngine used to compile the script being visited.
        self.e = e
    def onProgram(self, prog):
        self.ast = prog.toAST()
        self.finalstr = ''
        # Declarations first, then the top-level statement list.
        for decl in prog.scope.declarations:
            decl.visit(self)
            self.finalstr += self.str
        for stmt in prog.body:
            stmt.visit(self)
            self.finalstr += self.str
        #print str(prog)
        #self.json = json.loads(prog.toJSON())
    def __getattr__(self, name):
        # Default handler: any on* node without a dedicated handler is
        # emitted verbatim via str(e).
        # NOTE(review): names not starting with 'on' fall through and
        # implicitly return None instead of raising AttributeError --
        # confirm this is intended.
        if name.startswith('on'):
            def return_usual(e):
                #print "usual for %s" % name
                self.str = str(e)
            return return_usual
    def onFunctionDeclaration(self, f):
        f.function.visit(self)
        self.str = "%s; " % (self.str)
    def onForInStatement(self, f):
        # A for-in over an Unknown enumerable makes the whole function
        # result unknown.
        f.body.visit(self)
        self.str = "if (isUnknown(%s)) { return unknown(); } else { for (var %s in %s) { %s } } " % (f.enumerable, f.each, f.enumerable, self.str)
    def onVariableDeclaration(self, d):
        self.str = "var %s;" % (d.proxy.name)
    def onAssignment(self, a):
        a.value.visit(self)
        v = self.str
        self.str = "%s = %s" % (a.target, v)
    def onIfStatement(self, i):
        i.condition.visit(self)
        check = self.str
        i.thenStatement.visit(self)
        then = self.str
        if i.hasElseStatement:
            i.elseStatement.visit(self)
            elsePart = self.str
        else:
            elsePart = ''
        # Evaluate the condition once; an Unknown condition aborts with
        # an unknown result instead of picking a branch.
        self.str = "tmp = %s; if (isUnknown(tmp)) { return unknown(); } else { if (tmp) { %s } else { %s } }" % (check, then, elsePart)
    def onFunctionLiteral(self, f):
        body = []
        for b in f.body:
            b.visit(self)
            body.append(self.str)
        params = []
        for i in xrange(0, f.scope.num_parameters):
            params.append(str(f.scope.parameter(i).name))
        self.str = "function %s(%s) { %s }" % (f.name, ", ".join(params), " ".join(body))
    def onBlock(self, block):
        strs = []
        for stmt in block.statements:
            stmt.visit(self)
            strs.append(self.str)
        # NOTE(review): for an empty block this returns '' WITHOUT
        # updating self.str, so the caller reads the previous node's
        # text -- looks like a latent bug; confirm.
        if strs == []:
            return ''
        self.str = "{ %s }" % " ".join(strs)
    def onCall(self, stmt):
        stmt.expression.visit(self)
        e = self.str
        args = []
        for arg in stmt.args:
            arg.visit(self)
            args.append(self.str)
        self.str = "%s(%s)" % (e, ", ".join(args))
    def onExpressionStatement(self, stmt):
        stmt.expression.visit(self)
        self.str = "%s; " % (self.str)
    def onReturnStatement(self, stmt):
        stmt.expression.visit(self);
        self.str = "return %s;" % (self.str)
    def onCompareOperation(self, stmt):
        # Rewrite e.g. `a < b` into `_unknowncmpLT(a, b)`.
        stmt.left.visit(self)
        left = self.str
        stmt.right.visit(self)
        right = self.str
        self.str = "%s(%s, %s)" % ('_unknowncmp%s' % stmt.op, left, right)
    def onBinaryOperation(self, stmt):
        # Same rewrite for arithmetic/logical operators (ADD, SUB, AND, OR...).
        stmt.left.visit(self)
        left = self.str
        stmt.right.visit(self)
        right = self.str
        self.str = "%s(%s, %s)" % ('_unknowncmp%s' % stmt.op, left, right)
def rewrite_for_unknown_ops(js):
    """Rewrite *js* so its operators route through the _unknowncmp*
    helpers; return the helper prelude plus the rewritten source.

    Also prints the rewrite wall-clock time in milliseconds.
    """
    start = datetime.datetime.now()
    with PyV8.JSLocker():
        with PyV8.JSContext() as c:
            with PyV8.JSEngine() as e:
                # Compile so the AST can be walked by the Visitor.
                s = e.compile(js)
                visitor = Visitor(e)
                s.visit(visitor)
    stop = datetime.datetime.now()
    diff = (stop - start).total_seconds()
    print "rewrite time = %f" % (diff * 1000)
    return getFunctions() + visitor.finalstr
|
wayetender/whip
|
whip/src/adapter/util/js.py
|
Python
|
gpl-2.0
| 9,411
|
[
"VisIt"
] |
de5ef1b613349914478901ef7600205110dcfd4db1b30e1c8e6148480fd32843
|
# This file is part of ts_wep.
#
# Developed for the LSST Telescope and Site Systems.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import os
import unittest
import numpy as np
import pandas as pd
import lsst.geom
import lsst.obs.lsst as obs_lsst
from lsst.daf import butler as dafButler
from lsst.obs.base import createInitialSkyWcsFromBoresight
from lsst.ts.wep.Utility import getModulePath
from lsst.ts.wep.task.GenerateDonutCatalogWcsTask import (
GenerateDonutCatalogWcsTask,
GenerateDonutCatalogWcsTaskConfig,
)
from lsst.ts.wep.Utility import runProgram, writePipetaskCmd, writeCleanUpRepoCmd
class TestGenerateDonutCatalogWcsTask(unittest.TestCase):
    """Tests for GenerateDonutCatalogWcsTask run against the bundled
    gen3 test butler repository under tests/testData/gen3TestRepo."""

    def setUp(self):
        # Select donuts on g-band flux with no spatial exclusion radius.
        self.config = GenerateDonutCatalogWcsTaskConfig()
        self.config.donutSelector.fluxField = "g_flux"
        self.config.donutSelector.donutRadius = 0.0
        self.task = GenerateDonutCatalogWcsTask(config=self.config)
        moduleDir = getModulePath()
        self.testDataDir = os.path.join(moduleDir, "tests", "testData")
        self.repoDir = os.path.join(self.testDataDir, "gen3TestRepo")
        self.centerRaft = ["R22_S10", "R22_S11"]
        self.butler = dafButler.Butler(self.repoDir)
        self.registry = self.butler.registry

    def _getRefCat(self):
        # Deferred-load handles for every reference catalog shard in the
        # "refcats" collection.
        refCatList = []
        datasetGenerator = self.registry.queryDatasets(
            datasetType="cal_ref_cat", collections=["refcats"]
        ).expanded()
        for ref in datasetGenerator:
            refCatList.append(self.butler.getDeferred(ref, collections=["refcats"]))
        return refCatList

    def testValidateConfigs(self):
        # Config values should propagate into the constructed task.
        self.config.filterName = "r"
        self.config.doDonutSelection = False
        self.task = GenerateDonutCatalogWcsTask(config=self.config)
        self.assertEqual(self.task.config.filterName, "r")
        self.assertEqual(self.task.config.doDonutSelection, False)

    def testGetRefObjLoader(self):
        refCatList = self._getRefCat()
        refObjLoader = self.task.getRefObjLoader(refCatList)
        # Check that our refObjLoader loads the available objects
        # within a given search radius
        donutCatSmall = refObjLoader.loadSkyCircle(
            lsst.geom.SpherePoint(0.0, 0.0, lsst.geom.degrees),
            lsst.geom.Angle(0.5, lsst.geom.degrees),
            filterName="g",
        )
        self.assertEqual(len(donutCatSmall.refCat), 8)
        donutCatFull = refObjLoader.loadSkyCircle(
            lsst.geom.SpherePoint(0.0, 0.0, lsst.geom.degrees),
            lsst.geom.Angle(2.5, lsst.geom.degrees),
            filterName="g",
        )
        self.assertEqual(len(donutCatFull.refCat), 24)

    def testRunSelection(self):
        refCatList = self._getRefCat()
        # Apply a g-band magnitude limit instead of donut selection.
        self.config.referenceSelector.magLimit.maximum = 17.0
        self.config.referenceSelector.magLimit.fluxField = "g_flux"
        self.config.referenceSelector.doMagLimit = True
        self.config.doDonutSelection = False
        self.task = GenerateDonutCatalogWcsTask(config=self.config, name="Base Task")
        refObjLoader = self.task.getRefObjLoader(refCatList)
        bbox = lsst.geom.Box2I(lsst.geom.Point2I(0, 0), lsst.geom.Extent2I(4000, 4000))
        wcs = createInitialSkyWcsFromBoresight(
            lsst.geom.SpherePoint(0.0, 0.0, lsst.geom.degrees),
            90.0 * lsst.geom.degrees,
            obs_lsst.LsstCam().getCamera()["R22_S11"],
            flipX=False,
        )
        # If we have a magLimit at 17 we should cut out
        # the one source at 17.5.
        donutCatBrighterThan17 = self.task.runSelection(refObjLoader, bbox, wcs, "g")
        self.assertEqual(len(donutCatBrighterThan17), 3)
        # If we increase the mag limit to 18 we should
        # get all the sources in the catalog.
        self.config.referenceSelector.magLimit.maximum = 18.0
        self.task = GenerateDonutCatalogWcsTask(config=self.config, name="Base Task")
        refObjLoader = self.task.getRefObjLoader(refCatList)
        donutCatFull = self.task.runSelection(refObjLoader, bbox, wcs, "g")
        self.assertEqual(len(donutCatFull), 4)

    def testDonutCatalogToDataFrame(self):
        refCatList = self._getRefCat()
        refObjLoader = self.task.getRefObjLoader(refCatList)
        # Check that our refObjLoader loads the available objects
        # within a given footprint from a sample exposure
        testDataId = {
            "instrument": "LSSTCam",
            "detector": 94,
            "exposure": 4021123106001,
        }
        testExposure = self.butler.get(
            "raw", dataId=testDataId, collections="LSSTCam/raw/all"
        )
        donutCatSmall = refObjLoader.loadPixelBox(
            testExposure.getBBox(),
            testExposure.getWcs(),
            testExposure.getFilterLabel().bandLabel,
        )
        fieldObjects = self.task.donutCatalogToDataFrame(donutCatSmall.refCat)
        self.assertEqual(len(fieldObjects), 4)
        self.assertCountEqual(
            fieldObjects.columns,
            [
                "coord_ra",
                "coord_dec",
                "centroid_x",
                "centroid_y",
                "source_flux",
            ],
        )
        # Test that None returns an empty dataframe
        fieldObjectsNone = self.task.donutCatalogToDataFrame()
        self.assertEqual(len(fieldObjectsNone), 0)
        self.assertCountEqual(
            fieldObjects.columns,
            [
                "coord_ra",
                "coord_dec",
                "centroid_x",
                "centroid_y",
                "source_flux",
            ],
        )

    def testPipeline(self):
        """
        Test that the task runs in a pipeline. Also functions as a test of
        runQuantum function.
        """
        # Run pipeline command
        runName = "run1"
        instrument = "lsst.obs.lsst.LsstCam"
        collections = "refcats,LSSTCam/calib,LSSTCam/raw/all"
        exposureId = 4021123106001  # Exposure ID for test extra-focal image
        testPipelineConfigDir = os.path.join(self.testDataDir, "pipelineConfigs")
        pipelineYaml = os.path.join(
            testPipelineConfigDir, "testDonutCatWcsPipeline.yaml"
        )
        pipetaskCmd = writePipetaskCmd(
            self.repoDir, runName, instrument, collections, pipelineYaml=pipelineYaml
        )
        # Update task configuration to match pointing information
        pipetaskCmd += f" -d 'exposure IN ({exposureId})'"
        # Check that run doesn't already exist due to previous improper cleanup
        collectionsList = list(self.registry.queryCollections())
        if runName in collectionsList:
            cleanUpCmd = writeCleanUpRepoCmd(self.repoDir, runName)
            runProgram(cleanUpCmd)
        # Run pipeline task
        runProgram(pipetaskCmd)
        # Test instrument matches
        pipelineButler = dafButler.Butler(self.repoDir)
        donutCatDf_S11 = pipelineButler.get(
            "donutCatalog",
            dataId={"instrument": "LSSTCam", "detector": 94, "visit": exposureId},
            collections=[f"{runName}"],
        )
        donutCatDf_S10 = pipelineButler.get(
            "donutCatalog",
            dataId={"instrument": "LSSTCam", "detector": 93, "visit": exposureId},
            collections=[f"{runName}"],
        )
        # Check 4 sources in each detector
        self.assertEqual(len(donutCatDf_S11), 4)
        self.assertEqual(len(donutCatDf_S10), 4)
        # Check outputs are correct
        outputDf = pd.concat([donutCatDf_S11, donutCatDf_S10])
        self.assertEqual(len(outputDf), 8)
        self.assertCountEqual(
            outputDf.columns,
            [
                "coord_ra",
                "coord_dec",
                "centroid_x",
                "centroid_y",
                "source_flux",
            ],
        )
        self.assertCountEqual(
            [
                3806.7636478057957,
                2806.982895217227,
                607.3861483168994,
                707.3972344551466,
                614.607342274194,
                714.6336433247832,
                3815.2649173460436,
                2815.0561553920156,
            ],
            outputDf["centroid_x"],
        )
        self.assertCountEqual(
            [
                3196.070534224157,
                2195.666002294077,
                394.8907003737886,
                394.9087004171349,
                396.2407036464963,
                396.22270360324296,
                3196.1965343932648,
                2196.188002312585,
            ],
            outputDf["centroid_y"],
        )
        fluxTruth = np.ones(8)
        fluxTruth[:6] = 3630780.5477010026
        fluxTruth[6:] = 363078.0547701003
        self.assertCountEqual(outputDf["source_flux"], fluxTruth)
        # Clean up
        cleanUpCmd = writeCleanUpRepoCmd(self.repoDir, runName)
        runProgram(cleanUpCmd)

    def testDonutCatalogGeneration(self):
        """
        Test that task creates a dataframe with detector information.
        """
        # Create list of deferred loaders for the ref cat
        deferredList = []
        datasetGenerator = self.registry.queryDatasets(
            datasetType="cal_ref_cat", collections=["refcats"]
        ).expanded()
        for ref in datasetGenerator:
            deferredList.append(self.butler.getDeferred(ref, collections=["refcats"]))
        expGenerator = self.registry.queryDatasets(
            datasetType="raw",
            collections=["LSSTCam/raw/all"],
            dimensions=["exposure", "instrument"],
            dataId={"exposure": 4021123106001, "instrument": "LSSTCam"},
        ).expanded()
        expList = []
        for expRef in expGenerator:
            expList.append(
                self.butler.get(
                    "raw", dataId=expRef.dataId, collections=["LSSTCam/raw/all"]
                )
            )
        # run task on all exposures
        donutCatDfList = []
        for exposure in expList:
            taskOutput = self.task.run(deferredList, exposure)
            self.assertEqual(len(taskOutput.donutCatalog), 4)
            donutCatDfList.append(taskOutput.donutCatalog)
        # concatenate catalogs from each exposure into a single catalog
        # to compare against the test input reference catalog
        outputDf = donutCatDfList[0]
        for donutCat in donutCatDfList[1:]:
            outputDf = pd.concat([outputDf, donutCat])
        # Compare ra, dec info to original input catalog
        inputCat = np.genfromtxt(
            os.path.join(
                self.testDataDir, "phosimOutput", "realComCam", "skyComCamInfo.txt"
            ),
            names=["id", "ra", "dec", "mag"],
        )
        self.assertEqual(len(outputDf), 8)
        self.assertCountEqual(np.radians(inputCat["ra"]), outputDf["coord_ra"])
        self.assertCountEqual(np.radians(inputCat["dec"]), outputDf["coord_dec"])
        self.assertCountEqual(
            [
                3806.7636478057957,
                2806.982895217227,
                607.3861483168994,
                707.3972344551466,
                614.607342274194,
                714.6336433247832,
                3815.2649173460436,
                2815.0561553920156,
            ],
            outputDf["centroid_x"],
        )
        self.assertCountEqual(
            [
                3196.070534224157,
                2195.666002294077,
                394.8907003737886,
                394.9087004171349,
                396.2407036464963,
                396.22270360324296,
                3196.1965343932648,
                2196.188002312585,
            ],
            outputDf["centroid_y"],
        )
        fluxTruth = np.ones(8)
        fluxTruth[:6] = 3630780.5477010026
        fluxTruth[6:] = 363078.0547701003
        self.assertCountEqual(outputDf["source_flux"], fluxTruth)
|
lsst-ts/ts_wep
|
tests/task/test_generateDonutCatalogWcsTask.py
|
Python
|
gpl-3.0
| 12,707
|
[
"VisIt"
] |
bf6148f98e9e3c457be3c0d5b5ba006fade9ea84b05eb7e983304cce40d5a2d2
|
"""Utility routines to handle degeneracy."""
# Copyright (C) 2014 Atsushi Togo
# All rights reserved.
#
# This file is part of phonopy.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the phonopy project nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from typing import Union
import numpy as np
from phonopy.harmonic.derivative_dynmat import DerivativeOfDynamicalMatrix
from phonopy.harmonic.dynamical_matrix import DynamicalMatrix, DynamicalMatrixNAC
def degenerate_sets(freqs, cutoff=1e-4):
    """Find degenerate bands from frequencies.

    Parameters
    ----------
    freqs : ndarray
        A list of values.
        shape=(values,)
    cutoff : float, optional
        Equivalent of values is defined by this value, i.e.,
        abs(val1 - val2) < cutoff
        Default is 1e-4.

    Returns
    -------
    indices : list of list
        Indices of equivalent values are grouped as a list and those groups are
        stored in a list. Each index appears in exactly one group.

    Example
    -------
    In : degenerate_sets(np.array([1.5, 2.1, 2.1, 3.4, 8]))
    Out: [[0], [1, 2], [3], [4]]
    """
    indices = []
    # Use a set for O(1) membership tests (was a list with O(n) lookups).
    done = set()
    for i in range(len(freqs)):
        if i in done:
            continue
        f_set = [i]
        done.add(i)
        for j in range(i + 1, len(freqs)):
            # Skip indices already assigned to an earlier group so that
            # no index can end up in two groups (the original could
            # duplicate an index when near-degeneracy is not transitive).
            if j in done:
                continue
            # Chain transitively: j joins if close to ANY current member.
            if (np.abs(freqs[f_set] - freqs[j]) < cutoff).any():
                f_set.append(j)
                done.add(j)
        indices.append(f_set[:])
    return indices
def get_eigenvectors(
    q,
    dm: Union[DynamicalMatrix, DynamicalMatrixNAC],
    ddm: DerivativeOfDynamicalMatrix,
    perturbation=None,
    derivative_order=None,
    nac_q_direction=None,
):
    """Return degenerated eigenvalues and rotated eigenvalues.

    Diagonalizes the dynamical matrix at q. When *perturbation* is given,
    eigenvectors within each degenerate subspace are additionally rotated
    so they diagonalize the q-derivative of the dynamical matrix.
    """
    # Use the NAC q-direction only at (numerically) the Gamma point.
    if nac_q_direction is not None and (np.abs(q) < 1e-5).all():
        dm.run(q, q_direction=nac_q_direction)
    else:
        dm.run(q)
    eigvals, eigvecs = np.linalg.eigh(dm.dynamical_matrix)
    eigvals = eigvals.real
    if perturbation is None:
        return eigvals, eigvecs
    if derivative_order is not None:
        ddm.set_derivative_order(derivative_order)
    # Lift the degeneracy using the perturbation direction.
    dD = _get_dD(q, ddm, perturbation)
    rot_eigvecs, _ = rotate_eigenvectors(eigvals, eigvecs, dD)
    return eigvals, rot_eigvecs
def rotate_eigenvectors(eigvals, eigvecs, dD):
    """Rotate eigenvectors among degenerated band.

    Within each group of degenerate eigenvalues, the eigenvectors are
    rotated so they diagonalize dD projected onto that subspace.

    Parameters
    ----------
    eigvals :
        Eigenvalues.
        shape=(num_band, )
    eigvecs :
        Eigenvectors.
        shape=(num_atom * 3, num_band)
    dD :
        q-point derivative of dynamical matrix.
    """
    rotated = np.zeros_like(eigvecs)
    dD_eigvals = np.zeros_like(eigvals)
    for band_group in degenerate_sets(eigvals):
        subspace = eigvecs[:, band_group]
        # Project dD onto the degenerate subspace and diagonalize it
        # (same multiplication order as the reference implementation).
        dD_block = np.dot(subspace.T.conj(), np.dot(dD, subspace))
        dD_eigvals[band_group], block_vecs = np.linalg.eigh(dD_block)
        rotated[:, band_group] = np.dot(subspace, block_vecs)
    return rotated, dD_eigvals
def _get_dD(q, ddm: DerivativeOfDynamicalMatrix, perturbation):
    """Return q-vector derivative of dynamical matrix.

    The derivative tensor is contracted with the (normalized)
    perturbation direction so a single matrix is returned.

    Returns
    -------
    shape=(3, num_band, num_band).
    """
    ddm.run(q)
    ddm_vals = ddm.get_derivative_of_dynamical_matrix()
    dD = np.zeros(ddm_vals.shape[1:], dtype=ddm_vals.dtype, order="C")
    if len(ddm_vals) == 3:
        # First derivatives: project once onto the perturbation direction.
        for i in range(3):
            dD += perturbation[i] * ddm_vals[i]
        return dD / np.linalg.norm(perturbation)
    else:
        # Second derivatives: contract twice with the perturbation.
        # NOTE(review): component order inferred from the cross terms as
        # (xx, yy, zz, yz, xz, xy) -- confirm against
        # DerivativeOfDynamicalMatrix output layout.
        dD += perturbation[0] * perturbation[0] * ddm_vals[0]
        dD += perturbation[1] * perturbation[1] * ddm_vals[1]
        dD += perturbation[2] * perturbation[2] * ddm_vals[2]
        dD += 2 * perturbation[0] * perturbation[1] * ddm_vals[5]
        dD += 2 * perturbation[0] * perturbation[2] * ddm_vals[4]
        dD += 2 * perturbation[1] * perturbation[2] * ddm_vals[3]
        return dD / np.linalg.norm(perturbation) ** 2
|
atztogo/phonopy
|
phonopy/phonon/degeneracy.py
|
Python
|
bsd-3-clause
| 5,261
|
[
"phonopy"
] |
4e9f519e2651abc50157fd2c09a1983ae095166d953c5db63498851b923f9b98
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class Node(object):
    """Node class collects helper functions for BinarySearchTree."""
    def __init__(self, val, left=None, right=None, parent=None):
        # Payload value used for ordering within the tree.
        self.val = val
        # Child links; None when the child is absent.
        self.left = left
        self.right = right
        # Back-link to the parent node; None for the root.
        self.parent = parent
class BinarySearchTree(object):
    """Binary search tree class.

    Property:
    - The left subtree of a node contains only nodes with vals lesser than
      the node's val.
    - The right subtree of a node contains only nodes with vals greater than
      the node's val.

    Travesal:
    - Inorder: left -> root -> right
    - Preorder: root -> left -> right
    - Postorder: left -> right -> root
    """
    def __init__(self):
        # Root node of the tree; None while the tree is empty.
        self.root = None
def search_recur(self, root, val):
"""Search val starting from root by recursion.
Time complexity: O(logn), where n is the node number.
Space complexity: O(logn).
"""
if not root:
return False
if val == root.val:
return True
elif val < root.val:
return self.search_recur(root.left, val)
else:
return self.search_recur(root.right, val)
def search_iter(self, root, val):
"""Search val starting from root by iteration.
Time complexity: O(logn), where n is the node number.
Space complexity: O(1).
"""
current = root
while current:
if val == current.val:
return True
elif val < current.val:
current = current.left
else:
current = current.right
return False
def _insert_recur_util(self, root, new_val):
"""Helper function for insert_recur()"""
new_node = Node(new_val)
if new_val < root.val:
if not root.left:
root.left = new_node
new_node.parent = root
return None
else:
self._insert_recur_util(root.left, new_val)
else:
if not root.right:
root.right = new_node
new_node.parent = root
return None
else:
self._insert_recur_util(root.right, new_val)
def insert_recur(self, new_val):
"""Insert a new node with val by recursion.
Time complexity: O(logn).
Space complexity: O(logn).
"""
if not self.root:
self.root = Node(new_val)
return None
self._insert_recur_util(self.root, new_val)
def insert_iter(self, new_val):
"""Insert a new node with val by iteration.
Use current and parent to track new node's insertion postion
and its parent.
Time complexity: O(logn).
Space complexity: O(1).
"""
new = Node(new_val)
parent = None
current = self.root
# Go down to the bottom node whose parent will be new node's parent.
while current:
parent = current
if new_val < current.val:
current = current.left
else:
current = current.right
new.parent = parent
if not parent:
# If the tree is empty.
self.root = new
elif new_val < parent.val:
parent.left = new
else:
parent.right = new
def find_minimum(self, root):
"""Find minimum starting from root.
Time complexity: O(logn), where n is the node number.
Space complexity: O(1).
"""
current = root
while current.left:
current = current.left
return current
def find_maximum(self, root):
"""Find maximum starting from root.
Time complexity: O(logn).
Space complexity: O(1).
"""
current = root
while current.right:
current = current.right
return current
def find_successor(self, root):
"""Find succesor of root, i.e. next biggest node.
Time complexity: O(logn), where n is the node number.
Space complexity: O(1).
"""
current = root
# If root's right existed, find leftmost node in root's right subtree.
if current.right:
return self.find_minimum(current.right)
# If not, go up the tree to the lowest ancestor of node,
# whose "left" child is also an ancestor of node.
parent = current.parent
while parent and current == parent.right:
current = parent
parent = parent.parent
return parent
def find_predecessor(self, root):
"""Find predecessor of root, i.e. previous biggest node.
Time complexity: O(logn), where n is the node number.
Space complexity: O(1).
"""
current = root
# If root's left existed, find rightmost node in root's left subtree.
if current.left:
return self.find_maximum(current.left)
# If not, go up the tree to the lowest ancestor of node,
# whose "right" child is also an ancestor of root.
parent = current.parent
while parent and current == parent.left:
current = parent
parent = parent.parent
return parent
def _transplant(self, from_node, to_node):
"""Helper function for delete(): Transplant a subtree.
Time complexity: O(1).
Space complexity: O(1).
"""
if not to_node.parent:
self.root = from_node
elif to_node == to_node.parent.left:
# If to_node is its parent's left node.
to_node.parent.left = from_node
else:
# If to_node is its parent's right node.
to_node.parent.right = from_node
if from_node:
from_node.parent = to_node.parent
def delete(self, del_node):
"""Delete node.
Time complexity: O(logn).
Space complexity: O(1).
"""
if not del_node.left:
# If node has no left child, transplant right subtree to it.
self._transplant(del_node.right, del_node)
elif not del_node.right:
# If node has no right child, transplant left subtree to it.
self._transplant(del_node.left, del_node)
else:
# Node has both left & right children.
# Find its "lower" succesor which has no left child.
trans_node = self.find_minimum(del_node.right)
# If trans_node's parent is not del_node,
# transplant its right node to it, and take over del_node's right.
if trans_node.parent != del_node:
self._transplant(trans_node.right, trans_node)
trans_node.right = del_node.right
trans_node.right.parent = trans_node
# Finally, transplant trans_node to del_node.
self._transplant(trans_node, del_node)
trans_node.left = del_node.left
trans_node.left.parent = trans_node
def inorder_recur(self, root):
"""Inorder traversal: left -> root -> right, by recursion.
Time complexity: O(n).
Space complexity: O(logn) for balanced tree; O(n) for single sided tree.
"""
if not root:
return None
self.inorder_recur(root.left)
print(root.val)
self.inorder_recur(root.right)
def inorder_iter(self, root):
"""Inorder traversal: left -> root -> right, by iteration.
Time complexity: O(n).
Space complexity: O(logn) for balanced tree; O(n) for single sided tree.
"""
if not root:
return None
previous = None
current = root
# Use stack for DFS for inorder traversal.
stack = []
while current or stack:
# If current exists, push to stack and visit leftmost node.
while current:
stack.append(current)
current = current.left
# Pop stack as current and print its value.
current = stack.pop()
print(current.val)
# Update current and previous by inorder traversal.
previous = current
current = current.right
def preorder_recur(self, root):
"""Preorder traversal: root -> left -> right, by recursion.
Time complexity: O(n).
Space complexity: O(logn) for balanced tree; O(n) for single sided tree.
"""
if not root:
return None
print(root.val)
self.preorder_recur(root.left)
self.preorder_recur(root.right)
def preorder_iter(self, root):
"""Preorder traversal: root -> left -> right, by iteration.
Time complexity: O(n).
Space complexity: O(logn) for balanced tree; O(n) for single sided tree.
"""
if not root:
return None
stack = [root]
while stack:
current = stack.pop()
print(current.val)
# Push right before left since we use stack with FILO.
if current.right:
stack.append(current.right)
if current.left:
stack.append(current.left)
def postorder_recur(self, root):
"""Postorder traversal: left -> right -> root, by recursion.
Time complexity: O(n).
Space complexity: O(logn) for balanced tree; O(n) for single sided tree.
"""
if not root:
return None
self.postorder_recur(root.left)
self.postorder_recur(root.right)
print(root.val)
def postorder_iter(self, root):
"""Postorder traversal: left -> right -> root, by iteration.
Time complexity: O(n).
Space complexity: O(logn) for balanced tree; O(n) for single sided tree.
"""
if not root:
return None
# Collect revsersed traverses.
rev_traverses = []
stack = [root]
while stack:
current = stack.pop()
rev_traverses.append(current.val)
# Push left before right since we use stack with FILO.
if current.left:
stack.append(current.left)
if current.right:
stack.append(current.right)
for val in rev_traverses[::-1]:
print(val)
def main():
    """Demo driver: builds this tree and exercises every BST operation.

          6
         / \
        4   7
       / \   \
      2   5   8
    """
    bst = BinarySearchTree()
    bst.insert_recur(6)
    bst.insert_recur(4)
    bst.insert_recur(7)
    bst.insert_iter(2)
    bst.insert_iter(5)
    bst.insert_iter(8)
    # Inorder walk: 2, 4, 5, 6, 7, 8.
    print('Inorder traversal by recursion:')
    bst.inorder_recur(bst.root)
    print('Inorder traversal by iteration:')
    bst.inorder_iter(bst.root)
    # Preorder walk: 6, 4, 2, 5, 7, 8.
    print('Preorder traversal by recursion:')
    bst.preorder_recur(bst.root)
    print('Preorder traversal by iteration:')
    bst.preorder_iter(bst.root)
    # Postorder walk: 2, 5, 4, 8, 7, 6.
    print('Postorder traversal by recursion:')
    bst.postorder_recur(bst.root)
    print('Postorder traversal by iteration:')
    bst.postorder_iter(bst.root)
    # Search existing val 6.
    print('Search existing node with val 6:')
    print(bst.search_recur(bst.root, 6))
    print('Search existing node with val 6:')
    print(bst.search_iter(bst.root, 6))
    # Search nonexisting val 10.
    print('Search nonexisting node with val 10:')
    print(bst.search_recur(bst.root, 10))
    print('Search nonexisting node with val 10:')
    print(bst.search_iter(bst.root, 10))
    # Find min of root: 2.
    print('Find min: {}'.format(bst.find_minimum(bst.root).val))
    # Find min of root's right subtree (rooted at 7): 7.
    print('Find min from 7: {}'.format(bst.find_minimum(bst.root.right).val))
    # Find max of root: 8 (original comment wrongly said 2).
    print('Find max: {}'.format(bst.find_maximum(bst.root).val))
    # Find max of root's right subtree (rooted at 7): 8.
    print('Find max from 7: {}'.format(bst.find_maximum(bst.root.right).val))
    # Find successor of root: 7.
    print('Find successor: {}'.format(bst.find_successor(bst.root).val))
    # Find successor of root's left's right (node 5): 6.
    print('Find successor from left\'s right: {}'
          .format(bst.find_successor(bst.root.left.right).val))
    # Find predecessor of root: 5.
    print('Find predecessor of root: {}'
          .format(bst.find_predecessor(bst.root).val))
    # Find predecessor of root's left's right (node 5): 4.
    print('Find predecessor from root\'s left: {}'
          .format(bst.find_predecessor(bst.root.left.right).val))
    # Delete root's left (node 4, not 5 as the old comment said),
    # then run inorder walk: 2, 5, 6, 7, 8.
    bst.delete(bst.root.left)
    print('Inorder traversal by recursion:')
    bst.inorder_recur(bst.root)
    # Further delete root (node 6), then run inorder walk: 2, 5, 7, 8.
    bst.delete(bst.root)
    print('Inorder traversal after deleting 6:')
    bst.inorder_recur(bst.root)


if __name__ == '__main__':
    main()
|
bowen0701/algorithms_data_structures
|
ds_binary_search_tree.py
|
Python
|
bsd-2-clause
| 13,181
|
[
"VisIt"
] |
9a795b971cde79d805db0bb127bc7f2e4b707d1c3f660cbf7c5f0f7d2e893c8c
|
#!/usr/bin/python
#-*- coding: UTF-8 -*-
# File: blocks.py
# Solely for identifying Unicode blocks for unicode characters.
# Based on code from:
# http://stackoverflow.com/questions/243831/unicode-block-of-a-character-in-python
# But updated for 2013.
# Copyright (C) 2013-2020 Peter Murphy <peterkmurphy@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import re;
# Index of the "Private Use Area" block. If characters are not in any
# assigned block, they are assigned to the Private Use Area by default.
PRIV_USE_BLOCK = 151
def block(ch):
    """Return the Unicode block name for ch, or None if ch has no block.

    Fixed: the original doctests used Python 2's ``unichr`` although the
    function asserts ``isinstance(ch, str)`` (Python 3 only).

    >>> block('a')
    'Basic Latin'
    >>> block(chr(0x0b80))
    'Tamil'
    >>> block(chr(0xe0080))
    """
    # NOTE: assert is kept (rather than raising ValueError) to preserve the
    # exception type existing callers may rely on.
    assert isinstance(ch, str) and len(ch) == 1, repr(ch)
    cp = ord(ch)
    for start, end, name in _blocks:
        if start <= cp <= end:
            return name
    return None
def blockbyint(intval):
    """Return the Unicode block name for the code point intval, or None."""
    for low, high, blockname in _blocks:
        if low <= intval <= high:
            return blockname
def namefromindex(ith):
    """Return the name of the ith Unicode block."""
    start, end, name = _blocks[ith]
    return name
def indexfromname(name):
    """Return the index of a block name (Private Use Area for a falsy name)."""
    return _blockmap[name] if name else PRIV_USE_BLOCK
def numblocks():
    """Return how many Unicode blocks were parsed."""
    return _blocksize
def _initBlocks(text):
global _blocks, _blockmap, _blocksize;
_blocks = [];
_blockmap = {};
iter = 0;
pattern = re.compile(r'([0-9A-F]+)\.\.([0-9A-F]+);\ (\S.*\S)')
for line in text.splitlines():
m = pattern.match(line)
if m:
start, end, name = m.groups()
_blocks.append((int(start, 16), int(end, 16), name))
_blockmap[name] = iter;
iter += 1;
_blocksize = len(_blocks);
# retrieved from http://unicode.org/Public/UNIDATA/Blocks.txt
_initBlocks('''
# Blocks-13.0.0.txt
# Date: 2019-07-10, 19:06:00 GMT [KW]
# © 2019 Unicode®, Inc.
# For terms of use, see http://www.unicode.org/terms_of_use.html
#
# Unicode Character Database
# For documentation, see http://www.unicode.org/reports/tr44/
#
# Format:
# Start Code..End Code; Block Name
# ================================================
# Note: When comparing block names, casing, whitespace, hyphens,
# and underbars are ignored.
# For example, "Latin Extended-A" and "latin extended a" are equivalent.
# For more information on the comparison of property values,
# see UAX #44: http://www.unicode.org/reports/tr44/
#
# All block ranges start with a value where (cp MOD 16) = 0,
# and end with a value where (cp MOD 16) = 15. In other words,
# the last hexadecimal digit of the start of range is ...0
# and the last hexadecimal digit of the end of range is ...F.
# This constraint on block ranges guarantees that allocations
# are done in terms of whole columns, and that code chart display
# never involves splitting columns in the charts.
#
# All code points not explicitly listed for Block
# have the value No_Block.
# Property: Block
#
# @missing: 0000..10FFFF; No_Block
0000..007F; Basic Latin
0080..00FF; Latin-1 Supplement
0100..017F; Latin Extended-A
0180..024F; Latin Extended-B
0250..02AF; IPA Extensions
02B0..02FF; Spacing Modifier Letters
0300..036F; Combining Diacritical Marks
0370..03FF; Greek and Coptic
0400..04FF; Cyrillic
0500..052F; Cyrillic Supplement
0530..058F; Armenian
0590..05FF; Hebrew
0600..06FF; Arabic
0700..074F; Syriac
0750..077F; Arabic Supplement
0780..07BF; Thaana
07C0..07FF; NKo
0800..083F; Samaritan
0840..085F; Mandaic
0860..086F; Syriac Supplement
08A0..08FF; Arabic Extended-A
0900..097F; Devanagari
0980..09FF; Bengali
0A00..0A7F; Gurmukhi
0A80..0AFF; Gujarati
0B00..0B7F; Oriya
0B80..0BFF; Tamil
0C00..0C7F; Telugu
0C80..0CFF; Kannada
0D00..0D7F; Malayalam
0D80..0DFF; Sinhala
0E00..0E7F; Thai
0E80..0EFF; Lao
0F00..0FFF; Tibetan
1000..109F; Myanmar
10A0..10FF; Georgian
1100..11FF; Hangul Jamo
1200..137F; Ethiopic
1380..139F; Ethiopic Supplement
13A0..13FF; Cherokee
1400..167F; Unified Canadian Aboriginal Syllabics
1680..169F; Ogham
16A0..16FF; Runic
1700..171F; Tagalog
1720..173F; Hanunoo
1740..175F; Buhid
1760..177F; Tagbanwa
1780..17FF; Khmer
1800..18AF; Mongolian
18B0..18FF; Unified Canadian Aboriginal Syllabics Extended
1900..194F; Limbu
1950..197F; Tai Le
1980..19DF; New Tai Lue
19E0..19FF; Khmer Symbols
1A00..1A1F; Buginese
1A20..1AAF; Tai Tham
1AB0..1AFF; Combining Diacritical Marks Extended
1B00..1B7F; Balinese
1B80..1BBF; Sundanese
1BC0..1BFF; Batak
1C00..1C4F; Lepcha
1C50..1C7F; Ol Chiki
1C80..1C8F; Cyrillic Extended-C
1C90..1CBF; Georgian Extended
1CC0..1CCF; Sundanese Supplement
1CD0..1CFF; Vedic Extensions
1D00..1D7F; Phonetic Extensions
1D80..1DBF; Phonetic Extensions Supplement
1DC0..1DFF; Combining Diacritical Marks Supplement
1E00..1EFF; Latin Extended Additional
1F00..1FFF; Greek Extended
2000..206F; General Punctuation
2070..209F; Superscripts and Subscripts
20A0..20CF; Currency Symbols
20D0..20FF; Combining Diacritical Marks for Symbols
2100..214F; Letterlike Symbols
2150..218F; Number Forms
2190..21FF; Arrows
2200..22FF; Mathematical Operators
2300..23FF; Miscellaneous Technical
2400..243F; Control Pictures
2440..245F; Optical Character Recognition
2460..24FF; Enclosed Alphanumerics
2500..257F; Box Drawing
2580..259F; Block Elements
25A0..25FF; Geometric Shapes
2600..26FF; Miscellaneous Symbols
2700..27BF; Dingbats
27C0..27EF; Miscellaneous Mathematical Symbols-A
27F0..27FF; Supplemental Arrows-A
2800..28FF; Braille Patterns
2900..297F; Supplemental Arrows-B
2980..29FF; Miscellaneous Mathematical Symbols-B
2A00..2AFF; Supplemental Mathematical Operators
2B00..2BFF; Miscellaneous Symbols and Arrows
2C00..2C5F; Glagolitic
2C60..2C7F; Latin Extended-C
2C80..2CFF; Coptic
2D00..2D2F; Georgian Supplement
2D30..2D7F; Tifinagh
2D80..2DDF; Ethiopic Extended
2DE0..2DFF; Cyrillic Extended-A
2E00..2E7F; Supplemental Punctuation
2E80..2EFF; CJK Radicals Supplement
2F00..2FDF; Kangxi Radicals
2FF0..2FFF; Ideographic Description Characters
3000..303F; CJK Symbols and Punctuation
3040..309F; Hiragana
30A0..30FF; Katakana
3100..312F; Bopomofo
3130..318F; Hangul Compatibility Jamo
3190..319F; Kanbun
31A0..31BF; Bopomofo Extended
31C0..31EF; CJK Strokes
31F0..31FF; Katakana Phonetic Extensions
3200..32FF; Enclosed CJK Letters and Months
3300..33FF; CJK Compatibility
3400..4DBF; CJK Unified Ideographs Extension A
4DC0..4DFF; Yijing Hexagram Symbols
4E00..9FFF; CJK Unified Ideographs
A000..A48F; Yi Syllables
A490..A4CF; Yi Radicals
A4D0..A4FF; Lisu
A500..A63F; Vai
A640..A69F; Cyrillic Extended-B
A6A0..A6FF; Bamum
A700..A71F; Modifier Tone Letters
A720..A7FF; Latin Extended-D
A800..A82F; Syloti Nagri
A830..A83F; Common Indic Number Forms
A840..A87F; Phags-pa
A880..A8DF; Saurashtra
A8E0..A8FF; Devanagari Extended
A900..A92F; Kayah Li
A930..A95F; Rejang
A960..A97F; Hangul Jamo Extended-A
A980..A9DF; Javanese
A9E0..A9FF; Myanmar Extended-B
AA00..AA5F; Cham
AA60..AA7F; Myanmar Extended-A
AA80..AADF; Tai Viet
AAE0..AAFF; Meetei Mayek Extensions
AB00..AB2F; Ethiopic Extended-A
AB30..AB6F; Latin Extended-E
AB70..ABBF; Cherokee Supplement
ABC0..ABFF; Meetei Mayek
AC00..D7AF; Hangul Syllables
D7B0..D7FF; Hangul Jamo Extended-B
D800..DB7F; High Surrogates
DB80..DBFF; High Private Use Surrogates
DC00..DFFF; Low Surrogates
E000..F8FF; Private Use Area
F900..FAFF; CJK Compatibility Ideographs
FB00..FB4F; Alphabetic Presentation Forms
FB50..FDFF; Arabic Presentation Forms-A
FE00..FE0F; Variation Selectors
FE10..FE1F; Vertical Forms
FE20..FE2F; Combining Half Marks
FE30..FE4F; CJK Compatibility Forms
FE50..FE6F; Small Form Variants
FE70..FEFF; Arabic Presentation Forms-B
FF00..FFEF; Halfwidth and Fullwidth Forms
FFF0..FFFF; Specials
10000..1007F; Linear B Syllabary
10080..100FF; Linear B Ideograms
10100..1013F; Aegean Numbers
10140..1018F; Ancient Greek Numbers
10190..101CF; Ancient Symbols
101D0..101FF; Phaistos Disc
10280..1029F; Lycian
102A0..102DF; Carian
102E0..102FF; Coptic Epact Numbers
10300..1032F; Old Italic
10330..1034F; Gothic
10350..1037F; Old Permic
10380..1039F; Ugaritic
103A0..103DF; Old Persian
10400..1044F; Deseret
10450..1047F; Shavian
10480..104AF; Osmanya
104B0..104FF; Osage
10500..1052F; Elbasan
10530..1056F; Caucasian Albanian
10600..1077F; Linear A
10800..1083F; Cypriot Syllabary
10840..1085F; Imperial Aramaic
10860..1087F; Palmyrene
10880..108AF; Nabataean
108E0..108FF; Hatran
10900..1091F; Phoenician
10920..1093F; Lydian
10980..1099F; Meroitic Hieroglyphs
109A0..109FF; Meroitic Cursive
10A00..10A5F; Kharoshthi
10A60..10A7F; Old South Arabian
10A80..10A9F; Old North Arabian
10AC0..10AFF; Manichaean
10B00..10B3F; Avestan
10B40..10B5F; Inscriptional Parthian
10B60..10B7F; Inscriptional Pahlavi
10B80..10BAF; Psalter Pahlavi
10C00..10C4F; Old Turkic
10C80..10CFF; Old Hungarian
10D00..10D3F; Hanifi Rohingya
10E60..10E7F; Rumi Numeral Symbols
10E80..10EBF; Yezidi
10F00..10F2F; Old Sogdian
10F30..10F6F; Sogdian
10FB0..10FDF; Chorasmian
10FE0..10FFF; Elymaic
11000..1107F; Brahmi
11080..110CF; Kaithi
110D0..110FF; Sora Sompeng
11100..1114F; Chakma
11150..1117F; Mahajani
11180..111DF; Sharada
111E0..111FF; Sinhala Archaic Numbers
11200..1124F; Khojki
11280..112AF; Multani
112B0..112FF; Khudawadi
11300..1137F; Grantha
11400..1147F; Newa
11480..114DF; Tirhuta
11580..115FF; Siddham
11600..1165F; Modi
11660..1167F; Mongolian Supplement
11680..116CF; Takri
11700..1173F; Ahom
11800..1184F; Dogra
118A0..118FF; Warang Citi
11900..1195F; Dives Akuru
119A0..119FF; Nandinagari
11A00..11A4F; Zanabazar Square
11A50..11AAF; Soyombo
11AC0..11AFF; Pau Cin Hau
11C00..11C6F; Bhaiksuki
11C70..11CBF; Marchen
11D00..11D5F; Masaram Gondi
11D60..11DAF; Gunjala Gondi
11EE0..11EFF; Makasar
11FB0..11FBF; Lisu Supplement
11FC0..11FFF; Tamil Supplement
12000..123FF; Cuneiform
12400..1247F; Cuneiform Numbers and Punctuation
12480..1254F; Early Dynastic Cuneiform
13000..1342F; Egyptian Hieroglyphs
13430..1343F; Egyptian Hieroglyph Format Controls
14400..1467F; Anatolian Hieroglyphs
16800..16A3F; Bamum Supplement
16A40..16A6F; Mro
16AD0..16AFF; Bassa Vah
16B00..16B8F; Pahawh Hmong
16E40..16E9F; Medefaidrin
16F00..16F9F; Miao
16FE0..16FFF; Ideographic Symbols and Punctuation
17000..187FF; Tangut
18800..18AFF; Tangut Components
18B00..18CFF; Khitan Small Script
18D00..18D8F; Tangut Supplement
1B000..1B0FF; Kana Supplement
1B100..1B12F; Kana Extended-A
1B130..1B16F; Small Kana Extension
1B170..1B2FF; Nushu
1BC00..1BC9F; Duployan
1BCA0..1BCAF; Shorthand Format Controls
1D000..1D0FF; Byzantine Musical Symbols
1D100..1D1FF; Musical Symbols
1D200..1D24F; Ancient Greek Musical Notation
1D2E0..1D2FF; Mayan Numerals
1D300..1D35F; Tai Xuan Jing Symbols
1D360..1D37F; Counting Rod Numerals
1D400..1D7FF; Mathematical Alphanumeric Symbols
1D800..1DAAF; Sutton SignWriting
1E000..1E02F; Glagolitic Supplement
1E100..1E14F; Nyiakeng Puachue Hmong
1E2C0..1E2FF; Wancho
1E800..1E8DF; Mende Kikakui
1E900..1E95F; Adlam
1EC70..1ECBF; Indic Siyaq Numbers
1ED00..1ED4F; Ottoman Siyaq Numbers
1EE00..1EEFF; Arabic Mathematical Alphabetic Symbols
1F000..1F02F; Mahjong Tiles
1F030..1F09F; Domino Tiles
1F0A0..1F0FF; Playing Cards
1F100..1F1FF; Enclosed Alphanumeric Supplement
1F200..1F2FF; Enclosed Ideographic Supplement
1F300..1F5FF; Miscellaneous Symbols and Pictographs
1F600..1F64F; Emoticons
1F650..1F67F; Ornamental Dingbats
1F680..1F6FF; Transport and Map Symbols
1F700..1F77F; Alchemical Symbols
1F780..1F7FF; Geometric Shapes Extended
1F800..1F8FF; Supplemental Arrows-C
1F900..1F9FF; Supplemental Symbols and Pictographs
1FA00..1FA6F; Chess Symbols
1FA70..1FAFF; Symbols and Pictographs Extended-A
1FB00..1FBFF; Symbols for Legacy Computing
20000..2A6DF; CJK Unified Ideographs Extension B
2A700..2B73F; CJK Unified Ideographs Extension C
2B740..2B81F; CJK Unified Ideographs Extension D
2B820..2CEAF; CJK Unified Ideographs Extension E
2CEB0..2EBEF; CJK Unified Ideographs Extension F
2F800..2FA1F; CJK Compatibility Ideographs Supplement
30000..3134F; CJK Unified Ideographs Extension G
E0000..E007F; Tags
E0100..E01EF; Variation Selectors Supplement
F0000..FFFFF; Supplementary Private Use Area-A
100000..10FFFF; Supplementary Private Use Area-B
# EOF''')
if __name__ == '__main__':
    # Smoke test: the Private Use Area index, then block lookups for a few
    # representative code points (ASCII, PUA bounds, SMP start, max code point).
    print(indexfromname("Private Use Area"))
    for sample in ('a', chr(0xE000), chr(0xF8FF), chr(0x10000), chr(0x10ffff)):
        print(block(sample))
|
peterkmurphy/glyphviewer
|
glyphviewer/blocks.py
|
Python
|
gpl-3.0
| 13,058
|
[
"FEFF"
] |
bc2e8e1559fce0fb42cb1a9c8c57627d4416f8d5364b84af512dd50ec4f1ee96
|
import vtk, qt, slicer
import os, sys, shutil
import uuid
from scipy import io
import numpy as np
from . import WarpDriveUtil, GridNodeHelper
import ImportSubject
try:
import h5py
except:
slicer.util.pip_install('h5py')
import h5py
try:
import hdf5storage
except:
slicer.util.pip_install('hdf5storage')
import hdf5storage
def saveApprovedData(subjectPath):
    """Mark the subject's normalization as approved in ea_coreg_approved.mat.

    Writes ``glanat = 2`` while preserving every other field already stored
    in the file. Handles both HDF5-based (MATLAB v7.3) files via h5py /
    hdf5storage and classic .mat files via scipy.io.

    Fixed: the bare ``except:`` is narrowed to ``except Exception`` (it no
    longer swallows KeyboardInterrupt/SystemExit), and the working-directory
    switch is wrapped in try/finally so the cwd is restored even if the
    write fails.
    """
    approvedFile = os.path.join(subjectPath, 'ea_coreg_approved.mat')
    matfiledata = {}
    if os.path.isfile(approvedFile):
        try:
            # read file and copy data except for glanat
            with h5py.File(approvedFile, 'r') as f:
                for k in f.keys():
                    if k != 'glanat':
                        keyValue = f[k][()]
                        matfiledata[k] = keyValue
            # now add approved glanat
            matfiledata[u'glanat'] = np.array([2])
        except Exception:
            # Not readable as HDF5: use the classic scipy .mat reader/writer
            # and return early.
            f = io.loadmat(approvedFile)
            for k in f.keys():
                if k != 'glanat':
                    keyValue = f[k]
                    matfiledata[k] = keyValue
            matfiledata['glanat'] = np.array([[2]], dtype='uint8')
            io.savemat(approvedFile, matfiledata)
            return
    else:
        matfiledata[u'glanat'] = np.array([2])
    # save
    # for some reason putting subject path into hdf5storage.write doesnt work,
    # so temporarily change the working directory (restored even on error).
    currentDir = os.getcwd()
    os.chdir(subjectPath)
    try:
        hdf5storage.write(matfiledata, '.', 'ea_coreg_approved.mat', matlab_compatible=True)
    finally:
        os.chdir(currentDir)
def checkExtensionInstall(extensionName):
    """Ensure the named Slicer extension is installed.

    When missing: downloads the package from the extensions server,
    installs it, informs the user that a restart is needed, and exits
    Slicer. Returns True in that case; returns None when the extension
    is already installed.
    """
    em = slicer.app.extensionsManagerModel()
    if not em.isExtensionInstalled(extensionName):
        # Resolve the download URL from the extension's server metadata.
        extensionMetaData = em.retrieveExtensionMetadataByName(extensionName)
        url = os.path.join(em.serverUrl().toString(), 'download', 'item', extensionMetaData['item_id'])
        # Use the package md5 as a unique temporary file name.
        extensionPackageFilename = os.path.join(slicer.app.temporaryPath, extensionMetaData['md5'])
        slicer.util.downloadFile(url, extensionPackageFilename)
        em.installExtension(extensionPackageFilename)
        qt.QMessageBox.information(qt.QWidget(), '', 'Slicer will install %s and quit.\nPlease restart.' % extensionName)
        slicer.util.exit()
        return True
def updateParameterNodeFromArgs(parameterNode):
    """Initialize the parameter node from the process command line.

    Expects ``sys.argv`` to be ``[script, leadDBSPath, subjectPath, ...]``
    where ``leadDBSPath`` contains ``lead.m``. Returns True when the
    parameter node was populated; returns None when it was already
    initialized or the expected arguments are absent.

    Fixed: removed the unused local ``args = sys.argv``.
    """
    if parameterNode.GetParameter("MNIPath") != '':
        return  # was already called
    if (len(sys.argv) > 2) and os.path.isfile(os.path.join(sys.argv[1], 'lead.m')):
        # Join all subject paths with a unique separator so they can be
        # stored in a single string parameter and split back later.
        pathsSeparator = uuid.uuid4().hex
        subjectPaths = pathsSeparator.join(sys.argv[2:])
        subjectPath = subjectPaths.split(pathsSeparator)[0]
        MNIPath = os.path.join(sys.argv[1], 'templates', 'space', 'MNI_ICBM_2009b_NLIN_ASYM')
        MNIAtlasPath = os.path.join(MNIPath, 'atlases')
        # Platform-specific suffix of the bundled ANTs binary.
        if sys.platform == "darwin":
            ext = "maci64"
        elif sys.platform.startswith('win'):
            ext = 'exe'
        else:
            ext = 'glnxa64'
        antsApplyTransformsPath = os.path.join(sys.argv[1], 'ext_libs', 'ANTs', 'antsApplyTransforms.' + ext)
        # set parameter node
        parameterNode.SetParameter("separator", pathsSeparator)
        parameterNode.SetParameter("subjectPaths", subjectPaths)
        parameterNode.SetParameter("subjectN", "0")
        parameterNode.SetParameter("subjectPath", subjectPath)
        parameterNode.SetParameter("MNIPath", MNIPath)
        parameterNode.SetParameter("MNIAtlasPath", MNIAtlasPath)
        parameterNode.SetParameter("antsApplyTransformsPath", antsApplyTransformsPath)
        parameterNode.SetNodeReferenceID("ImageNode", None)
        parameterNode.SetNodeReferenceID("TemplateNode", None)
        parameterNode.SetNodeReferenceID("Segmentation", None)
        return True
def loadSubjectTransform(subjectPath, antsApplyTransformsPath):
    """Load the subject's glanatComposite warp field as a transform node.

    When ImportSubjectLogic reports an h5-format transform, it is first
    converted (via antsApplyTransforms) to the current lead-dbs
    specification, then the composite transform is imported and returned.
    """
    # update subject warp fields to new lead dbs specification
    if ImportSubject.ImportSubjectLogic().ish5Transform(subjectPath):
        ImportSubject.ImportSubjectLogic().updateTranform(subjectPath, antsApplyTransformsPath)
    # load glanat composite
    glanatCompositeNode = ImportSubject.ImportSubjectLogic().importTransform(subjectPath, 'glanatComposite.nii.gz')
    return glanatCompositeNode
def queryUserApproveSubject(subjectPath):
    """Ask whether to mark the (uncorrected) subject as approved.

    Shows a Save/Discard/Cancel dialog. Returns False on Cancel;
    otherwise returns True, first saving the approved flag when the
    user chose Save.
    """
    msgBox = qt.QMessageBox()
    msgBox.setText('No corrections made')
    msgBox.setInformativeText('Save subject as approved?')
    msgBox.setStandardButtons(qt.QMessageBox().Save | qt.QMessageBox().Discard | qt.QMessageBox().Cancel)
    ret = msgBox.exec_()
    if ret == qt.QMessageBox().Cancel:
        return False
    if ret == qt.QMessageBox().Save:
        saveApprovedData(subjectPath)
    return True
def applyChanges(subjectPath, inputNode, imageNode):
    """Harden the corrections and save forward/backward warp fields.

    Saves ``glanatComposite.nii.gz`` (forward) and
    ``glanatInverseComposite.nii.gz`` (backward) under subjectPath,
    then re-attaches imageNode to inputNode. A wait cursor is shown for
    the duration. NOTE(review): the statement order matters — the input
    node is hardened, converted and inverted in place.
    """
    qt.QApplication.setOverrideCursor(qt.Qt.WaitCursor)
    qt.QApplication.processEvents()
    # undo changes to image node
    imageNode.SetAndObserveTransformNodeID(None)
    # FORWARD
    size, origin, spacing = GridNodeHelper.getGridDefinition(inputNode)
    # harden changes in input
    inputNode.HardenTransform()
    # to grid transform: convert using an empty reference volume with the
    # same geometry as the input
    outNode = slicer.mrmlScene.AddNewNodeByClass('vtkMRMLTransformNode')
    referenceVolume = GridNodeHelper.emptyVolume(size, origin, spacing)
    slicer.modules.transforms.logic().ConvertToGridTransform(inputNode, referenceVolume, outNode)
    # set to input and delete aux
    inputNode.SetAndObserveTransformFromParent(outNode.GetTransformFromParent())
    slicer.mrmlScene.RemoveNode(outNode)
    slicer.mrmlScene.RemoveNode(referenceVolume)
    # save
    slicer.util.saveNode(inputNode, os.path.join(subjectPath,'glanatComposite.nii.gz'))
    # BACKWARD: invert in place, convert and save, then restore
    inputNode.Inverse()
    # to grid
    outNode = slicer.mrmlScene.AddNewNodeByClass('vtkMRMLTransformNode')
    slicer.modules.transforms.logic().ConvertToGridTransform(inputNode, imageNode, outNode)
    # save
    slicer.util.saveNode(outNode, os.path.join(subjectPath,'glanatInverseComposite.nii.gz'))
    # delete aux node
    slicer.mrmlScene.RemoveNode(outNode)
    # back to original
    inputNode.Inverse()
    imageNode.SetAndObserveTransformNodeID(inputNode.GetID())
    qt.QApplication.setOverrideCursor(qt.QCursor(qt.Qt.ArrowCursor))
def setTargetFiducialsAsFixed():
    """Convert every 'target'-tagged fiducial into a fixed point.

    For each markups fiducial carrying the 'target' subject-hierarchy
    attribute: the attribute is removed, the node is registered as a
    fixed point, its enclosing correction folder is deleted, and the
    fiducial takes over the folder's name.
    """
    shNode = slicer.mrmlScene.GetSubjectHierarchyNode()
    fiducialNodes = slicer.mrmlScene.GetNodesByClass('vtkMRMLMarkupsFiducialNode')
    # Drop the extra reference added by GetNodesByClass.
    fiducialNodes.UnRegister(slicer.mrmlScene)
    for i in range(fiducialNodes.GetNumberOfItems()):
        fiducialNode = fiducialNodes.GetItemAsObject(i)
        if 'target' in shNode.GetItemAttributeNames(shNode.GetItemByDataNode(fiducialNode)):
            # get parent folder
            parentFolder = shNode.GetItemDataNode(shNode.GetItemParent(shNode.GetItemByDataNode(fiducialNode)))
            parentFolderName = parentFolder.GetName()
            # remove target attribute
            shNode.RemoveItemAttribute(shNode.GetItemByDataNode(fiducialNode), 'target')
            # add as fixed point
            WarpDriveUtil.addFixedPoint(fiducialNode)
            # remove correction
            removeNodeAndChildren(parentFolder)
            # change fixed point name
            fiducialNode.SetName(parentFolderName)
def saveCurrentScene(subjectPath):
    """Save corrections and fixed points in the subject directory so they
    will be loaded next time.

    Recreates ``<subjectPath>/WarpDrive`` from scratch, writes every
    'correction'-tagged fiducial (.fcsv) and label map (.nrrd) into its
    Data subfolder under a random name, and commits the MRML scene.
    """
    warpDriveSavePath = os.path.join(subjectPath,'WarpDrive')
    # delete previous save directory so stale files never linger
    if os.path.isdir(warpDriveSavePath):
        shutil.rmtree(warpDriveSavePath)
    # create directories
    os.mkdir(warpDriveSavePath)
    os.mkdir(os.path.join(warpDriveSavePath,'Data'))
    # set scene URL
    slicer.mrmlScene.SetURL(os.path.join(warpDriveSavePath, 'WarpDriveScene.mrml'))
    # save corrections
    shNode = slicer.mrmlScene.GetSubjectHierarchyNode()
    for nodeType, nodeExt in zip(['vtkMRMLMarkupsFiducialNode', 'vtkMRMLLabelMapVolumeNode'], ['.fcsv', '.nrrd']):
        nodes = slicer.mrmlScene.GetNodesByClass(nodeType)
        nodes.UnRegister(slicer.mrmlScene)
        for i in range(nodes.GetNumberOfItems()):
            node = nodes.GetItemAsObject(i)
            if 'correction' in shNode.GetItemAttributeNames(shNode.GetItemByDataNode(node)):
                # uuid file names avoid collisions between nodes
                slicer.util.saveNode(node, os.path.join(warpDriveSavePath, 'Data', uuid.uuid4().hex + nodeExt))
    # save scene
    slicer.mrmlScene.Commit()
def DeleteCorrections():
    """Remove every 'correction'-tagged folder (and its contents).

    NOTE: PascalCase name kept for backward compatibility with callers.
    """
    shNode = slicer.mrmlScene.GetSubjectHierarchyNode()
    # delete folders
    folderNodes = slicer.mrmlScene.GetNodesByClass('vtkMRMLFolderDisplayNode')
    # Drop the extra reference added by GetNodesByClass.
    folderNodes.UnRegister(slicer.mrmlScene)
    for i in range(folderNodes.GetNumberOfItems()):
        folderNode = folderNodes.GetItemAsObject(i)
        if 'correction' in shNode.GetItemAttributeNames(shNode.GetItemByDataNode(folderNode)):
            removeNodeAndChildren(folderNode)
def removeNodeAndChildren(node):
    """Remove a node and all of its subject-hierarchy descendants.

    Collects the node's entire subtree (recursively) plus the node
    itself and removes each item from the subject hierarchy.
    """
    # get subject hierarchy node ID
    shNode = slicer.mrmlScene.GetSubjectHierarchyNode()
    nodeID = shNode.GetItemByDataNode(node)
    # get children (recursive=True collects the whole subtree)
    removeIDs = vtk.vtkIdList()
    shNode.GetItemChildren(nodeID, removeIDs, True)
    # add selected ID
    removeIDs.InsertNextId(nodeID)
    # remove
    for i in range(removeIDs.GetNumberOfIds()):
        shNode.RemoveItem(removeIDs.GetId(i))
|
andreashorn/lead_dbs
|
ext_libs/SlicerNetstim/WarpDrive/WarpDriveLib/Helpers/LeadDBSCall.py
|
Python
|
gpl-3.0
| 8,726
|
[
"VTK"
] |
920650df2fc02c1f34cfb4e53b6e10599596642f7197258c05e8fff70ff4560f
|
import gpflow
import gpflowopt
import numpy as np
from ..utility import GPflowOptTestCase
class TestRecompile(GPflowOptTestCase):
    """
    Regression test for #37.

    Setting new data on an acquisition wrapping a compiled VGP model must
    discard the stale evaluation storage (and not leave a _needs_recompile
    flag behind) so that the next evaluate() rebuilds it.
    """

    def test_vgp(self):
        with self.test_session():
            domain = gpflowopt.domain.UnitCube(2)
            X = gpflowopt.design.RandomDesign(10, domain).generate()
            Y = np.sin(X[:,[0]])
            m = gpflow.vgp.VGP(X, Y, gpflow.kernels.RBF(2), gpflow.likelihoods.Gaussian())
            acq = gpflowopt.acquisition.ExpectedImprovement(m)
            m.compile()
            # Compiling directly must clear the recompile flag.
            self.assertFalse(m._needs_recompile)
            # First evaluation creates the acquisition-function storage.
            acq.evaluate(gpflowopt.design.RandomDesign(10, domain).generate())
            self.assertTrue(hasattr(acq, '_evaluate_AF_storage'))
            Xnew = gpflowopt.design.RandomDesign(5, domain).generate()
            Ynew = np.sin(Xnew[:,[0]])
            # Growing the dataset must invalidate the cached storage
            # without resurrecting a _needs_recompile attribute.
            acq.set_data(np.vstack((X, Xnew)), np.vstack((Y, Ynew)))
            self.assertFalse(hasattr(acq, '_needs_recompile'))
            self.assertFalse(hasattr(acq, '_evaluate_AF_storage'))
            acq.evaluate(gpflowopt.design.RandomDesign(10, domain).generate())
|
GPflow/GPflowOpt
|
testing/unit/test_regression.py
|
Python
|
apache-2.0
| 1,146
|
[
"Gaussian"
] |
a4db539da586cb0c1523a5f82bfdcb46e2e6facff7c7e5dc4f5e8d9519988230
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Canonicalizes break statements by de-sugaring into a control boolean."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.contrib.py2tf.pyct import anno
from tensorflow.contrib.py2tf.pyct import templates
class BreakCanonicalizationTransformer(gast.NodeTransformer):
  """Canonicalizes break statements into additional conditionals.

  Each loop that contains a `break` gets a fresh control boolean; the
  `break` is rewritten as `var = True; continue`, and the loop condition
  (for `while`) or an `extra_cond` annotation (for `for`) is extended with
  `not var`. (The original one-line summary said "continue statements",
  but the only statement rewritten below is `gast.Break`.)
  """

  def __init__(self, namer):
    self.namer = namer
    # This is a stack structure, to correctly process nested loops.
    # Each entry is a [break_was_used, break_var_name] pair for one loop.
    self.break_uses = []

  def _create_break_check(self):
    # Guard expression `(not var_name)` over the innermost loop's flag.
    template = """
      (not var_name)
    """
    expr, = templates.replace(template, var_name=self.break_uses[-1][1])
    return expr.value

  def _create_break_trigger(self):
    # Replacement for a `break`: set the flag, then skip to the loop test.
    template = """
      var_name = True
    """
    block = templates.replace(template, var_name=self.break_uses[-1][1])
    block.append(gast.Continue())
    return block

  def _create_break_init(self):
    # Initializer `var_name = False`, emitted just before the loop.
    template = """
      var_name = False
    """
    assign, = templates.replace(template, var_name=self.break_uses[-1][1])
    return assign

  # TODO(mdan): Surely the transformer supports this better?
  def _manual_visit_list(self, block):
    # Like generic_visit over a statement list, but flattens visitors that
    # return multiple statements (e.g. visit_Break returns two nodes).
    new_block = []
    for n in block:
      new_n = self.visit(n)
      if isinstance(new_n, list):
        new_block.extend(new_n)
      else:
        new_block.append(new_n)
    return new_block

  def visit_While(self, node):
    self.generic_visit(node.test)
    scope = anno.getanno(node, 'body_scope')

    # new_symbol avoids capturing any name already referenced in the body.
    break_var = self.namer.new_symbol('break_requested', scope.referenced)
    self.break_uses.append([False, break_var])
    node.body = self._manual_visit_list(node.body)
    if self.break_uses[-1][0]:
      # At least one break was rewritten: AND the negated flag into the
      # loop test and prepend the flag initializer.
      node.test = gast.BoolOp(gast.And(), [
          node.test,
          gast.UnaryOp(gast.Not(), gast.Name(break_var, gast.Load(), None))
      ])
      final_nodes = [self._create_break_init(), node]
    else:
      final_nodes = node
    self.break_uses.pop()

    for n in node.orelse:
      self.generic_visit(n)
    return final_nodes

  def visit_For(self, node):
    self.generic_visit(node.target)
    self.generic_visit(node.iter)
    scope = anno.getanno(node, 'body_scope')

    break_var = self.namer.new_symbol('break_requested', scope.referenced)
    self.break_uses.append([False, break_var])
    node.body = self._manual_visit_list(node.body)
    if self.break_uses[-1][0]:
      # `for` has no test expression to extend, so the negated flag is
      # attached as an 'extra_cond' annotation for a later pass to consume.
      anno.setanno(node, 'extra_cond',
                   gast.UnaryOp(gast.Not(),
                                gast.Name(break_var, gast.Load(), None)))
      final_nodes = [self._create_break_init(), node]
    else:
      final_nodes = node
    self.break_uses.pop()

    for n in node.orelse:
      self.generic_visit(n)
    return final_nodes

  def visit_Break(self, node):
    # Mark the innermost loop as needing the control boolean.
    self.break_uses[-1][0] = True
    return self._create_break_trigger()
def transform(node, namer):
  """Canonicalize all break statements under `node`; returns the new AST."""
  return BreakCanonicalizationTransformer(namer).visit(node)
|
rabipanda/tensorflow
|
tensorflow/contrib/py2tf/converters/break_canonicalization.py
|
Python
|
apache-2.0
| 3,729
|
[
"VisIt"
] |
f84307d99c0736f34a9646f354ac1bae3c29be80c415f8cdc916b3158877c3a9
|
class BladeMap(object):
    '''
    A Map Relating Blades in two different algebras

    The map is a list of (blade_in_algebra_1, blade_in_algebra_2) pairs;
    calling the instance maps a multivector from either algebra into the
    other, whichever matches its layout.

    Examples
    -----------
    >>> from clifford import Cl
    >>> # Dirac Algebra `D`
    >>> D, D_blades = Cl(1, 3, firstIdx=0, names='d')
    >>> locals().update(D_blades)
    >>> # Pauli Algebra `P`
    >>> P, P_blades = Cl(3, names='p')
    >>> locals().update(P_blades)
    >>> sta_split = BladeMap([(d01, p1),
    ...                       (d02, p2),
    ...                       (d03, p3),
    ...                       (d12, p12),
    ...                       (d23, p23),
    ...                       (d13, p13)])
    '''
    def __init__(self, blades_map, map_scalars=True):
        self.blades_map = blades_map
        if map_scalars:
            # make scalars in each algebra map
            # (prepend the pair of unit scalars so the scalar part carries over)
            s1 = self.b1[0]._newMV(dtype=int)+1
            s2 = self.b2[0]._newMV(dtype=int)+1
            self.blades_map = [(s1, s2)] + self.blades_map

    @property
    def b1(self):
        '''Blades of the first algebra (left column of the map).'''
        return [k[0] for k in self.blades_map]

    @property
    def b2(self):
        '''Blades of the second algebra (right column of the map).'''
        return [k[1] for k in self.blades_map]

    @property
    def layout1(self):
        '''Layout of the first algebra, taken from its first blade.'''
        return self.b1[0].layout

    @property
    def layout2(self):
        '''Layout of the second algebra, taken from its first blade.'''
        return self.b2[0].layout

    def __call__(self, A):
        '''map an MV `A` according to blade_map'''
        # determine direction of map by matching A's layout to either side
        if A.layout == self.layout1:
            from_b = self.b1
            to_b = self.b2
        elif A.layout == self.layout2:
            from_b = self.b2
            to_b = self.b1
        else:
            raise ValueError('A doesnt belong to either Algebra in this Map')

        # create empty MV, and map values
        # (project A onto each source blade, accumulate on the target blade)
        B = to_b[0]._newMV(dtype=int)
        for from_obj, to_obj in zip(from_b, to_b):
            B += (sum(A.value*from_obj.value)*to_obj)
        return B
|
arsenovic/clifford
|
clifford/_blademap.py
|
Python
|
bsd-3-clause
| 1,846
|
[
"DIRAC"
] |
edbb42f19d3848f822bf51315156c3473d8771326eb80e9ec8446b7cf3c1ca96
|
#!/usr/bin/python
# Plot fluorescence line ratios (Ca/Si, Ti/Si, Ti/Ca) read from a NetCDF
# simulation output file given as the first command-line argument.
# NOTE: Python 2 script (print statements, Scientific.IO.NetCDF).

import Scientific.IO.NetCDF as nc
import numpy as np
import sys
import math
import pylab as pl
import matplotlib.colors as colors

from numpy import floor, sqrt, sin, cos, arccos, arctan2, pi

# Input file name from the command line.
fName = sys.argv[1]

try:
    dFile = nc.NetCDFFile(fName, "r")
except IOError:
    print "Error reading file, exiting."
    sys.exit()

# Sanity check: the file must contain the fluorescence dataset.
if "fluorescenceData" not in dFile.variables.keys():
    print "Error: not a proper fluorescent file."
    sys.exit()

if "Elements" in dir(dFile):
    elements = str(dFile.Elements).split()

data = np.array(dFile.variables['fluorescenceData'].getValue())
data_an = np.array(dFile.variables['fluorescenceData_analytic'].getValue())
spectrum = np.array(dFile.variables['Spectrum'].getValue())
#spectrumCDF = np.array(dFile.variables['SpectrumCdf'].getValue())
#spectrumCDFInv = np.array(dFile.variables['SpectrumCdfInv'].getValue())
#spcE = np.array(dFile.SpectrumEnergy)
#testSpc = np.array(dFile.TestSpectrum)

# Each element appears to occupy two consecutive data rows (two emission
# lines); iSi/iCa/iTi are the row offsets of each element's first line.
e = dFile.Elements.split()
iSi = e.index('Si')*2
iCa = e.index('Ca')*2
iTi = e.index('Ti')*2
dFile.close()

#for i,l in enumerate(data):
#    print elements[i/2], l
#    pl.plot(l/data[8,:])

# Sum the two lines of each element.
# NOTE(review): Na/Mg/Al/K/Fe use hard-coded row indices while Si/Ca/Ti are
# looked up via e.index() above -- confirm the fixed indices match the
# element order stored in the file.
Na = data[2,:]+data[3,:]
Mg = data[4,:]+data[5,:]
Al = data[6,:]+data[7,:]
Si = data[iSi,:]+data[iSi+1,:]
K = data[12,:]+data[13,:]
Ca = data[iCa,:]+data[iCa+1,:]
Ti = data[iTi,:]+data[iTi+1,:]
Fe = data[20,:]+data[21,:]

# Analytic (reference) totals for the same three elements.
Ca_an = data_an[iCa,:]+data_an[iCa+1,:]
Si_an = data_an[iSi,:]+data_an[iSi+1,:]
Ti_an = data_an[iTi,:]+data_an[iTi+1,:]

#FeaCaa = data[18,:]/data[12,:]
#FeCa = Fe/Ca

# Elementwise line ratios used as composition diagnostics.
CaSi = Ca/Si
TiSi = Ti/Si
TiCa = Ti/Ca
SiAl = Si/Al
SiMg = Si/Mg
SiNa = Si/Na

CaSi_an = Ca_an/Si_an
TiSi_an = Ti_an/Si_an
TiCa_an = Ti_an/Ca_an

# Plot each ratio normalized to its first sample.
pl.figure(1)
#pl.plot(FeaCaa / FeaCaa[0])
pl.plot(CaSi / CaSi[0], '-', label='Ca/Si')
pl.plot(TiSi / TiSi[0], '--', label='Ti/Si')
pl.plot(TiCa / TiCa[0], '-.', label='Ti/Ca')
#pl.plot(CaSi_an / CaSi_an[0], '-', label='Ca/Si an')
#pl.plot(TiSi_an / TiSi_an[0], '--', label='Ti/Si an')
#pl.plot(TiCa_an / TiCa_an[0], '-.', label='Ti/Ca an')
#pl.plot(SiNa / SiNa[0], '-', label='Si/Na')
#pl.plot(SiAl / SiAl[0], '--', label='Si/Al')
#pl.plot(SiMg / SiMg[0], '-.', label='Si/Mg')
pl.legend(loc=0)
pl.title('Fe55 source')

#pl.figure(2)
#pl.subplot(1,4,1)
#pl.plot(spcE, spectrum)
#pl.subplot(1,4,2)
#pl.plot(spcE, spectrumCDF)
#pl.subplot(1,4,3)
#pl.plot(spectrumCDFInv)
#pl.subplot(1,4,4)
#pl.plot(testSpc)

print TiSi
print TiCa
#print CaSi/CaSi[0] - TiSi/TiSi[0]
pl.show()

# Print a table of the three ratios; the first column (i*5) is presumably
# an angle in 5-degree steps -- TODO confirm against the simulation setup.
print
print " %12s %12s %12s" %("Ca/Si", "Ti/Si", "Ti/Ca")
for i in range(Ca.size):
    print "%3i %12.7f %12.7f %12.7f" %(i*5, CaSi[i], TiSi[i], TiCa[i])
|
dronir/EM
|
python/xrSingleDirPlot.py
|
Python
|
gpl-3.0
| 2,663
|
[
"NetCDF"
] |
12bdcd0683e26293ef5c29f43a3ee4a64dfb61f10b09b4b6b4ae0d7bb53c74b4
|
# Copyright (C) 2009 by Eric Talevich (eric.talevich@gmail.com)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Utilities for handling, displaying and exporting Phylo trees.
Third-party libraries are loaded when the corresponding function is called.
"""
__docformat__ = "restructuredtext en"
import math
import sys
def to_networkx(tree):
    """Convert a Tree object to a networkx graph.

    The result is useful for graph-oriented analysis, and also interactive
    plotting with pylab, matplotlib or pygraphviz, though the resulting diagram
    is usually not ideal for displaying a phylogeny.

    Requires NetworkX version 0.99 or 1.0.
    """
    try:
        import networkx
    except ImportError:
        from Bio import MissingPythonDependencyError
        raise MissingPythonDependencyError(
            "Install NetworkX if you want to use to_networkx.")

    def add_edge(graph, n1, n2):
        """Add edge n1->n2, copying branch length/color/width as attributes."""
        # NB (1/2010): the networkx API congealed recently
        # Ubuntu Lucid uses v0.99, newest is v1.0.1, let's support both
        # NOTE(review): the string comparison on __version__ is lexical, so
        # e.g. '10.0' < '1.0' -- fine for the 0.99/1.0 era this targets.
        if networkx.__version__ >= '1.0':
            graph.add_edge(n1, n2, weight=str(n2.branch_length or 1.0))
            # Copy branch color value as hex, if available
            if hasattr(n2, 'color') and n2.color is not None:
                graph[n1][n2]['color'] = n2.color.to_hex()
            elif hasattr(n1, 'color') and n1.color is not None:
                # Cascading color attributes
                graph[n1][n2]['color'] = n1.color.to_hex()
                n2.color = n1.color
            # Copy branch weight value (float) if available
            if hasattr(n2, 'width') and n2.width is not None:
                graph[n1][n2]['width'] = n2.width
            elif hasattr(n1, 'width') and n1.width is not None:
                # Cascading width attributes
                graph[n1][n2]['width'] = n1.width
                n2.width = n1.width
        elif networkx.__version__ >= '0.99':
            # Pre-1.0 API: edge data is a single positional value.
            graph.add_edge(n1, n2, (n2.branch_length or 1.0))
        else:
            graph.add_edge(n1, n2)

    def build_subgraph(graph, top):
        """Walk down the Tree, building graphs, edges and nodes."""
        for clade in top:
            graph.add_node(clade.root)
            add_edge(graph, top.root, clade.root)
            build_subgraph(graph, clade)

    # A rooted tree keeps parent->child direction; unrooted loses it.
    if tree.rooted:
        G = networkx.DiGraph()
    else:
        G = networkx.Graph()
    G.add_node(tree.root)
    build_subgraph(G, tree.root)
    return G
def draw_graphviz(tree, label_func=str, prog='twopi', args='',
                  node_color='#c0deff', **kwargs):
    """Display a tree or clade as a graph, using the graphviz engine.

    Requires NetworkX, matplotlib, Graphviz and either PyGraphviz or pydot.

    The third and fourth parameters apply to Graphviz, and the remaining
    arbitrary keyword arguments are passed directly to networkx.draw(), which
    in turn mostly wraps matplotlib/pylab. See the documentation for Graphviz
    and networkx for detailed explanations.

    The NetworkX/matplotlib parameters are described in the docstrings for
    networkx.draw() and pylab.scatter(), but the most reasonable options to try
    are: *alpha, node_color, node_size, node_shape, edge_color, style,
    font_size, font_color, font_weight, font_family*

    :Parameters:
        label_func : callable
            A function to extract a label from a node. By default this is str(),
            but you can use a different function to select another string
            associated with each node. If this function returns None for a node,
            no label will be shown for that node.

            The label will also be silently skipped if the throws an exception
            related to ordinary attribute access (LookupError, AttributeError,
            ValueError); all other exception types will still be raised. This
            means you can use a lambda expression that simply attempts to look
            up the desired value without checking if the intermediate attributes
            are available:

            >>> Phylo.draw_graphviz(tree, lambda n: n.taxonomies[0].code)

        prog : string
            The Graphviz program to use when rendering the graph. 'twopi'
            behaves the best for large graphs, reliably avoiding crossing edges,
            but for moderate graphs 'neato' looks a bit nicer. For small
            directed graphs, 'dot' may produce the most normal-looking
            phylogram, but will cross and distort edges in larger graphs. (The
            programs 'circo' and 'fdp' are not recommended.)

        args : string
            Options passed to the external graphviz program. Normally not
            needed, but offered here for completeness.

    Example
    -------

    >>> import pylab
    >>> from Bio import Phylo
    >>> tree = Phylo.read('ex/apaf.xml', 'phyloxml')
    >>> Phylo.draw_graphviz(tree)
    >>> pylab.show()
    >>> pylab.savefig('apaf.png')
    """
    try:
        import networkx
    except ImportError:
        from Bio import MissingPythonDependencyError
        raise MissingPythonDependencyError(
            "Install NetworkX if you want to use to_networkx.")

    G = to_networkx(tree)
    # Relabel clades to integers; graphviz layout needs hashable simple keys.
    # NOTE(review): discard_old_labels / Gi.node_labels are old-NetworkX API.
    Gi = networkx.convert_node_labels_to_integers(G, discard_old_labels=False)
    try:
        posi = networkx.pygraphviz_layout(Gi, prog, args=args)
    except ImportError:
        try:
            posi = networkx.pydot_layout(Gi, prog)
        except ImportError:
            raise MissingPythonDependencyError(
                "Install PyGraphviz or Pydot if you want to use "
                "draw_graphviz.")
    # Map the computed integer-node positions back onto the clade objects.
    posn = dict((n, posi[Gi.node_labels[n]]) for n in G)

    def get_label_mapping(G, selection):
        # Yield (node, label) pairs, silently skipping nodes whose label_func
        # fails with an ordinary attribute-access error (see docstring).
        for node in G.nodes():
            if (selection is None) or (node in selection):
                try:
                    label = label_func(node)
                    if label not in (None, node.__class__.__name__):
                        yield (node, label)
                except (LookupError, AttributeError, ValueError):
                    pass

    if 'nodelist' in kwargs:
        labels = dict(get_label_mapping(G, set(kwargs['nodelist'])))
    else:
        labels = dict(get_label_mapping(G, None))
    kwargs['nodelist'] = labels.keys()
    # Default edge colors/widths from the per-edge attributes set by
    # to_networkx, falling back to black / width 1.0.
    if 'edge_color' not in kwargs:
        kwargs['edge_color'] = [isinstance(e[2], dict) and
                                e[2].get('color', 'k') or 'k'
                                for e in G.edges(data=True)]
    if 'width' not in kwargs:
        kwargs['width'] = [isinstance(e[2], dict) and
                           e[2].get('width', 1.0) or 1.0
                           for e in G.edges(data=True)]

    networkx.draw(G, posn, labels=labels, node_color=node_color, **kwargs)
def draw_ascii(tree, file=sys.stdout, column_width=80):
    """Draw an ascii-art phylogram of the given tree.

    The printed result looks like::

                                        _________ Orange
                         ______________|
                        |              |______________ Tangerine
          ______________|
         |              |          _________________________ Grapefruit
        _|              |_________|
         |                        |______________ Pummelo
         |
         |__________________________________ Apple

    :Parameters:
        file : file-like object
            File handle opened for writing the output drawing.
        column_width : int
            Total number of text columns used by the drawing.
    """
    taxa = tree.get_terminals()
    # Some constants for the drawing calculations
    max_label_width = max(len(str(taxon)) for taxon in taxa)
    drawing_width = column_width - max_label_width - 1
    # One text row per taxon plus one separator row between adjacent taxa.
    drawing_height = 2 * len(taxa) - 1

    def get_col_positions(tree):
        """Create a mapping of each clade to its column position."""
        depths = tree.depths()
        # If there are no branch lengths, assume unit branch lengths
        if not max(depths.itervalues()):
            depths = tree.depths(unit_branch_lengths=True)
        # Potential drawing overflow due to rounding -- 1 char per tree layer
        fudge_margin = int(math.ceil(math.log(len(taxa), 2)))
        cols_per_branch_unit = ((drawing_width - fudge_margin)
                                / float(max(depths.itervalues())))
        return dict((clade, int(round(blen*cols_per_branch_unit + 0.5)))
                    for clade, blen in depths.iteritems())

    def get_row_positions(tree):
        """Map each clade to a text row: tips on even rows, parents midway."""
        positions = dict((taxon, 2*idx) for idx, taxon in enumerate(taxa))

        def calc_row(clade):
            for subclade in clade:
                if subclade not in positions:
                    calc_row(subclade)
            # Integer (floor) division is intentional: rows are text lines.
            positions[clade] = (positions[clade.clades[0]] +
                                positions[clade.clades[-1]]) / 2

        calc_row(tree.root)
        return positions

    col_positions = get_col_positions(tree)
    row_positions = get_row_positions(tree)
    char_matrix = [[' ' for x in range(drawing_width)]
                   for y in range(drawing_height)]

    def draw_clade(clade, startcol):
        thiscol = col_positions[clade]
        thisrow = row_positions[clade]
        # Draw a horizontal line
        for col in range(startcol, thiscol):
            char_matrix[thisrow][col] = '_'
        if clade.clades:
            # Draw a vertical line
            toprow = row_positions[clade.clades[0]]
            botrow = row_positions[clade.clades[-1]]
            for row in range(toprow+1, botrow+1):
                char_matrix[row][thiscol] = '|'
            # NB: Short terminal branches need something to stop rstrip()
            if (col_positions[clade.clades[0]] - thiscol) < 2:
                char_matrix[toprow][thiscol] = ','
            # Draw descendents
            for child in clade:
                draw_clade(child, thiscol+1)

    draw_clade(tree.root, 0)
    # Print the complete drawing
    for idx, row in enumerate(char_matrix):
        line = ''.join(row).rstrip()
        # Add labels for terminal taxa in the right margin
        if idx % 2 == 0:
            line += ' ' + str(taxa[idx/2])
        file.write(line + '\n')
    file.write('\n')
def draw(tree, label_func=str, do_show=True, show_confidence=True):
    """Plot the given tree using matplotlib (or pylab).

    The graphic is a rooted tree, drawn with roughly the same algorithm as
    draw_ascii.

    :Parameters:
        label_func : callable
            A function to extract a label from a node. By default this is str(),
            but you can use a different function to select another string
            associated with each node. If this function returns None for a node,
            no label will be shown for that node.
        do_show : bool
            Whether to call the plotting library's show() at the end.
        show_confidence : bool
            Whether to print confidence values on non-terminal branches.
    """
    try:
        import matplotlib.pyplot as plt
    except ImportError:
        try:
            import pylab as plt
        except ImportError:
            from Bio import MissingPythonDependencyError
            raise MissingPythonDependencyError(
                "Install matplotlib or pylab if you want to use draw.")

    def get_x_positions(tree):
        """Create a mapping of each clade to its horizontal position.

        Dict of {clade: x-coord}
        """
        depths = tree.depths()
        # If there are no branch lengths, assume unit branch lengths
        if not max(depths.itervalues()):
            depths = tree.depths(unit_branch_lengths=True)
        return depths

    def get_y_positions(tree):
        """Create a mapping of each clade to its vertical position.

        Dict of {clade: y-coord}.
        Coordinates are negative, and integers for tips.
        """
        maxheight = tree.count_terminals()
        # Rows are defined by the tips
        heights = dict((tip, maxheight - i)
                       for i, tip in enumerate(tree.get_terminals()))

        # Internal nodes: place at midpoint of children
        def calc_row(clade):
            for subclade in clade:
                if subclade not in heights:
                    calc_row(subclade)
            # Closure over heights
            heights[clade] = (heights[clade.clades[0]] +
                              heights[clade.clades[-1]]) / 2.0

        calc_row(tree.root)
        return heights

    x_posns = get_x_positions(tree)
    y_posns = get_y_positions(tree)

    def draw_clade(clade, x_start):
        """Recursively draw a tree, down from the given clade."""
        x_here = x_posns[clade]
        y_here = y_posns[clade]
        # phyloXML-only graphics annotations
        color = clade.__dict__.get('color') or 'k'
        lw = clade.__dict__.get('width')
        # Draw a horizontal line from start to here
        plt.hlines(y_here, x_start, x_here, color=color, lw=lw)
        # Add node/taxon labels
        label = label_func(clade)
        if label not in (None, clade.__class__.__name__):
            plt.text(x_here, y_here, ' ' + label,
                     fontsize=10, verticalalignment='center')
        # Add confidence above the branch, if requested.
        # BUGFIX: the original computed conf_label but then rendered
        # str(float(clade.confidence)), which raises TypeError for phyloXML
        # clades that carry a 'confidences' list with no scalar 'confidence';
        # it also ignored the show_confidence parameter entirely.
        if show_confidence:
            if hasattr(clade, 'confidences'):
                # phyloXML supports multiple confidences
                conf_label = ' '.join(map(str, map(float, clade.confidences)))
            elif clade.confidence is not None:
                conf_label = str(clade.confidence)
            else:
                conf_label = None
            if conf_label:
                plt.text(x_start, y_here, conf_label, fontsize=9)
        if clade.clades:
            # Draw a vertical line connecting all children
            y_top = y_posns[clade.clades[0]]
            y_bot = y_posns[clade.clades[-1]]
            plt.vlines(x_here, y_bot, y_top, color=color, lw=lw)
            # Draw descendents
            for child in clade:
                draw_clade(child, x_here)

    draw_clade(tree.root, 0)

    if hasattr(tree, 'name') and tree.name:
        plt.title(tree.name)
    plt.xlabel('branch length')
    plt.ylabel('taxa')
    # Add margins around the tree to prevent overlapping the axes
    xmin, xmax = plt.xlim()
    pad = 0.05 * xmax
    plt.xlim(-pad, xmax + pad)
    # Also invert the y-axis (origin at the top)
    plt.ylim(max(y_posns.itervalues()) + 1, 0)
    if do_show:
        plt.show()
|
asherkhb/coge
|
bin/last_wrapper/Bio/Phylo/_utils.py
|
Python
|
bsd-2-clause
| 14,391
|
[
"Biopython"
] |
158e109db405b0201ecc220502349af4cc465f177c5edbfbff1a92d4474e6bf4
|
from ...common import RTOL, ATOL, pandas
from ...cg.kdtree import KDTree, RADIUS_EARTH_KM
from ..util import get_points_array
from ... import cg
from ... import weights
from .. import distance as d, contiguity as c
from ...io import geotable as pdio
from ...io.fileio import FileIO as psopen
import numpy as np
from ... import examples as pysal_examples
import unittest as ut
PANDAS_EXTINCT = pandas is None
# All instances should test these four methods, and define their own functional
# tests based on common codepaths/estimated weights use cases.
class Distance_Mixin(object):
    """Shared fixtures for the distance-weights test cases below.

    Class-level attributes are built once at import time: a small synthetic
    point set with a Euclidean KDTree, plus point arrays extracted from the
    columbus and stl_hom example shapefiles (the latter with an arc-distance
    KDTree).  Subclasses override the abstract test_* methods.
    """
    polygon_path = pysal_examples.get_path('columbus.shp')
    arc_path = pysal_examples.get_path('stl_hom.shp')
    points = [(10, 10), (20, 10), (40, 10),
              (15, 20), (30, 20), (30, 30)]
    euclidean_kdt = KDTree(points, distance_metric='euclidean')

    polygon_f = psopen(polygon_path)  # our file handler
    poly_centroids = get_points_array(polygon_f)  # our iterable
    polygon_f.seek(0)  # go back to head of file

    arc_f = psopen(arc_path)
    arc_points = get_points_array(arc_f)
    arc_f.seek(0)
    arc_kdt = KDTree(arc_points, distance_metric='Arc',
                     radius=cg.sphere.RADIUS_EARTH_KM)

    cls = object  # class constructor
    known_wi = None  # index of known w entry to compare
    known_w = dict()  # actual w entry
    known_name = known_wi

    def setUp(self):
        # Copy the mixin's class attributes onto the instance so subclasses
        # can freely overwrite them per-test without touching the shared state.
        self.__dict__.update({k:v for k,v in list(Distance_Mixin.__dict__.items())
            if not k.startswith('_')})

    def test_init(self):
        # test vanilla, named
        raise NotImplementedError('You need to implement this test '
                                  'before this module will pass')

    def test_from_shapefile(self):
        # test vanilla, named, sparse
        raise NotImplementedError('You need to implement this test '
                                  'before this module will pass')

    def test_from_array(self):
        # test named, sparse
        raise NotImplementedError('You need to implement this test '
                                  'before this module will pass')

    def test_from_dataframe(self):
        # test named, columnar, defau
        raise NotImplementedError('You need to implement this test '
                                  'before this module will pass')
class Test_KNN(ut.TestCase, Distance_Mixin):
    """Tests for the k-nearest-neighbors weights (d.KNN) constructors."""

    def setUp(self):
        Distance_Mixin.setUp(self)
        # Known neighbor lists for the columbus shapefile with k=4,
        # keyed by observation index (known_wiN -> known_wN).
        self.known_wi0 = 7
        self.known_w0 = [3, 6, 12, 11]
        self.known_wi1 = 0
        self.known_w1 = [2, 1, 3 ,7]

        self.known_wi2 = 4
        self.known_w2 = [1, 3, 9, 12]
        self.known_wi3 = 40
        self.known_w3 = [31, 38, 45, 49]

    ##########################
    # Classmethod tests      #
    ##########################

    def test_init(self):
        w = d.KNN(self.euclidean_kdt, k=2)
        self.assertEqual(w.neighbors[0], [1,3])

    @ut.skipIf(PANDAS_EXTINCT, 'Missing pandas')
    def test_from_dataframe(self):
        df = pdio.read_files(self.polygon_path)
        w = d.KNN.from_dataframe(df, k=4)
        self.assertEqual(w.neighbors[self.known_wi0], self.known_w0)
        self.assertEqual(w.neighbors[self.known_wi1], self.known_w1)

    def test_from_array(self):
        w = d.KNN.from_array(self.poly_centroids, k=4)
        self.assertEqual(w.neighbors[self.known_wi0], self.known_w0)
        self.assertEqual(w.neighbors[self.known_wi1], self.known_w1)

    def test_from_shapefile(self):
        w = d.KNN.from_shapefile(self.polygon_path, k=4)
        self.assertEqual(w.neighbors[self.known_wi0], self.known_w0)
        self.assertEqual(w.neighbors[self.known_wi1], self.known_w1)

    ##########################
    # Function/User tests    #
    ##########################

    def test_reweight(self):
        # reweight with an extra point and larger k must return a new W
        # (inplace=False) with the expected neighbor set for observation 0.
        w = d.KNN(self.points, k=2)
        new_point = [(21,21)]
        wnew = w.reweight(k=4, p=1, new_data=new_point, inplace=False)
        self.assertEqual(wnew[0], {1: 1.0, 3: 1.0, 4: 1.0, 6: 1.0})

    def test_arcdata(self):
        # Arc metric keeps the raw (lon, lat, +1 col) data; 3 columns expected.
        w = d.KNN.from_shapefile(self.polygon_path, k=4,
                                 distance_metric='Arc',
                                 radius=cg.sphere.RADIUS_EARTH_KM)
        self.assertEqual(w.data.shape[1], 3)
class Test_DistanceBand(ut.TestCase, Distance_Mixin):
    """Tests for fixed-distance-band weights (d.DistanceBand).

    A 10x10 unit lattice is used throughout: a band of threshold 1 on the
    lattice points must reproduce rook contiguity exactly.
    """

    def setUp(self):
        Distance_Mixin.setUp(self)
        self.grid_path = pysal_examples.get_path('lattice10x10.shp')
        self.grid_rook_w = c.Rook.from_shapefile(self.grid_path)
        self.grid_f = psopen(self.grid_path)
        self.grid_points = get_points_array(self.grid_f)
        self.grid_f.seek(0)
        self.grid_kdt = KDTree(self.grid_points)

    ##########################
    # Classmethod tests      #
    ##########################

    def test_init(self):
        w = d.DistanceBand(self.grid_kdt, 1)
        for k,v in w:
            self.assertEqual(v, self.grid_rook_w[k])

    def test_from_shapefile(self):
        w = d.DistanceBand.from_shapefile(self.grid_path, 1)
        for k,v in w:
            self.assertEqual(v, self.grid_rook_w[k])

    def test_from_array(self):
        w = d.DistanceBand.from_array(self.grid_points, 1)
        for k,v in w:
            self.assertEqual(v, self.grid_rook_w[k])

    @ut.skipIf(PANDAS_EXTINCT, 'Missing pandas')
    def test_from_dataframe(self):
        import pandas as pd
        geom_series = pdio.shp.shp2series(self.grid_path)
        random_data = np.random.random(size=len(geom_series))
        df = pd.DataFrame({'obs':random_data, 'geometry':geom_series})
        w = d.DistanceBand.from_dataframe(df, 1)
        for k,v in w:
            self.assertEqual(v, self.grid_rook_w[k])

    ##########################
    # Function/User tests    #
    ##########################

    def test_integers(self):
        """
        see issue #126
        """
        # Integer coordinates must work the same as floats (issue #126).
        grid_integers = [tuple(map(int, poly.vertices[0]))
                         for poly in self.grid_f]
        self.grid_f.seek(0)
        grid_dbw = d.DistanceBand(grid_integers, 1)
        for k,v in grid_dbw:
            self.assertEqual(v, self.grid_rook_w[k])

    def test_arcdist(self):
        # Band at the maximum pairwise arc distance must yield the full,
        # continuous (binary=False, alpha=1) distance matrix.
        arc = cg.sphere.arcdist
        kdt = KDTree(self.arc_points, distance_metric='Arc',
                     radius=cg.sphere.RADIUS_EARTH_KM)
        npoints = self.arc_points.shape[0]
        full = np.matrix([[arc(self.arc_points[i], self.arc_points[j])
                           for j in range(npoints)]
                          for i in range(npoints)])
        maxdist = full.max()
        w = d.DistanceBand(kdt, maxdist, binary=False, alpha=1.0)
        np.testing.assert_allclose(w.sparse.todense(), full)
        self.assertEqual(w.data.shape[1], 3)

    def test_dense(self):
        # build_sp=False (dense path) must match rook contiguity as well.
        w_rook = c.Rook.from_shapefile(
            pysal_examples.get_path('lattice10x10.shp'))
        polys = psopen(pysal_examples.get_path('lattice10x10.shp'))
        centroids = [p.centroid for p in polys]
        w_db = d.DistanceBand(centroids, 1, build_sp=False)
        for k in w_db.id_order:
            np.testing.assert_equal(w_db[k], w_rook[k])

    @ut.skipIf(PANDAS_EXTINCT, 'Missing pandas')
    def test_named(self):
        # NOTE(review): no assertion here -- this only verifies that
        # construction with custom ids (ids=df.names) does not raise.
        import pandas as pd
        geom_series = pdio.shp.shp2series(self.grid_path)
        random_data = np.random.random(size=len(geom_series))
        names = [chr(x) for x in range(60,160)]
        df = pd.DataFrame({'obs':random_data, 'geometry':geom_series, 'names':names})
        w = d.DistanceBand.from_dataframe(df, 1, ids=df.names)
class Test_Kernel(ut.TestCase, Distance_Mixin):
    """Tests for kernel-based weights (d.Kernel): fixed and adaptive
    bandwidths, several kernel functions, and the arc-distance metric."""

    def setUp(self):
        Distance_Mixin.setUp(self)
        # Known kernel weights/bandwidths, keyed by observation index.
        self.known_wi0 = 0
        self.known_w0 = {0: 1, 1: 0.500000049999995, 3: 0.4409830615267465}

        self.known_wi1 = 0
        self.known_w1 = {0: 1.0, 1: 0.33333333333333337,
                         3: 0.2546440075000701}
        self.known_w1_bw = 15.

        self.known_wi2 = 0
        self.known_w2 = {0: 1.0, 1: 0.59999999999999998,
                         3: 0.55278640450004202, 4: 0.10557280900008403}
        self.known_w2_bws = [25.0, 15.0, 25.0, 16.0, 14.5, 25.0]

        self.known_wi3 = 0
        self.known_w3 = [1.0, 0.10557289844279438, 9.9999990066379496e-08]
        self.known_w3_abws =[[11.180341005532938], [11.180341005532938],
                             [20.000002000000002], [11.180341005532938],
                             [14.142137037944515], [18.027758180095585]]

        self.known_wi4 = 0
        self.known_w4 = {0: 0.3989422804014327,
                         1: 0.26741902915776961,
                         3: 0.24197074871621341}
        self.known_w4_abws = self.known_w3_abws

        self.known_wi5 = 1
        self.known_w5 = {4: 0.0070787731484506233,
                         2: 0.2052478782400463,
                         3: 0.23051223027663237,
                         1: 1.0}

        self.known_wi6 = 0
        self.known_w6 = {0: 1.0, 2: 0.03178906767736345,
                         1: 9.9999990066379496e-08}
        #stick answers & params here

    ##########################
    # Classmethod tests      #
    ##########################

    def test_init(self):
        w = d.Kernel(self.euclidean_kdt)
        for k,v in list(w[self.known_wi0].items()):
            np.testing.assert_allclose(v, self.known_w0[k], rtol=RTOL)

    def test_from_shapefile(self):
        w = d.Kernel.from_shapefile(self.polygon_path, idVariable='POLYID')
        for k,v in list(w[self.known_wi5].items()):
            np.testing.assert_allclose((k,v), (k,self.known_w5[k]), rtol=RTOL)

        w = d.Kernel.from_shapefile(self.polygon_path, fixed=False)
        for k,v in list(w[self.known_wi6].items()):
            np.testing.assert_allclose((k,v), (k,self.known_w6[k]), rtol=RTOL)

    def test_from_array(self):
        w = d.Kernel.from_array(self.points)
        for k,v in list(w[self.known_wi0].items()):
            np.testing.assert_allclose(v, self.known_w0[k], rtol=RTOL)

    @ut.skipIf(PANDAS_EXTINCT, 'Missing pandas')
    def test_from_dataframe(self):
        df = pdio.read_files(self.polygon_path)
        w = d.Kernel.from_dataframe(df)
        # DataFrame ids are 0-based while POLYID is 1-based; hence the -1/+1.
        for k,v in list(w[self.known_wi5-1].items()):
            np.testing.assert_allclose(v, self.known_w5[k+1], rtol=RTOL)

    ##########################
    # Function/User tests    #
    ##########################

    def test_fixed_bandwidth(self):
        # Scalar bandwidth applies to every observation...
        w = d.Kernel(self.points, bandwidth=15.0)
        for k,v in list(w[self.known_wi1].items()):
            np.testing.assert_allclose((k,v), (k, self.known_w1[k]))
        np.testing.assert_allclose(np.ones((w.n,1))*15, w.bandwidth)

        # ...while a list supplies one bandwidth per observation.
        w = d.Kernel(self.points, bandwidth=self.known_w2_bws)
        for k,v in list(w[self.known_wi2].items()):
            np.testing.assert_allclose((k,v), (k, self.known_w2[k]), rtol=RTOL)
        for i in range(w.n):
            np.testing.assert_allclose(w.bandwidth[i], self.known_w2_bws[i], rtol=RTOL)

    def test_adaptive_bandwidth(self):
        # fixed=False derives a per-observation bandwidth from the data.
        w = d.Kernel(self.points, fixed=False)
        np.testing.assert_allclose(sorted(w[self.known_wi3].values()),
                                   sorted(self.known_w3), rtol=RTOL)
        bws = w.bandwidth.tolist()
        np.testing.assert_allclose(bws, self.known_w3_abws, rtol=RTOL)

        w = d.Kernel(self.points, fixed=False, function='gaussian')
        for k,v in list(w[self.known_wi4].items()):
            np.testing.assert_allclose((k,v), (k, self.known_w4[k]), rtol=RTOL)
        bws = w.bandwidth.tolist()
        np.testing.assert_allclose(bws, self.known_w4_abws, rtol=RTOL)

    def test_arcdistance(self):
        # Arc metric keeps 3 data columns, as in the other Arc tests.
        w = d.Kernel(self.points, fixed=True, distance_metric='Arc',
                     radius=cg.sphere.RADIUS_EARTH_KM)
        self.assertEqual(w.data.shape[1], 3)
# Build the module-level suite with a single shared loader instead of
# instantiating a fresh TestLoader per test case.
_loader = ut.TestLoader()
knn = _loader.loadTestsFromTestCase(Test_KNN)
kern = _loader.loadTestsFromTestCase(Test_Kernel)
db = _loader.loadTestsFromTestCase(Test_DistanceBand)
suite = ut.TestSuite([knn, kern, db])

if __name__ == '__main__':
    ut.TextTestRunner().run(suite)
|
lixun910/pysal
|
pysal/lib/weights/tests/test_distance.py
|
Python
|
bsd-3-clause
| 12,184
|
[
"COLUMBUS",
"Gaussian"
] |
7f4875fa4d7ae128e4efb63077eb89195c82ccc31f3e211a9a315891ec7a000f
|
# pylint: disable=arguments-differ
""" Models for the shopping cart and assorted purchase types """
from collections import namedtuple
from datetime import datetime
from datetime import timedelta
from decimal import Decimal
import json
import analytics
from io import BytesIO
from django.db.models import Q, F
import pytz
import logging
import smtplib
import StringIO
import csv
from boto.exception import BotoServerError # this is a super-class of SESError and catches connection errors
from django.dispatch import receiver
from django.db import models
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.core.mail import send_mail
from django.contrib.auth.models import User
from django.utils.translation import ugettext as _, ugettext_lazy
from django.db import transaction
from django.db.models import Sum, Count
from django.db.models.signals import post_save, post_delete
from django.core.urlresolvers import reverse
from model_utils.managers import InheritanceManager
from model_utils.models import TimeStampedModel
from django.core.mail.message import EmailMessage
from xmodule.modulestore.django import modulestore
from eventtracking import tracker
from courseware.courses import get_course_by_id
from config_models.models import ConfigurationModel
from course_modes.models import CourseMode
from edxmako.shortcuts import render_to_string
from student.models import CourseEnrollment, UNENROLL_DONE, EnrollStatusChange
from util.query import use_read_replica_if_available
from xmodule_django.models import CourseKeyField
from .exceptions import (
InvalidCartItem,
PurchasedCallbackException,
ItemAlreadyInCartException,
AlreadyEnrolledInCourseException,
CourseDoesNotExistException,
MultipleCouponsNotAllowedException,
InvalidStatusToRetire,
UnexpectedOrderItemStatus,
ItemNotFoundInCartException
)
from shoppingcart.pdf import PDFInvoice
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
# Module-level logger for the shopping cart app.
log = logging.getLogger("shoppingcart")

# Lifecycle states of an Order; stored value == display value.
ORDER_STATUSES = (
    # The user is selecting what he/she wants to purchase.
    ('cart', 'cart'),

    # The user has been sent to the external payment processor.
    # At this point, the order should NOT be modified.
    # If the user returns to the payment flow, he/she will start a new order.
    ('paying', 'paying'),

    # The user has successfully purchased the items in the order.
    ('purchased', 'purchased'),

    # The user's order has been refunded.
    ('refunded', 'refunded'),

    # The user's order went through, but the order was erroneously left
    # in 'cart'.
    ('defunct-cart', 'defunct-cart'),

    # The user's order went through, but the order was erroneously left
    # in 'paying'.
    ('defunct-paying', 'defunct-paying'),
)

# maps order statuses to their defunct states
ORDER_STATUS_MAP = {
    'cart': 'defunct-cart',
    'paying': 'defunct-paying',
}

# we need a tuple to represent the primary key of various OrderItem subclasses
# (the concrete subclass plus its integer pk uniquely identify an item)
OrderItemSubclassPK = namedtuple('OrderItemSubclassPK', ['cls', 'pk'])
class OrderTypes(object):
    """Enumeration of the supported purchase order types."""

    # Stored values; each doubles as its own human-readable label below.
    PERSONAL = 'personal'
    BUSINESS = 'business'

    # Django-style choices: (db value, display label) pairs.
    ORDER_TYPES = tuple((value, value) for value in (PERSONAL, BUSINESS))
class Order(models.Model):
    """
    This is the model for an order. Before purchase, an Order and its related OrderItems are used
    as the shopping cart.
    FOR ANY USER, THERE SHOULD ONLY EVER BE ZERO OR ONE ORDER WITH STATUS='cart'.
    """
    class Meta(object):
        app_label = "shoppingcart"
    # The purchaser. At most one Order per user should have status='cart'.
    user = models.ForeignKey(User, db_index=True)
    currency = models.CharField(default="usd", max_length=8) # lower case ISO currency codes
    # One of ORDER_STATUSES; OrderItem.status is denormalized from this.
    status = models.CharField(max_length=32, default='cart', choices=ORDER_STATUSES)
    purchase_time = models.DateTimeField(null=True, blank=True)
    refunded_time = models.DateTimeField(null=True, blank=True)
    # Now we store data needed to generate a reasonable receipt
    # These fields only make sense after the purchase
    bill_to_first = models.CharField(max_length=64, blank=True)
    bill_to_last = models.CharField(max_length=64, blank=True)
    bill_to_street1 = models.CharField(max_length=128, blank=True)
    bill_to_street2 = models.CharField(max_length=128, blank=True)
    bill_to_city = models.CharField(max_length=64, blank=True)
    bill_to_state = models.CharField(max_length=8, blank=True)
    bill_to_postalcode = models.CharField(max_length=16, blank=True)
    bill_to_country = models.CharField(max_length=64, blank=True)
    bill_to_ccnum = models.CharField(max_length=8, blank=True) # last 4 digits
    bill_to_cardtype = models.CharField(max_length=32, blank=True)
    # a JSON dump of the CC processor response, for completeness
    processor_reply_dump = models.TextField(blank=True)
    # bulk purchase registration code workflow billing details
    company_name = models.CharField(max_length=255, null=True, blank=True)
    company_contact_name = models.CharField(max_length=255, null=True, blank=True)
    company_contact_email = models.CharField(max_length=255, null=True, blank=True)
    recipient_name = models.CharField(max_length=255, null=True, blank=True)
    recipient_email = models.CharField(max_length=255, null=True, blank=True)
    customer_reference_number = models.CharField(max_length=63, null=True, blank=True)
    # OrderTypes.PERSONAL or OrderTypes.BUSINESS; see update_order_type().
    order_type = models.CharField(max_length=32, default='personal', choices=OrderTypes.ORDER_TYPES)
    @classmethod
    def get_cart_for_user(cls, user):
        """
        Always use this to preserve the property that at most 1 order per user has status = 'cart'

        Returns the newest 'cart'-status Order for ``user``, creating one if
        none exists yet.
        """
        # find the newest element in the db
        try:
            cart_order = cls.objects.filter(user=user, status='cart').order_by('-id')[:1].get()
        except ObjectDoesNotExist:
            # if nothing exists in the database, create a new cart
            cart_order, _created = cls.objects.get_or_create(user=user, status='cart')
        return cart_order
    @classmethod
    def does_user_have_cart(cls, user):
        """
        Returns a boolean whether a shopping cart (Order) exists for the specified user
        """
        return cls.objects.filter(user=user, status='cart').exists()
    @classmethod
    def user_cart_has_items(cls, user, item_types=None):
        """
        Returns true if the user (anonymous user ok) has
        a cart with items in it. (Which means it should be displayed.
        If a item_type is passed in, then we check to see if the cart has at least one of
        those types of OrderItems
        """
        # Anonymous users cannot own a cart.
        if not user.is_authenticated():
            return False
        cart = cls.get_cart_for_user(user)
        if not item_types:
            # check to see if the cart has at least some item in it
            return cart.has_items()
        else:
            # if the caller is explicitly asking to check for particular types
            for item_type in item_types:
                if cart.has_items(item_type):
                    return True
            return False
    @classmethod
    def remove_cart_item_from_order(cls, item, user):
        """
        Removes the item from the cart if the item.order.status == 'cart'.
        Also removes any code redemption associated with the order_item
        """
        if item.order.status == 'cart':
            log.info("order item %s removed for user %s", str(item.id), user)
            item.delete()
            # remove any redemption entry associated with the item
            CouponRedemption.remove_code_redemption_from_item(item, user)
    @property
    def total_cost(self):
        """
        Return the total cost of the cart. If the order has been purchased, returns total of
        all purchased and not refunded items.
        """
        # Only items whose (denormalized) status matches the order's own
        # status are counted.
        return sum(i.line_cost for i in self.orderitem_set.filter(status=self.status))
    def has_items(self, item_type=None):
        """
        Does the cart have any items in it?
        If an item_type is passed in then we check to see if there are any items of that class type
        """
        if not item_type:
            return self.orderitem_set.exists()
        else:
            # select_subclasses() is needed so isinstance() sees the concrete
            # OrderItem subclass rather than the base row.
            items = self.orderitem_set.all().select_subclasses()
            for item in items:
                if isinstance(item, item_type):
                    return True
            return False
    def reset_cart_items_prices(self):
        """
        Reset the items price state in the user cart

        Restores each discounted item's unit_cost back to its list_price.
        """
        for item in self.orderitem_set.all():
            if item.is_discounted:
                item.unit_cost = item.list_price
                item.save()
    def clear(self):
        """
        Clear out all the items in the cart
        """
        self.orderitem_set.all().delete()
    @transaction.atomic
    def start_purchase(self):
        """
        Start the purchase process. This will set the order status to "paying",
        at which point it should no longer be modified.
        Future calls to `Order.get_cart_for_user()` will filter out orders with
        status "paying", effectively creating a new (empty) cart.
        """
        if self.status == 'cart':
            self.status = 'paying'
            self.save()
            # Propagate the status change to each item (denormalized status).
            for item in OrderItem.objects.filter(order=self).select_subclasses():
                item.start_purchase()
    def update_order_type(self):
        """
        updating order type. This method wil inspect the quantity associated with the OrderItem.
        In the application, it is implied that when qty > 1, then the user is to purchase
        'RegistrationCodes' which are randomly generated strings that users can distribute to
        others in order for them to enroll in paywalled courses.
        The UI/UX may change in the future to make the switching between PaidCourseRegistration
        and CourseRegCodeItems a more explicit UI gesture from the purchaser

        Returns a list of ``{"oldId": ..., "newId": ...}`` dicts mapping each
        replaced item's pk to its replacement's pk.
        """
        cart_items = self.orderitem_set.all()
        is_order_type_business = False
        # Any item with qty > 1 makes the whole order a business order.
        for cart_item in cart_items:
            if cart_item.qty > 1:
                is_order_type_business = True
        items_to_delete = []
        old_to_new_id_map = []
        if is_order_type_business:
            # Convert single-seat registrations into registration-code items.
            for cart_item in cart_items:
                if hasattr(cart_item, 'paidcourseregistration'):
                    course_reg_code_item = CourseRegCodeItem.add_to_order(
                        self, cart_item.paidcourseregistration.course_id, cart_item.qty,
                    )
                    # update the discounted prices if coupon redemption applied
                    course_reg_code_item.list_price = cart_item.list_price
                    course_reg_code_item.unit_cost = cart_item.unit_cost
                    course_reg_code_item.save()
                    items_to_delete.append(cart_item)
                    old_to_new_id_map.append({"oldId": cart_item.id, "newId": course_reg_code_item.id})
        else:
            # Convert registration-code items back into single-seat registrations.
            for cart_item in cart_items:
                if hasattr(cart_item, 'courseregcodeitem'):
                    paid_course_registration = PaidCourseRegistration.add_to_order(
                        self, cart_item.courseregcodeitem.course_id,
                    )
                    # update the discounted prices if coupon redemption applied
                    paid_course_registration.list_price = cart_item.list_price
                    paid_course_registration.unit_cost = cart_item.unit_cost
                    paid_course_registration.save()
                    items_to_delete.append(cart_item)
                    old_to_new_id_map.append({"oldId": cart_item.id, "newId": paid_course_registration.id})
        # Delete the originals only after all replacements have been created.
        for item in items_to_delete:
            item.delete()
        self.order_type = OrderTypes.BUSINESS if is_order_type_business else OrderTypes.PERSONAL
        self.save()
        return old_to_new_id_map
    def generate_pdf_receipt(self, order_items):
        """
        Generates the pdf receipt for the given order_items
        and returns the pdf_buffer.
        """
        items_data = []
        for item in order_items:
            item_total = item.qty * item.unit_cost
            items_data.append({
                'item_description': item.pdf_receipt_display_name,
                'quantity': item.qty,
                'list_price': item.get_list_price(),
                'discount': item.get_list_price() - item.unit_cost,
                'item_total': item_total
            })
        pdf_buffer = BytesIO()
        # A receipt is a fully-paid invoice: payment equals total, balance 0.
        PDFInvoice(
            items_data=items_data,
            item_id=str(self.id),
            date=self.purchase_time,
            is_invoice=False,
            total_cost=self.total_cost,
            payment_received=self.total_cost,
            balance=0
        ).generate_pdf(pdf_buffer)
        return pdf_buffer
    def generate_registration_codes_csv(self, orderitems, site_name):
        """
        this function generates the csv file

        Builds one CSV row (course name, registration code, redemption URL)
        per registration code attached to this order. Returns a
        ``(csv_file, course_info)`` pair, where ``course_info`` is a list of
        ``(display_name, " (start-end)")`` tuples used in e-mail subjects.
        """
        course_info = []
        csv_file = StringIO.StringIO()
        csv_writer = csv.writer(csv_file)
        csv_writer.writerow(['Course Name', 'Registration Code', 'URL'])
        for item in orderitems:
            course_id = item.course_id
            course = get_course_by_id(item.course_id, depth=0)
            registration_codes = CourseRegistrationCode.objects.filter(course_id=course_id, order=self)
            course_info.append((course.display_name, ' (' + course.start_datetime_text() + '-' + course.end_datetime_text() + ')'))
            for registration_code in registration_codes:
                redemption_url = reverse('register_code_redemption', args=[registration_code.code])
                url = '{base_url}{redemption_url}'.format(base_url=site_name, redemption_url=redemption_url)
                csv_writer.writerow([unicode(course.display_name).encode("utf-8"), registration_code.code, url])
        return csv_file, course_info
    def send_confirmation_emails(self, orderitems, is_order_type_business, csv_file, pdf_file, site_name, courses_info):
        """
        send confirmation e-mail

        Sends one message per recipient (purchaser plus, when set, the company
        contact and the designated recipient). Business orders get an HTML
        body with the registration-code CSV and PDF receipt attached;
        personal orders get plain text. Mail-backend failures are logged,
        never raised.
        """
        recipient_list = [(self.user.username, self.user.email, 'user')] # pylint: disable=no-member
        if self.company_contact_email:
            recipient_list.append((self.company_contact_name, self.company_contact_email, 'company_contact'))
        joined_course_names = ""
        if self.recipient_email:
            recipient_list.append((self.recipient_name, self.recipient_email, 'email_recipient'))
            courses_names_with_dates = [course_info[0] + course_info[1] for course_info in courses_info]
            joined_course_names = " " + ", ".join(courses_names_with_dates)
        if not is_order_type_business:
            subject = _("Order Payment Confirmation")
        else:
            subject = _('Confirmation and Registration Codes for the following courses: {course_name_list}').format(
                course_name_list=joined_course_names
            )
        dashboard_url = '{base_url}{dashboard}'.format(
            base_url=site_name,
            dashboard=reverse('dashboard')
        )
        try:
            from_address = configuration_helpers.get_value(
                'email_from_address',
                settings.PAYMENT_SUPPORT_EMAIL
            )
            # Send a unique email for each recipient. Don't put all email addresses in a single email.
            for recipient in recipient_list:
                # recipient is a (name, email, type) tuple built above.
                message = render_to_string(
                    'emails/business_order_confirmation_email.txt' if is_order_type_business else 'emails/order_confirmation_email.txt',
                    {
                        'order': self,
                        'recipient_name': recipient[0],
                        'recipient_type': recipient[2],
                        'site_name': site_name,
                        'order_items': orderitems,
                        'course_names': ", ".join([course_info[0] for course_info in courses_info]),
                        'dashboard_url': dashboard_url,
                        'currency_symbol': settings.PAID_COURSE_REGISTRATION_CURRENCY[1],
                        'order_placed_by': '{username} ({email})'.format(
                            username=self.user.username, email=self.user.email
                        ),
                        'has_billing_info': settings.FEATURES['STORE_BILLING_INFO'],
                        'platform_name': configuration_helpers.get_value('platform_name', settings.PLATFORM_NAME),
                        'payment_support_email': configuration_helpers.get_value(
                            'payment_support_email', settings.PAYMENT_SUPPORT_EMAIL,
                        ),
                        'payment_email_signature': configuration_helpers.get_value('payment_email_signature'),
                    }
                )
                email = EmailMessage(
                    subject=subject,
                    body=message,
                    from_email=from_address,
                    to=[recipient[1]]
                )
                # Only the business order is HTML formatted. A single seat order confirmation is plain text.
                if is_order_type_business:
                    email.content_subtype = "html"
                if csv_file:
                    email.attach(u'RegistrationCodesRedemptionUrls.csv', csv_file.getvalue(), 'text/csv')
                if pdf_file is not None:
                    email.attach(u'Receipt.pdf', pdf_file.getvalue(), 'application/pdf')
                else:
                    # PDF generation failed upstream; attach an apology note instead.
                    file_buffer = StringIO.StringIO(_('pdf download unavailable right now, please contact support.'))
                    email.attach(u'pdf_not_available.txt', file_buffer.getvalue(), 'text/plain')
                email.send()
        except (smtplib.SMTPException, BotoServerError): # sadly need to handle diff. mail backends individually
            log.error('Failed sending confirmation e-mail for order %d', self.id)
    def purchase(self, first='', last='', street1='', street2='', city='', state='', postalcode='',
                 country='', ccnum='', cardtype='', processor_reply_dump=''):
        """
        Call to mark this order as purchased. Iterates through its OrderItems and calls
        their purchased_callback
        `first` - first name of person billed (e.g. John)
        `last` - last name of person billed (e.g. Smith)
        `street1` - first line of a street address of the billing address (e.g. 11 Cambridge Center)
        `street2` - second line of a street address of the billing address (e.g. Suite 101)
        `city` - city of the billing address (e.g. Cambridge)
        `state` - code of the state, province, or territory of the billing address (e.g. MA)
        `postalcode` - postal code of the billing address (e.g. 02142)
        `country` - country code of the billing address (e.g. US)
        `ccnum` - last 4 digits of the credit card number of the credit card billed (e.g. 1111)
        `cardtype` - 3-digit code representing the card type used (e.g. 001)
        `processor_reply_dump` - all the parameters returned by the processor
        """
        # Idempotence guard: a second call on a purchased order is a no-op.
        if self.status == 'purchased':
            log.error(
                u"`purchase` method called on order {}, but order is already purchased.".format(self.id) # pylint: disable=no-member
            )
            return
        self.status = 'purchased'
        self.purchase_time = datetime.now(pytz.utc)
        self.bill_to_first = first
        self.bill_to_last = last
        self.bill_to_city = city
        self.bill_to_state = state
        self.bill_to_country = country
        self.bill_to_postalcode = postalcode
        # Street address and card details are stored only when the deployment
        # opts in to retaining billing info.
        if settings.FEATURES['STORE_BILLING_INFO']:
            self.bill_to_street1 = street1
            self.bill_to_street2 = street2
            self.bill_to_ccnum = ccnum
            self.bill_to_cardtype = cardtype
            self.processor_reply_dump = processor_reply_dump
        # save these changes on the order, then we can tell when we are in an
        # inconsistent state
        self.save()
        # this should return all of the objects with the correct types of the
        # subclasses
        orderitems = OrderItem.objects.filter(order=self).select_subclasses()
        site_name = configuration_helpers.get_value('SITE_NAME', settings.SITE_NAME)
        if self.order_type == OrderTypes.BUSINESS:
            self.update_order_type()
        for item in orderitems:
            item.purchase_item()
        csv_file = None
        courses_info = []
        if self.order_type == OrderTypes.BUSINESS:
            #
            # Generate the CSV file that contains all of the RegistrationCodes that have already been
            # generated when the purchase has transacted
            #
            csv_file, courses_info = self.generate_registration_codes_csv(orderitems, site_name)
        try:
            pdf_file = self.generate_pdf_receipt(orderitems)
        except Exception: # pylint: disable=broad-except
            log.exception('Exception at creating pdf file.')
            pdf_file = None
        try:
            self.send_confirmation_emails(
                orderitems, self.order_type == OrderTypes.BUSINESS,
                csv_file, pdf_file, site_name, courses_info
            )
        except Exception: # pylint: disable=broad-except
            # Catch all exceptions here, since the Django view implicitly
            # wraps this in a transaction. If the order completes successfully,
            # we don't want to roll back just because we couldn't send
            # the confirmation email.
            log.exception('Error occurred while sending payment confirmation email')
        self._emit_order_event('Completed Order', orderitems)
    def refund(self):
        """
        Refund the given order. As of right now, this just marks the order as refunded.
        """
        self.status = 'refunded'
        self.save()
        orderitems = OrderItem.objects.filter(order=self).select_subclasses()
        self._emit_order_event('Refunded Order', orderitems)
    def _emit_order_event(self, event_name, orderitems):
        """
        Emit an analytics event with the given name for this Order. Will iterate over all associated
        OrderItems and add them as products in the event as well.
        """
        try:
            if settings.LMS_SEGMENT_KEY:
                tracking_context = tracker.get_tracker().resolve_context()
                analytics.track(self.user.id, event_name, {
                    'orderId': self.id,
                    'total': str(self.total_cost),
                    'currency': self.currency,
                    'products': [item.analytics_data() for item in orderitems]
                }, context={
                    'ip': tracking_context.get('ip'),
                    'Google Analytics': {
                        'clientId': tracking_context.get('client_id')
                    }
                })
        except Exception: # pylint: disable=broad-except
            # Capturing all exceptions thrown while tracking analytics events. We do not want
            # an operation to fail because of an analytics event, so we will capture these
            # errors in the logs.
            log.exception(
                u'Unable to emit {event} event for user {user} and order {order}'.format(
                    event=event_name, user=self.user.id, order=self.id)
            )
    def add_billing_details(self, company_name='', company_contact_name='', company_contact_email='', recipient_name='',
                            recipient_email='', customer_reference_number=''):
        """
        This function is called after the user selects a purchase type of "Business" and
        is asked to enter the optional billing details. The billing details are updated
        for that order.
        company_name - Name of purchasing organization
        company_contact_name - Name of the key contact at the company the sale was made to
        company_contact_email - Email of the key contact at the company the sale was made to
        recipient_name - Name of the company should the invoice be sent to
        recipient_email - Email of the company should the invoice be sent to
        customer_reference_number - purchase order number of the organization associated with this Order
        """
        self.company_name = company_name
        self.company_contact_name = company_contact_name
        self.company_contact_email = company_contact_email
        self.recipient_name = recipient_name
        self.recipient_email = recipient_email
        self.customer_reference_number = customer_reference_number
        self.save()
    def generate_receipt_instructions(self):
        """
        Call to generate specific instructions for each item in the order. This gets displayed on the receipt
        page, typically. Instructions are something like "visit your dashboard to see your new courses".
        This will return two things in a pair. The first will be a dict with keys=OrderItemSubclassPK corresponding
        to an OrderItem and values=a set of html instructions they generate. The second will be a set of de-duped
        html instructions
        """
        instruction_set = set([]) # heh. not ia32 or alpha or sparc
        instruction_dict = {}
        order_items = OrderItem.objects.filter(order=self).select_subclasses()
        for item in order_items:
            item_pk_with_subclass, set_of_html = item.generate_receipt_instructions()
            instruction_dict[item_pk_with_subclass] = set_of_html
            instruction_set.update(set_of_html)
        return instruction_dict, instruction_set
    def retire(self):
        """
        Method to "retire" orders that have gone through to the payment service
        but have (erroneously) not had their statuses updated.
        This method only works on orders that satisfy the following conditions:
        1) the order status is either "cart" or "paying" (otherwise we raise
           an InvalidStatusToRetire error)
        2) the order's order item's statuses match the order's status (otherwise
           we throw an UnexpectedOrderItemStatus error)
        """
        # if an order is already retired, no-op:
        if self.status in ORDER_STATUS_MAP.values():
            return
        if self.status not in ORDER_STATUS_MAP.keys():
            raise InvalidStatusToRetire(
                "order status {order_status} is not 'paying' or 'cart'".format(
                    order_status=self.status
                )
            )
        # Validate every item before mutating anything, so a failure leaves
        # the order untouched.
        for item in self.orderitem_set.all():
            if item.status != self.status:
                raise UnexpectedOrderItemStatus(
                    "order_item status is different from order status"
                )
        self.status = ORDER_STATUS_MAP[self.status]
        self.save()
        for item in self.orderitem_set.all():
            item.retire()
    def find_item_by_course_id(self, course_id):
        """
        course_id: Course id of the item to find
        Returns OrderItem from the Order given a course_id
        Raises exception ItemNotFoundException when the item
        having the given course_id is not present in the cart

        Returns a list of all matching items (there may be more than one).
        """
        cart_items = OrderItem.objects.filter(order=self).select_subclasses()
        found_items = []
        for item in cart_items:
            # Not every OrderItem subclass has a course_id attribute.
            if getattr(item, 'course_id', None):
                if item.course_id == course_id:
                    found_items.append(item)
        if not found_items:
            raise ItemNotFoundInCartException
        return found_items
class OrderItem(TimeStampedModel):
    """
    This is the basic interface for order items.
    Order items are line items that fill up the shopping carts and orders.
    Each implementation of OrderItem should provide its own purchased_callback as
    a method.
    """
    class Meta(object):
        app_label = "shoppingcart"
    # InheritanceManager lets queries return concrete subclass instances
    # via .select_subclasses().
    objects = InheritanceManager()
    order = models.ForeignKey(Order, db_index=True)
    # this is denormalized, but convenient for SQL queries for reports, etc. user should always be = order.user
    user = models.ForeignKey(User, db_index=True)
    # this is denormalized, but convenient for SQL queries for reports, etc. status should always be = order.status
    status = models.CharField(max_length=32, default='cart', choices=ORDER_STATUSES, db_index=True)
    qty = models.IntegerField(default=1)
    # Price actually charged per unit (after any discount).
    unit_cost = models.DecimalField(default=0.0, decimal_places=2, max_digits=30)
    # Undiscounted price; may be NULL on legacy rows (see is_discounted).
    list_price = models.DecimalField(decimal_places=2, max_digits=30, null=True)
    line_desc = models.CharField(default="Misc. Item", max_length=1024)
    currency = models.CharField(default="usd", max_length=8) # lower case ISO currency codes
    fulfilled_time = models.DateTimeField(null=True, db_index=True)
    refund_requested_time = models.DateTimeField(null=True, db_index=True)
    service_fee = models.DecimalField(default=0.0, decimal_places=2, max_digits=30)
    # general purpose field, not user-visible. Used for reporting
    report_comments = models.TextField(default="")
    @property
    def line_cost(self):
        """ Return the total cost of this OrderItem """
        return self.qty * self.unit_cost
    @classmethod
    def add_to_order(cls, order, *args, **kwargs):
        """
        A suggested convenience function for subclasses.
        NOTE: This does not add anything to the cart. That is left up to the
        subclasses to implement for themselves

        Raises InvalidCartItem if the item's currency differs from the
        currency of a non-empty order.
        """
        # this is a validation step to verify that the currency of the item we
        # are adding is the same as the currency of the order we are adding it
        # to
        currency = kwargs.get('currency', 'usd')
        if order.currency != currency and order.orderitem_set.exists():
            raise InvalidCartItem(_("Trying to add a different currency into the cart"))
    @transaction.atomic
    def purchase_item(self):
        """
        This is basically a wrapper around purchased_callback that handles
        modifying the OrderItem itself
        """
        # Run the subclass fulfillment hook first; if it raises, the atomic
        # block rolls back the status change below.
        self.purchased_callback()
        self.status = 'purchased'
        self.fulfilled_time = datetime.now(pytz.utc)
        self.save()
    def start_purchase(self):
        """
        Start the purchase process. This will set the order item status to "paying",
        at which point it should no longer be modified.
        """
        self.status = 'paying'
        self.save()
    def purchased_callback(self):
        """
        This is called on each inventory item in the shopping cart when the
        purchase goes through.

        Subclasses must override this; the base class always raises.
        """
        raise NotImplementedError
    def generate_receipt_instructions(self):
        """
        This is called on each item in a purchased order to generate receipt instructions.
        This should return a list of `ReceiptInstruction`s in HTML string
        Default implementation is to return an empty set
        """
        return self.pk_with_subclass, set([])
    @property
    def pk_with_subclass(self):
        """
        Returns a named tuple that annotates the pk of this instance with its class, to fully represent
        a pk of a subclass (inclusive) of OrderItem
        """
        return OrderItemSubclassPK(type(self), self.pk)
    @property
    def is_discounted(self):
        """
        Returns True if the item a discount coupon has been applied to the OrderItem and False otherwise.
        Earlier, the OrderItems were stored with an empty list_price if a discount had not been applied.
        Now we consider the item to be non discounted if list_price is None or list_price == unit_cost. In
        these lines, an item is discounted if it's non-None and list_price and unit_cost mismatch.
        This should work with both new and old records.
        """
        return self.list_price and self.list_price != self.unit_cost
    def get_list_price(self):
        """
        Returns the unit_cost if no discount has been applied, or the list_price if it is defined.
        """
        return self.list_price if self.list_price else self.unit_cost
    @property
    def single_item_receipt_template(self):
        """
        The template that should be used when there's only one item in the order
        """
        return 'shoppingcart/receipt.html'
    @property
    def single_item_receipt_context(self):
        """
        Extra variables needed to render the template specified in
        `single_item_receipt_template`
        """
        return {}
    def additional_instruction_text(self, **kwargs): # pylint: disable=unused-argument
        """
        Individual instructions for this order item.
        Currently, only used for emails.
        """
        return ''
    @property
    def pdf_receipt_display_name(self):
        """
        How to display this item on a PDF printed receipt file.
        This can be overridden by the subclasses of OrderItem
        """
        course_key = getattr(self, 'course_id', None)
        if course_key:
            course = get_course_by_id(course_key, depth=0)
            return course.display_name
        else:
            # Non-course items have no sensible default display name.
            raise Exception(
                "Not Implemented. OrderItems that are not Course specific should have"
                " a overridden pdf_receipt_display_name property"
            )
    def analytics_data(self):
        """Simple function used to construct analytics data for the OrderItem.
        The default implementation returns defaults for most attributes. When no name or
        category is specified by the implementation, the string 'N/A' is placed for the
        name and category. This should be handled appropriately by all implementations.
        Returns
            A dictionary containing analytics data for this OrderItem.
        """
        return {
            'id': self.id,
            'sku': type(self).__name__,
            'name': 'N/A',
            'price': str(self.unit_cost),
            'quantity': self.qty,
            'category': 'N/A',
        }
    def retire(self):
        """
        Called by the `retire` method defined in the `Order` class. Retires
        an order item if its (and its order's) status was erroneously not
        updated to "purchased" after the order was processed.
        """
        # Order.retire() has already validated that self.status is a key of
        # ORDER_STATUS_MAP ('cart' or 'paying').
        self.status = ORDER_STATUS_MAP[self.status]
        self.save()
class Invoice(TimeStampedModel):
    """
    This table capture all the information needed to support "invoicing"
    which is when a user wants to purchase Registration Codes,
    but will not do so via a Credit Card transaction.
    """
    class Meta(object):
        app_label = "shoppingcart"
    company_name = models.CharField(max_length=255, db_index=True)
    company_contact_name = models.CharField(max_length=255)
    company_contact_email = models.CharField(max_length=255)
    recipient_name = models.CharField(max_length=255)
    recipient_email = models.CharField(max_length=255)
    address_line_1 = models.CharField(max_length=255)
    address_line_2 = models.CharField(max_length=255, null=True, blank=True)
    address_line_3 = models.CharField(max_length=255, null=True, blank=True)
    city = models.CharField(max_length=255, null=True)
    state = models.CharField(max_length=255, null=True)
    zip = models.CharField(max_length=15, null=True)
    country = models.CharField(max_length=64, null=True)
    # This field has been deprecated.
    # The total amount can now be calculated as the sum
    # of each invoice item associated with the invoice.
    # For backwards compatibility, this field is maintained
    # and written to during invoice creation.
    total_amount = models.FloatField()
    # This field has been deprecated in order to support
    # invoices for items that are not course-related.
    # Although this field is still maintained for backwards
    # compatibility, you should use CourseRegistrationCodeInvoiceItem
    # to look up the course ID for purchased redeem codes.
    course_id = CourseKeyField(max_length=255, db_index=True)
    internal_reference = models.CharField(
        max_length=255,
        null=True,
        blank=True,
        help_text=ugettext_lazy("Internal reference code for this invoice.")
    )
    customer_reference_number = models.CharField(
        max_length=63,
        null=True,
        blank=True,
        help_text=ugettext_lazy("Customer's reference code for this invoice.")
    )
    # False marks the invoice as voided/cancelled.
    is_valid = models.BooleanField(default=True)
    @classmethod
    def get_invoice_total_amount_for_course(cls, course_key):
        """
        returns the invoice total amount generated by course.

        Sums the (deprecated) total_amount field across all valid invoices
        for the course; returns 0 when there are none.
        """
        result = cls.objects.filter(course_id=course_key, is_valid=True).aggregate(total=Sum('total_amount'))
        # Sum() yields None when no rows match; normalize that to 0.
        total = result.get('total', 0)
        return total if total else 0
    def generate_pdf_invoice(self, course, course_price, quantity, sale_price):
        """
        Generates the pdf invoice for the given course
        and returns the pdf_buffer.
        """
        # NOTE(review): sale_price appears to be the total charged for the
        # whole quantity (per-item discount = course_price - sale_price/quantity)
        # -- confirm against callers. Under Python 2, if sale_price and
        # quantity are both ints this is integer division; verify inputs.
        discount_per_item = float(course_price) - sale_price / quantity
        list_price = course_price - discount_per_item
        items_data = [{
            'item_description': course.display_name,
            'quantity': quantity,
            'list_price': list_price,
            'discount': discount_per_item,
            'item_total': quantity * list_price
        }]
        pdf_buffer = BytesIO()
        # An invoice (unlike a receipt) shows no payment received and a full
        # outstanding balance.
        PDFInvoice(
            items_data=items_data,
            item_id=str(self.id),
            date=datetime.now(pytz.utc),
            is_invoice=True,
            total_cost=float(self.total_amount),
            payment_received=0,
            balance=float(self.total_amount)
        ).generate_pdf(pdf_buffer)
        return pdf_buffer
    def snapshot(self):
        """Create a snapshot of the invoice.
        A snapshot is a JSON-serializable representation
        of the invoice's state, including its line items
        and associated transactions (payments/refunds).
        This is useful for saving the history of changes
        to the invoice.
        Returns:
            dict
        """
        return {
            'internal_reference': self.internal_reference,
            'customer_reference': self.customer_reference_number,
            'is_valid': self.is_valid,
            'contact_info': {
                'company_name': self.company_name,
                'company_contact_name': self.company_contact_name,
                'company_contact_email': self.company_contact_email,
                'recipient_name': self.recipient_name,
                'recipient_email': self.recipient_email,
                'address_line_1': self.address_line_1,
                'address_line_2': self.address_line_2,
                'address_line_3': self.address_line_3,
                'city': self.city,
                'state': self.state,
                'zip': self.zip,
                'country': self.country,
            },
            'items': [
                item.snapshot()
                for item in InvoiceItem.objects.filter(invoice=self).select_subclasses()
            ],
            'transactions': [
                trans.snapshot()
                for trans in InvoiceTransaction.objects.filter(invoice=self)
            ],
        }
    def __unicode__(self):
        """Display as the internal reference (or a placeholder) plus creation date."""
        label = (
            unicode(self.internal_reference)
            if self.internal_reference
            else u"No label"
        )
        created = (
            self.created.strftime("%Y-%m-%d")
            if self.created
            else u"No date"
        )
        return u"{label} ({date_created})".format(
            label=label, date_created=created
        )
# Lifecycle states for an InvoiceTransaction, in Django ``choices`` format.
INVOICE_TRANSACTION_STATUSES = (
    # A payment/refund is in process, but money has not yet been transferred
    ('started', 'started'),
    # A payment/refund has completed successfully
    # This should be set ONLY once money has been successfully exchanged.
    ('completed', 'completed'),
    # A payment/refund was promised, but was cancelled before
    # money had been transferred. An example would be
    # cancelling a refund check before the recipient has
    # a chance to deposit it.
    ('cancelled', 'cancelled')
)
class InvoiceTransaction(TimeStampedModel):
    """Record payment and refund information for invoices.
    There are two expected use cases:
    1) We send an invoice to someone, and they send us a check.
       We then manually create an invoice transaction to represent
       the payment.
    2) We send an invoice to someone, and they pay us. Later, we
       need to issue a refund for the payment. We manually
       create a transaction with a negative amount to represent
       the refund.
    """
    class Meta(object):
        app_label = "shoppingcart"
    invoice = models.ForeignKey(Invoice)
    amount = models.DecimalField(
        default=0.0, decimal_places=2, max_digits=30,
        help_text=ugettext_lazy(
            "The amount of the transaction. Use positive amounts for payments"
            " and negative amounts for refunds."
        )
    )
    currency = models.CharField(
        default="usd",
        max_length=8,
        help_text=ugettext_lazy("Lower-case ISO currency codes")
    )
    comments = models.TextField(
        null=True,
        blank=True,
        help_text=ugettext_lazy("Optional: provide additional information for this transaction")
    )
    status = models.CharField(
        max_length=32,
        default='started',
        choices=INVOICE_TRANSACTION_STATUSES,
        help_text=ugettext_lazy(
            "The status of the payment or refund. "
            "'started' means that payment is expected, but money has not yet been transferred. "
            "'completed' means that the payment or refund was received. "
            "'cancelled' means that payment or refund was expected, but was cancelled before money was transferred. "
        )
    )
    created_by = models.ForeignKey(User)
    last_modified_by = models.ForeignKey(User, related_name='last_modified_by_user')
    @classmethod
    def get_invoice_transaction(cls, invoice_id):
        """
        if found Returns the Invoice Transaction object for the given invoice_id
        else returns None
        """
        try:
            # NOTE(review): 'refunded' is not one of
            # INVOICE_TRANSACTION_STATUSES (started/completed/cancelled), so
            # that half of the OR filter can only match rows written outside
            # the model's choices -- confirm whether it is dead code.
            # Also, .get() raises MultipleObjectsReturned (not handled here)
            # if more than one matching transaction exists.
            return cls.objects.get(Q(invoice_id=invoice_id), Q(status='completed') | Q(status='refunded'))
        except InvoiceTransaction.DoesNotExist:
            return None
    @classmethod
    def get_total_amount_of_paid_course_invoices(cls, course_key):
        """
        returns the total amount of the paid invoices.

        Sums completed, positive-amount transactions for invoices tied to the
        given course; returns 0 when there are none.
        """
        result = cls.objects.filter(amount__gt=0, invoice__course_id=course_key, status='completed').aggregate(
            total=Sum(
                'amount',
                output_field=models.DecimalField(decimal_places=2, max_digits=30)
            )
        )
        # Sum() yields None when no rows match; normalize that to 0.
        total = result.get('total', 0)
        return total if total else 0
    def snapshot(self):
        """Create a snapshot of the invoice transaction.
        The returned dictionary is JSON-serializable.
        Returns:
            dict
        """
        return {
            'amount': unicode(self.amount),
            'currency': self.currency,
            'comments': self.comments,
            'status': self.status,
            'created_by': self.created_by.username,
            'last_modified_by': self.last_modified_by.username
        }
class InvoiceItem(TimeStampedModel):
    """
    This is the basic interface for invoice items.

    Each invoice item represents a "line" in the invoice.
    For example, in an invoice for course registration codes,
    there might be an invoice item representing 10 registration
    codes for the DemoX course.
    """
    class Meta(object):
        app_label = "shoppingcart"

    # InheritanceManager lets queries on this base class return subclass
    # instances (e.g. CourseRegistrationCodeInvoiceItem) via select_subclasses().
    objects = InheritanceManager()
    # The invoice this line item belongs to.
    invoice = models.ForeignKey(Invoice, db_index=True)
    # Number of units sold on this line.
    qty = models.IntegerField(
        default=1,
        help_text=ugettext_lazy("The number of items sold.")
    )
    # Per-unit price, after any discounts have been applied.
    unit_price = models.DecimalField(
        default=0.0,
        decimal_places=2,
        max_digits=30,
        help_text=ugettext_lazy("The price per item sold, including discounts.")
    )
    # ISO currency code for `unit_price`; stored lower-case (e.g. "usd").
    currency = models.CharField(
        default="usd",
        max_length=8,
        help_text=ugettext_lazy("Lower-case ISO currency codes")
    )

    def snapshot(self):
        """Create a snapshot of the invoice item.

        The returned dictionary is JSON-serializable.

        Returns:
            dict
        """
        return {
            'qty': self.qty,
            'unit_price': unicode(self.unit_price),
            'currency': self.currency
        }
class CourseRegistrationCodeInvoiceItem(InvoiceItem):
    """
    This is an invoice item that represents a payment for
    a course registration.
    """
    class Meta(object):
        app_label = "shoppingcart"

    # The course the registration codes on this line are for.
    course_id = CourseKeyField(max_length=128, db_index=True)

    def snapshot(self):
        """Create a snapshot of the invoice item.

        This is the same as a snapshot for other invoice items,
        with the addition of a `course_id` field.

        Returns:
            dict
        """
        snapshot = super(CourseRegistrationCodeInvoiceItem, self).snapshot()
        snapshot['course_id'] = unicode(self.course_id)
        return snapshot
class InvoiceHistory(models.Model):
    """History of changes to invoices.

    This table stores snapshots of invoice state,
    including the associated line items and transactions
    (payments/refunds).

    Entries in the table are created, but never deleted
    or modified.

    We use Django signals to save history entries on change
    events.  These signals are fired within a database
    transaction, so the history record is created only
    if the invoice change is successfully persisted.
    """
    # When this history entry was recorded.
    timestamp = models.DateTimeField(auto_now_add=True, db_index=True)
    # The invoice this snapshot belongs to.
    invoice = models.ForeignKey(Invoice)

    # JSON-serialized representation of the current state
    # of the invoice, including its line items and
    # transactions (payments/refunds).
    snapshot = models.TextField(blank=True)

    @classmethod
    def save_invoice_snapshot(cls, invoice):
        """Save a snapshot of the invoice's current state.

        Arguments:
            invoice (Invoice): The invoice to save.
        """
        cls.objects.create(
            invoice=invoice,
            snapshot=json.dumps(invoice.snapshot())
        )

    @staticmethod
    def snapshot_receiver(sender, instance, **kwargs):  # pylint: disable=unused-argument
        """Signal receiver that saves a snapshot of an invoice.

        Handles saves of Invoice itself as well as of related records
        (items/transactions) that carry an `invoice` attribute.

        Arguments:
            sender: Not used, but required by Django signals.
            instance (Invoice, InvoiceItem, or InvoiceTransaction)
        """
        if isinstance(instance, Invoice):
            InvoiceHistory.save_invoice_snapshot(instance)
        elif hasattr(instance, 'invoice'):
            InvoiceHistory.save_invoice_snapshot(instance.invoice)

    class Meta(object):
        get_latest_by = "timestamp"
        app_label = "shoppingcart"
# Hook up Django signals to record changes in the history table.
# We record any change to an invoice, invoice item, or transaction.
# We also record any deletion of a transaction, since users can delete
# transactions via Django admin.
#
# Note that we need to include *each* InvoiceItem subclass
# here, since Django signals do not fire automatically for subclasses
# of the "sender" class.
post_save.connect(InvoiceHistory.snapshot_receiver, sender=Invoice)
post_save.connect(InvoiceHistory.snapshot_receiver, sender=InvoiceItem)
post_save.connect(InvoiceHistory.snapshot_receiver, sender=CourseRegistrationCodeInvoiceItem)
post_save.connect(InvoiceHistory.snapshot_receiver, sender=InvoiceTransaction)
post_delete.connect(InvoiceHistory.snapshot_receiver, sender=InvoiceTransaction)
class CourseRegistrationCode(models.Model):
    """
    This table contains registration codes.

    With a registration code, a user can register for a course for free.
    """
    class Meta(object):
        app_label = "shoppingcart"

    # The redemption code itself; unique across all courses.
    code = models.CharField(max_length=32, db_index=True, unique=True)
    course_id = CourseKeyField(max_length=255, db_index=True)
    created_by = models.ForeignKey(User, related_name='created_by_user')
    created_at = models.DateTimeField(auto_now_add=True)
    # Set when the code came from a bulk purchase (shopping-cart order);
    # null for invoice-generated codes.
    order = models.ForeignKey(Order, db_index=True, null=True, related_name="purchase_order")
    mode_slug = models.CharField(max_length=100, null=True)
    # Codes can be invalidated without being deleted.
    is_valid = models.BooleanField(default=True)

    # For backwards compatibility, we maintain the FK to "invoice"
    # In the future, we will remove this in favor of the FK
    # to "invoice_item" (which can be used to look up the invoice).
    invoice = models.ForeignKey(Invoice, null=True)
    invoice_item = models.ForeignKey(CourseRegistrationCodeInvoiceItem, null=True)

    @classmethod
    def order_generated_registration_codes(cls, course_id):
        """
        Returns the registration codes that were generated
        via bulk purchase scenario.
        """
        return cls.objects.filter(order__isnull=False, course_id=course_id)

    @classmethod
    def invoice_generated_registration_codes(cls, course_id):
        """
        Returns the registration codes that were generated
        via invoice.
        """
        return cls.objects.filter(invoice__isnull=False, course_id=course_id)
class RegistrationCodeRedemption(models.Model):
    """
    This model contains the registration-code redemption info.
    """
    class Meta(object):
        app_label = "shoppingcart"

    # Order through which the code was redeemed; null for
    # invoice-generated codes (see create_invoice_generated_registration_redemption).
    order = models.ForeignKey(Order, db_index=True, null=True)
    registration_code = models.ForeignKey(CourseRegistrationCode, db_index=True)
    redeemed_by = models.ForeignKey(User, db_index=True)
    redeemed_at = models.DateTimeField(auto_now_add=True, null=True)
    # The enrollment created/used when the code was redeemed, if any.
    course_enrollment = models.ForeignKey(CourseEnrollment, null=True)

    @classmethod
    def registration_code_used_for_enrollment(cls, course_enrollment):
        """
        Returns RegistrationCodeRedemption object if registration code
        has been used during the course enrollment else Returns None.
        """
        # theoretically there could be more than one (e.g. someone self-unenrolls
        # then re-enrolls with a different regcode)
        reg_codes = cls.objects.filter(course_enrollment=course_enrollment).order_by('-redeemed_at')
        if reg_codes:
            # return the first one. In all normal use cases of registration codes
            # the user will only have one
            return reg_codes[0]

        return None

    @classmethod
    def is_registration_code_redeemed(cls, course_reg_code):
        """
        Checks the existence of the registration code
        in the RegistrationCodeRedemption.
        """
        return cls.objects.filter(registration_code__code=course_reg_code).exists()

    @classmethod
    def get_registration_code_redemption(cls, code, course_id):
        """
        Returns the registration code redemption object if found else returns None.
        """
        try:
            code_redemption = cls.objects.get(registration_code__code=code, registration_code__course_id=course_id)
        except cls.DoesNotExist:
            code_redemption = None
        return code_redemption

    @classmethod
    def create_invoice_generated_registration_redemption(cls, course_reg_code, user):  # pylint: disable=invalid-name
        """
        This function creates a RegistrationCodeRedemption entry in case the registration codes were invoice generated
        and thus the order_id is missing.
        """
        code_redemption = RegistrationCodeRedemption(registration_code=course_reg_code, redeemed_by=user)
        code_redemption.save()
        return code_redemption
class SoftDeleteCouponManager(models.Manager):
    """ Use this manager to get objects that have a is_active=True """

    def get_active_coupons_queryset(self):
        """
        Return only the coupons that have not been soft-deleted
        (i.e. is_active=True).
        """
        return super(SoftDeleteCouponManager, self).get_queryset().filter(is_active=True)

    def get_queryset(self):
        """
        Return all coupon objects, including soft-deleted ones.
        """
        return super(SoftDeleteCouponManager, self).get_queryset()
class Coupon(models.Model):
    """
    This table contains coupon codes.

    A user can get a discount offer on a course by providing a coupon code.
    """
    class Meta(object):
        app_label = "shoppingcart"

    # Coupon codes are unique per course, not globally (no unique=True here).
    code = models.CharField(max_length=32, db_index=True)
    description = models.CharField(max_length=255, null=True, blank=True)
    course_id = CourseKeyField(max_length=255)
    # Whole-number percentage (e.g. 25 means 25% off).
    percentage_discount = models.IntegerField(default=0)
    created_by = models.ForeignKey(User)
    created_at = models.DateTimeField(auto_now_add=True)
    # Soft-delete flag; see SoftDeleteCouponManager.
    is_active = models.BooleanField(default=True)
    expiration_date = models.DateTimeField(null=True, blank=True)

    def __unicode__(self):
        return "[Coupon] code: {} course: {}".format(self.code, self.course_id)

    objects = SoftDeleteCouponManager()

    @property
    def display_expiry_date(self):
        """
        Return the coupon expiration date in a human-readable format,
        or None if the coupon has no expiration date.

        NOTE(review): one day is subtracted before formatting — presumably
        `expiration_date` stores the first moment the coupon is *invalid*,
        so the last valid day is shown instead.  TODO confirm against callers.
        """
        return (self.expiration_date - timedelta(days=1)).strftime("%B %d, %Y") if self.expiration_date else None
class CouponRedemption(models.Model):
    """
    This table contains coupon redemption info: which user applied
    which coupon to which order.
    """
    class Meta(object):
        app_label = "shoppingcart"

    order = models.ForeignKey(Order, db_index=True)
    user = models.ForeignKey(User, db_index=True)
    coupon = models.ForeignKey(Coupon, db_index=True)

    @classmethod
    def remove_code_redemption_from_item(cls, item, user):
        """
        If an item is removed from the shopping cart, remove the
        corresponding coupon-code redemption info (if any).

        Arguments:
            item: the OrderItem being removed (must expose course_id, order_id, id).
            user (User): the owner of the cart.
        """
        order_item_course_id = item.course_id
        try:
            # Try to remove redemption information of coupon code, If exist.
            coupon_redemption = cls.objects.get(
                user=user,
                coupon__course_id=order_item_course_id if order_item_course_id else CourseKeyField.Empty,
                order=item.order_id
            )
            coupon_redemption.delete()
            log.info(
                u'Coupon "%s" redemption entry removed for user "%s" for order item "%s"',
                coupon_redemption.coupon.code,
                user,
                str(item.id),
            )
        except CouponRedemption.DoesNotExist:
            log.debug(u'Code redemption does not exist for order item id=%s.', str(item.id))

    @classmethod
    def remove_coupon_redemption_from_cart(cls, user, cart):
        """
        Delete all coupon redemptions attached to the given cart for `user`.
        """
        coupon_redemption = cls.objects.filter(user=user, order=cart)
        if coupon_redemption:
            coupon_redemption.delete()
            log.info(u'Coupon redemption entry removed for user %s for order %s', user, cart.id)

    @classmethod
    def get_discount_price(cls, percentage_discount, value):
        """
        Return the price after applying `percentage_discount` (an integer
        percentage) to `value`.  The discount is rounded to 2 decimal places.
        """
        discount = Decimal("{0:.2f}".format(Decimal(percentage_discount / 100.00) * value))
        return value - discount

    @classmethod
    def add_coupon_redemption(cls, coupon, order, cart_items):
        """
        Record a redemption of `coupon` against `order` and apply the
        discount to the first cart item whose course matches the coupon.

        Returns:
            bool: True if the discount was applied to an item, else False.

        Raises:
            MultipleCouponsNotAllowedException: if any coupon (including this
                one) has already been redeemed against this order.
        """
        is_redemption_applied = False
        coupon_redemptions = cls.objects.filter(order=order, user=order.user)
        for coupon_redemption in coupon_redemptions:
            # Reject a second, different coupon code, and also a repeat of
            # the same coupon instance.
            if coupon_redemption.coupon.code != coupon.code or coupon_redemption.coupon.id == coupon.id:
                # BUGFIX: was log.exception(), which attaches a bogus
                # traceback when no exception is active; this is an expected
                # condition reported via the raise below.
                log.warning(
                    u"Coupon redemption already exist for user '%s' against order id '%s'",
                    order.user.username,
                    order.id,
                )
                raise MultipleCouponsNotAllowedException

        for item in cart_items:
            if item.course_id:
                if item.course_id == coupon.course_id:
                    coupon_redemption = cls(order=order, user=order.user, coupon=coupon)
                    coupon_redemption.save()
                    discount_price = cls.get_discount_price(coupon.percentage_discount, item.unit_cost)
                    # Preserve the pre-discount price before overwriting unit_cost.
                    item.list_price = item.unit_cost
                    item.unit_cost = discount_price
                    item.save()
                    log.info(
                        u"Discount generated for user %s against order id '%s'",
                        order.user.username,
                        order.id,
                    )
                    is_redemption_applied = True
                    # Only the first matching item receives the discount.
                    return is_redemption_applied

        return is_redemption_applied

    @classmethod
    def get_top_discount_codes_used(cls, course_id):
        """
        Returns the top discount codes used.

        QuerySet = [
            {
                'coupon__percentage_discount': 22,
                'coupon__code': '12',
                'coupon__used_count': '2',
            },
            {
                ...
            }
        ]
        """
        return cls.objects.filter(order__status='purchased', coupon__course_id=course_id).values(
            'coupon__code', 'coupon__percentage_discount'
        ).annotate(coupon__used_count=Count('coupon__code')).order_by('-coupon__used_count')

    @classmethod
    def get_total_coupon_code_purchases(cls, course_id):
        """
        Return the total number of seats purchased using coupon codes
        for the given course.
        """
        return cls.objects.filter(order__status='purchased', coupon__course_id=course_id).aggregate(Count('coupon'))
class PaidCourseRegistration(OrderItem):
    """
    This is an inventory item for paying for a course registration.
    """
    class Meta(object):
        app_label = "shoppingcart"

    course_id = CourseKeyField(max_length=128, db_index=True)
    mode = models.SlugField(default=CourseMode.DEFAULT_SHOPPINGCART_MODE_SLUG)
    # Set by purchased_callback() once the user is actually enrolled.
    course_enrollment = models.ForeignKey(CourseEnrollment, null=True)

    @classmethod
    def get_self_purchased_seat_count(cls, course_key, status='purchased'):
        """
        Return the count of paid_course items filtered by course_id and status.
        """
        return cls.objects.filter(course_id=course_key, status=status).count()

    @classmethod
    def get_course_item_for_user_enrollment(cls, user, course_id, course_enrollment):
        """
        Returns PaidCourseRegistration object if user has payed for
        the course enrollment else Returns None.
        """
        try:
            # latest('id') picks the most recent purchase when several exist.
            return cls.objects.filter(course_id=course_id, user=user, course_enrollment=course_enrollment,
                                      status='purchased').latest('id')
        except PaidCourseRegistration.DoesNotExist:
            return None

    @classmethod
    def contained_in_order(cls, order, course_id):
        """
        Is the course defined by course_id contained in the order?
        """
        return course_id in [
            item.course_id
            for item in order.orderitem_set.all().select_subclasses("paidcourseregistration")
            if isinstance(item, cls)
        ]

    @classmethod
    def get_total_amount_of_purchased_item(cls, course_key, status='purchased'):
        """
        This will return the total amount of money that a purchased course generated.
        """
        total_cost = 0
        result = cls.objects.filter(course_id=course_key, status=status).aggregate(
            total=Sum(
                F('qty') * F('unit_cost'),
                output_field=models.DecimalField(decimal_places=2, max_digits=30)
            )
        )

        if result['total'] is not None:
            total_cost = result['total']

        return total_cost

    @classmethod
    @transaction.atomic
    def add_to_order(cls, order, course_id, mode_slug=CourseMode.DEFAULT_SHOPPINGCART_MODE_SLUG,
                     cost=None, currency=None):  # pylint: disable=arguments-differ
        """
        A standardized way to create these objects, with sensible defaults filled in.
        Will update the cost if called on an order that already carries the course.
        Returns the order item.

        Raises:
            CourseDoesNotExistException: course_id does not exist in the modulestore.
            ItemAlreadyInCartException: the course is already in this order.
            AlreadyEnrolledInCourseException: the user is already enrolled.
        """
        # First a bunch of sanity checks:
        # actually fetch the course to make sure it exists, use this to
        # throw errors if it doesn't.
        course = modulestore().get_course(course_id)
        if not course:
            log.error("User {} tried to add non-existent course {} to cart id {}"
                      .format(order.user.email, course_id, order.id))
            raise CourseDoesNotExistException

        if cls.contained_in_order(order, course_id):
            log.warning(
                u"User %s tried to add PaidCourseRegistration for course %s, already in cart id %s",
                order.user.email,
                course_id,
                order.id,
            )
            raise ItemAlreadyInCartException

        if CourseEnrollment.is_enrolled(user=order.user, course_key=course_id):
            log.warning("User {} trying to add course {} to cart id {}, already registered"
                        .format(order.user.email, course_id, order.id))
            raise AlreadyEnrolledInCourseException

        ### Validations done, now proceed
        ### handle default arguments for mode_slug, cost, currency
        course_mode = CourseMode.mode_for_course(course_id, mode_slug)
        if not course_mode:
            # user could have specified a mode that's not set, in that case return the DEFAULT_MODE
            course_mode = CourseMode.DEFAULT_SHOPPINGCART_MODE
        if not cost:
            cost = course_mode.min_price
        if not currency:
            currency = course_mode.currency

        super(PaidCourseRegistration, cls).add_to_order(order, course_id, cost, currency=currency)

        item, __ = cls.objects.get_or_create(order=order, user=order.user, course_id=course_id)
        item.status = order.status
        item.mode = course_mode.slug
        item.qty = 1
        item.unit_cost = cost
        item.list_price = cost
        item.line_desc = _(u'Registration for Course: {course_name}').format(
            course_name=course.display_name_with_default_escaped)
        item.currency = currency
        order.currency = currency
        item.report_comments = item.csv_report_comments
        order.save()
        item.save()
        log.info("User {} added course registration {} to cart: order {}"
                 .format(order.user.email, course_id, order.id))

        # Fire the "payment started" enrollment status signal.
        CourseEnrollment.send_signal_full(EnrollStatusChange.paid_start,
                                          user=order.user, mode=item.mode, course_id=course_id,
                                          cost=cost, currency=currency)

        return item

    def purchased_callback(self):
        """
        When purchased, this should enroll the user in the course.  We are assuming that
        course settings for enrollment date are configured such that only if the (user.email, course_id) pair is found
        in CourseEnrollmentAllowed will the user be allowed to enroll.  Otherwise requiring payment
        would in fact be quite silly since there's a clear back door.
        """
        if not modulestore().has_course(self.course_id):
            msg = u"The customer purchased Course {0}, but that course doesn't exist!".format(self.course_id)
            log.error(msg)
            raise PurchasedCallbackException(msg)

        # enroll in course and link to the enrollment_id
        self.course_enrollment = CourseEnrollment.enroll(user=self.user, course_key=self.course_id, mode=self.mode)
        self.save()

        log.info("Enrolled {0} in paid course {1}, paid ${2}"
                 .format(self.user.email, self.course_id, self.line_cost))
        self.course_enrollment.send_signal(EnrollStatusChange.paid_complete,
                                           cost=self.line_cost, currency=self.currency)

    def generate_receipt_instructions(self):
        """
        Generates instructions when the user has purchased a PaidCourseRegistration.
        Basically tells the user to visit the dashboard to see their new classes
        """
        notification = _(
            u"Please visit your {link_start}dashboard{link_end} "
            u"to see your new course."
        ).format(
            link_start=u'<a href="{url}">'.format(url=reverse('dashboard')),
            link_end=u'</a>',
        )

        return self.pk_with_subclass, set([notification])

    @property
    def csv_report_comments(self):
        """
        Tries to fetch an annotation associated with the course_id from the database.  If not found, returns u"".
        Otherwise returns the annotation.
        """
        try:
            return PaidCourseRegistrationAnnotation.objects.get(course_id=self.course_id).annotation
        except PaidCourseRegistrationAnnotation.DoesNotExist:
            return u""

    def analytics_data(self):
        """Simple function used to construct analytics data for the OrderItem.

        If the Order Item is associated with a course, additional fields will be populated with
        course information.  If there is a mode associated, the mode data is included in the SKU.

        Returns
            A dictionary containing analytics data for this OrderItem.
        """
        data = super(PaidCourseRegistration, self).analytics_data()
        sku = data['sku']
        if self.course_id != CourseKeyField.Empty:
            data['name'] = unicode(self.course_id)
            data['category'] = unicode(self.course_id.org)
        if self.mode:
            data['sku'] = sku + u'.' + unicode(self.mode)
        return data
class CourseRegCodeItem(OrderItem):
    """
    This is an inventory item for paying for
    generating course registration codes.
    """
    class Meta(object):
        app_label = "shoppingcart"

    course_id = CourseKeyField(max_length=128, db_index=True)
    mode = models.SlugField(default=CourseMode.DEFAULT_SHOPPINGCART_MODE_SLUG)

    @classmethod
    def get_bulk_purchased_seat_count(cls, course_key, status='purchased'):
        """
        Return the sum of bulk purchased seats (qty) for the course.
        """
        total = 0
        result = cls.objects.filter(course_id=course_key, status=status).aggregate(total=Sum('qty'))

        if result['total'] is not None:
            total = result['total']

        return total

    @classmethod
    def contained_in_order(cls, order, course_id):
        """
        Is the course defined by course_id contained in the order?
        """
        return course_id in [
            item.course_id
            for item in order.orderitem_set.all().select_subclasses("courseregcodeitem")
            if isinstance(item, cls)
        ]

    @classmethod
    def get_total_amount_of_purchased_item(cls, course_key, status='purchased'):
        """
        This will return the total amount of money that a purchased course generated.
        """
        total_cost = 0
        result = cls.objects.filter(course_id=course_key, status=status).aggregate(
            total=Sum(
                F('qty') * F('unit_cost'),
                output_field=models.DecimalField(decimal_places=2, max_digits=30)
            )
        )

        if result['total'] is not None:
            total_cost = result['total']

        return total_cost

    @classmethod
    @transaction.atomic
    def add_to_order(cls, order, course_id, qty, mode_slug=CourseMode.DEFAULT_SHOPPINGCART_MODE_SLUG,
                     cost=None, currency=None):  # pylint: disable=arguments-differ
        """
        A standardized way to create these objects, with sensible defaults filled in.
        Will update the cost if called on an order that already carries the course.
        Returns the order item.

        Raises:
            CourseDoesNotExistException: course_id does not exist in the modulestore.
            ItemAlreadyInCartException: the course is already in this order.
            AlreadyEnrolledInCourseException: the user is already enrolled.
        """
        # First a bunch of sanity checks:
        # actually fetch the course to make sure it exists, use this to
        # throw errors if it doesn't.
        course = modulestore().get_course(course_id)
        if not course:
            log.error("User {} tried to add non-existent course {} to cart id {}"
                      .format(order.user.email, course_id, order.id))
            raise CourseDoesNotExistException

        if cls.contained_in_order(order, course_id):
            log.warning("User {} tried to add PaidCourseRegistration for course {}, already in cart id {}"
                        .format(order.user.email, course_id, order.id))
            raise ItemAlreadyInCartException

        if CourseEnrollment.is_enrolled(user=order.user, course_key=course_id):
            log.warning("User {} trying to add course {} to cart id {}, already registered"
                        .format(order.user.email, course_id, order.id))
            raise AlreadyEnrolledInCourseException

        ### Validations done, now proceed
        ### handle default arguments for mode_slug, cost, currency
        course_mode = CourseMode.mode_for_course(course_id, mode_slug)
        if not course_mode:
            # user could have specified a mode that's not set, in that case return the DEFAULT_SHOPPINGCART_MODE
            course_mode = CourseMode.DEFAULT_SHOPPINGCART_MODE
        if not cost:
            cost = course_mode.min_price
        if not currency:
            currency = course_mode.currency

        super(CourseRegCodeItem, cls).add_to_order(order, course_id, cost, currency=currency)

        item, created = cls.objects.get_or_create(order=order, user=order.user, course_id=course_id)  # pylint: disable=unused-variable
        item.status = order.status
        item.mode = course_mode.slug
        item.unit_cost = cost
        item.list_price = cost
        item.qty = qty
        item.line_desc = _(u'Enrollment codes for Course: {course_name}').format(
            course_name=course.display_name_with_default_escaped)
        item.currency = currency
        order.currency = currency
        item.report_comments = item.csv_report_comments
        order.save()
        item.save()
        log.info("User {} added course registration {} to cart: order {}"
                 .format(order.user.email, course_id, order.id))
        return item

    def purchased_callback(self):
        """
        The purchase is completed, this OrderItem type will generate Registration Codes that will
        be redeemed by users.
        """
        if not modulestore().has_course(self.course_id):
            msg = u"The customer purchased Course {0}, but that course doesn't exist!".format(self.course_id)
            log.error(msg)
            raise PurchasedCallbackException(msg)
        total_registration_codes = int(self.qty)

        # we need to import here because of a circular dependency
        # we should ultimately refactor code to have save_registration_code in this models.py
        # file, but there's also a shared dependency on a random string generator which
        # is in another PR (for another feature)
        from instructor.views.api import save_registration_code
        for i in range(total_registration_codes):  # pylint: disable=unused-variable
            save_registration_code(self.user, self.course_id, self.mode, order=self.order)

        log.info("Enrolled {0} in paid course {1}, paid ${2}"
                 .format(self.user.email, self.course_id, self.line_cost))

    @property
    def csv_report_comments(self):
        """
        Tries to fetch an annotation associated with the course_id from the database.  If not found, returns u"".
        Otherwise returns the annotation.
        """
        try:
            return CourseRegCodeItemAnnotation.objects.get(course_id=self.course_id).annotation
        except CourseRegCodeItemAnnotation.DoesNotExist:
            return u""

    def analytics_data(self):
        """Simple function used to construct analytics data for the OrderItem.

        If the OrderItem is associated with a course, additional fields will be populated with
        course information.  If a mode is available, it will be included in the SKU.

        Returns
            A dictionary containing analytics data for this OrderItem.
        """
        data = super(CourseRegCodeItem, self).analytics_data()
        sku = data['sku']
        if self.course_id != CourseKeyField.Empty:
            data['name'] = unicode(self.course_id)
            data['category'] = unicode(self.course_id.org)
        if self.mode:
            data['sku'] = sku + u'.' + unicode(self.mode)
        return data
class CourseRegCodeItemAnnotation(models.Model):
    """
    A model that maps course_id to an additional annotation.  This is specifically needed because when Stanford
    generates report for the paid courses, each report item must contain the payment account associated with a course.
    And unfortunately we didn't have the concept of a "SKU" or stock item where we could keep this association,
    so this is to retrofit it.
    """
    class Meta(object):
        app_label = "shoppingcart"

    # One annotation per course.
    course_id = CourseKeyField(unique=True, max_length=128, db_index=True)
    annotation = models.TextField(null=True)

    def __unicode__(self):
        # pylint: disable=no-member
        return u"{} : {}".format(self.course_id.to_deprecated_string(), self.annotation)
class PaidCourseRegistrationAnnotation(models.Model):
    """
    A model that maps course_id to an additional annotation.  This is specifically needed because when Stanford
    generates report for the paid courses, each report item must contain the payment account associated with a course.
    And unfortunately we didn't have the concept of a "SKU" or stock item where we could keep this association,
    so this is to retrofit it.
    """
    class Meta(object):
        app_label = "shoppingcart"

    # One annotation per course.
    course_id = CourseKeyField(unique=True, max_length=128, db_index=True)
    annotation = models.TextField(null=True)

    def __unicode__(self):
        # pylint: disable=no-member
        return u"{} : {}".format(self.course_id.to_deprecated_string(), self.annotation)
class CertificateItem(OrderItem):
    """
    This is an inventory item for purchasing certificates.
    """
    class Meta(object):
        app_label = "shoppingcart"

    course_id = CourseKeyField(max_length=128, db_index=True)
    course_enrollment = models.ForeignKey(CourseEnrollment)
    mode = models.SlugField()

    @receiver(UNENROLL_DONE)
    def refund_cert_callback(sender, course_enrollment=None, skip_refund=False, **kwargs):  # pylint: disable=no-self-argument,unused-argument
        """
        When a CourseEnrollment object calls its unenroll method, this function checks to see if that unenrollment
        occurred in a verified certificate that was within the refund deadline.  If so, it actually performs the
        refund.

        Returns the refunded certificate on a successful refund; else, it returns nothing.
        """
        # Only refund verified cert unenrollments that are within bounds of the expiration date
        if (not course_enrollment.refundable()) or skip_refund:
            return

        # NOTE(review): `user_id=course_enrollment.user` passes a User
        # instance to a *_id lookup — Django coerces this to the pk, but
        # `user=` would be the conventional spelling.  TODO confirm.
        target_certs = CertificateItem.objects.filter(course_id=course_enrollment.course_id, user_id=course_enrollment.user, status='purchased', mode='verified')
        try:
            target_cert = target_certs[0]
        except IndexError:
            log.warning(
                u"Matching CertificateItem not found while trying to refund.  User %s, Course %s",
                course_enrollment.user,
                course_enrollment.course_id,
            )
            return
        target_cert.status = 'refunded'
        target_cert.refund_requested_time = datetime.now(pytz.utc)
        target_cert.save()
        target_cert.order.refund()

        order_number = target_cert.order_id
        # send billing an email so they can handle refunding
        subject = _("[Refund] User-Requested Refund")
        message = "User {user} ({user_email}) has requested a refund on Order #{order_number}.".format(user=course_enrollment.user,
                                                                                                       user_email=course_enrollment.user.email,
                                                                                                       order_number=order_number)
        to_email = [settings.PAYMENT_SUPPORT_EMAIL]
        from_email = configuration_helpers.get_value('payment_support_email', settings.PAYMENT_SUPPORT_EMAIL)
        try:
            send_mail(subject, message, from_email, to_email, fail_silently=False)
        except Exception as exception:  # pylint: disable=broad-except
            # The refund already succeeded; a failed notification email is
            # logged but does not abort the refund.
            err_str = ('Failed sending email to billing to request a refund for verified certificate'
                       ' (User {user}, Course {course}, CourseEnrollmentID {ce_id}, Order #{order})\n{exception}')
            log.error(err_str.format(
                user=course_enrollment.user,
                course=course_enrollment.course_id,
                ce_id=course_enrollment.id,
                order=order_number,
                exception=exception,
            ))

        return target_cert

    @classmethod
    @transaction.atomic
    def add_to_order(cls, order, course_id, cost, mode, currency='usd'):
        """
        Add a CertificateItem to an order.

        Returns the CertificateItem object after saving.

        `order` - an order that this item should be added to, generally the cart order
        `course_id` - the course that we would like to purchase as a CertificateItem
        `cost` - the amount the user will be paying for this CertificateItem
        `mode` - the course mode that this certificate is going to be issued for

        This item also creates a new enrollment if none exists for this user and this course.

        Raises:
            InvalidCartItem: if `mode` is not a valid mode for `course_id`.

        Example Usage:
            cart = Order.get_cart_for_user(user)
            CertificateItem.add_to_order(cart, 'edX/Test101/2013_Fall', 30, 'verified')
        """
        super(CertificateItem, cls).add_to_order(order, course_id, cost, currency=currency)

        course_enrollment = CourseEnrollment.get_or_create_enrollment(order.user, course_id)

        # do some validation on the enrollment mode
        valid_modes = CourseMode.modes_for_course_dict(course_id)
        if mode in valid_modes:
            mode_info = valid_modes[mode]
        else:
            msg = u"Mode {mode} does not exist for {course_id}".format(mode=mode, course_id=course_id)
            log.error(msg)
            raise InvalidCartItem(
                _(u"Mode {mode} does not exist for {course_id}").format(mode=mode, course_id=course_id)
            )

        item, _created = cls.objects.get_or_create(
            order=order,
            user=order.user,
            course_id=course_id,
            course_enrollment=course_enrollment,
            mode=mode,
        )
        item.status = order.status
        item.qty = 1
        item.unit_cost = cost
        item.list_price = cost
        course_name = modulestore().get_course(course_id).display_name
        # Translators: In this particular case, mode_name refers to a
        # particular mode (i.e. Honor Code Certificate, Verified Certificate, etc)
        # by which a user could enroll in the given course.
        item.line_desc = _("{mode_name} for course {course}").format(
            mode_name=mode_info.name,
            course=course_name
        )
        item.currency = currency
        order.currency = currency
        order.save()
        item.save()

        # signal course added to cart
        course_enrollment.send_signal(EnrollStatusChange.paid_start, cost=cost, currency=currency)

        return item

    def purchased_callback(self):
        """
        When purchase goes through, activate and update the course enrollment for the correct mode.
        """
        self.course_enrollment.change_mode(self.mode)
        self.course_enrollment.activate()
        self.course_enrollment.send_signal(EnrollStatusChange.upgrade_complete,
                                           cost=self.unit_cost, currency=self.currency)

    def additional_instruction_text(self):
        """
        Build the receipt text shown after purchase: an identity-verification
        reminder (for verified enrollments) plus refund-policy instructions.
        """
        verification_reminder = ""
        refund_reminder_msg = _("You can unenroll in the course and receive a full refund for 14 days after the course "
                                "start date. ")
        is_enrollment_mode_verified = self.course_enrollment.is_verified_enrollment()
        is_professional_mode_verified = self.course_enrollment.is_professional_enrollment()

        if is_enrollment_mode_verified:
            domain = configuration_helpers.get_value('SITE_NAME', settings.SITE_NAME)
            path = reverse('verify_student_verify_now', kwargs={'course_id': unicode(self.course_id)})
            verification_url = "http://{domain}{path}".format(domain=domain, path=path)

            verification_reminder = _(
                "If you haven't verified your identity yet, please start the verification process ({verification_url})."
            ).format(verification_url=verification_url)

        if is_professional_mode_verified:
            # Professional enrollments have a shorter (2-day) refund window.
            refund_reminder_msg = _("You can unenroll in the course and receive a full refund for 2 days after the "
                                    "course start date. ")

        refund_reminder = _(
            "{refund_reminder_msg}"
            "To receive your refund, contact {billing_email}. "
            "Please include your order number in your email. "
            "Please do NOT include your credit card information."
        ).format(
            refund_reminder_msg=refund_reminder_msg,
            billing_email=settings.PAYMENT_SUPPORT_EMAIL
        )

        # Need this to be unicode in case the reminder strings
        # have been translated and contain non-ASCII unicode
        return u"{verification_reminder} {refund_reminder}".format(
            verification_reminder=verification_reminder,
            refund_reminder=refund_reminder
        )

    @classmethod
    def verified_certificates_count(cls, course_id, status):
        """Return a queryset of CertificateItem for every verified enrollment in course_id with the given status."""
        return use_read_replica_if_available(
            CertificateItem.objects.filter(course_id=course_id, mode='verified', status=status).count())

    # TODO combine these three methods into one
    @classmethod
    def verified_certificates_monetary_field_sum(cls, course_id, status, field_to_aggregate):
        """
        Returns a Decimal indicating the total sum of field_to_aggregate for all verified certificates with a particular status.

        Sample usages:
        - status 'refunded' and field_to_aggregate 'unit_cost' will give the total amount of money refunded for course_id
        - status 'purchased' and field_to_aggregate 'service_fees' gives the sum of all service fees for purchased certificates
        etc
        """
        query = use_read_replica_if_available(
            CertificateItem.objects.filter(course_id=course_id, mode='verified', status=status)).aggregate(Sum(field_to_aggregate))[field_to_aggregate + '__sum']
        if query is None:
            return Decimal(0.00)
        else:
            return query

    @classmethod
    def verified_certificates_contributing_more_than_minimum(cls, course_id):
        """
        Count purchased verified certificates whose unit cost exceeds the
        minimum verified price for the course (in USD).
        """
        return use_read_replica_if_available(
            CertificateItem.objects.filter(
                course_id=course_id,
                mode='verified',
                status='purchased',
                unit_cost__gt=(CourseMode.min_course_price_for_verified_for_currency(course_id, 'usd')))).count()

    def analytics_data(self):
        """Simple function used to construct analytics data for the OrderItem.

        If the CertificateItem is associated with a course, additional fields will be populated with
        course information.  If there is a mode associated with the certificate, it is included in the SKU.

        Returns
            A dictionary containing analytics data for this OrderItem.
        """
        data = super(CertificateItem, self).analytics_data()
        sku = data['sku']
        if self.course_id != CourseKeyField.Empty:
            data['name'] = unicode(self.course_id)
            data['category'] = unicode(self.course_id.org)
        if self.mode:
            data['sku'] = sku + u'.' + unicode(self.mode)
        return data
class DonationConfiguration(ConfigurationModel):
    """Configure whether donations are enabled on the site."""
    # NOTE(review): the actual on/off switch is presumably the "enabled" field
    # inherited from ConfigurationModel -- confirm against that base class.
    class Meta(ConfigurationModel.Meta):
        app_label = "shoppingcart"
class Donation(OrderItem):
    """A donation made by a user.
    Donations can be made for a specific course or to the organization as a whole.
    Users can choose the donation amount.
    """
    class Meta(object):
        app_label = "shoppingcart"
    # Types of donations
    DONATION_TYPES = (
        ("general", "A general donation"),
        ("course", "A donation to a particular course")
    )
    # The type of donation
    donation_type = models.CharField(max_length=32, default="general", choices=DONATION_TYPES)
    # If a donation is made for a specific course, then store the course ID here.
    # If the donation is made to the organization as a whole,
    # set this field to CourseKeyField.Empty
    course_id = CourseKeyField(max_length=255, db_index=True)
    @classmethod
    @transaction.atomic
    def add_to_order(cls, order, donation_amount, course_id=None, currency='usd'):
        """Add a donation to an order.
        Runs inside an atomic transaction so a failure while creating the item
        leaves the order untouched.
        Args:
            order (Order): The order to add this donation to.
            donation_amount (Decimal): The amount the user is donating.
        Keyword Args:
            course_id (CourseKey): If provided, associate this donation with a particular course.
            currency (str): The currency used for the donation.
        Raises:
            InvalidCartItem: The provided course ID is not valid.
        Returns:
            Donation
        """
        # This will validate the currency but won't actually add the item to the order.
        super(Donation, cls).add_to_order(order, currency=currency)
        # Create a line item description, including the name of the course
        # if this is a per-course donation.
        # This will raise an exception if the course can't be found.
        description = cls._line_item_description(course_id=course_id)
        params = {
            "order": order,
            "user": order.user,
            "status": order.status,
            "qty": 1,
            "unit_cost": donation_amount,
            "currency": currency,
            "line_desc": description
        }
        # Only course-specific donations carry a course_id; the field stays at
        # its default (CourseKeyField.Empty) for general donations.
        if course_id is not None:
            params["course_id"] = course_id
            params["donation_type"] = "course"
        else:
            params["donation_type"] = "general"
        return cls.objects.create(**params)
    def purchased_callback(self):
        """Donations do not need to be fulfilled, so this method does nothing."""
        pass
    def generate_receipt_instructions(self):
        """Provide information about tax-deductible donations in the receipt.
        Returns:
            tuple of (Donation, unicode)
        """
        return self.pk_with_subclass, set([self._tax_deduction_msg()])
    def additional_instruction_text(self, **kwargs):
        """Provide information about tax-deductible donations in the confirmation email.
        Returns:
            unicode
        """
        return self._tax_deduction_msg()
    def _tax_deduction_msg(self):
        """Return the translated version of the tax deduction message.
        Returns:
            unicode
        """
        return _(
            u"We greatly appreciate this generous contribution and your support of the {platform_name} mission. "
            u"This receipt was prepared to support charitable contributions for tax purposes. "
            u"We confirm that neither goods nor services were provided in exchange for this gift."
        ).format(platform_name=configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME))
    @classmethod
    def _line_item_description(cls, course_id=None):
        """Create a line-item description for the donation.
        Includes the course display name if provided.
        Keyword Arguments:
            course_id (CourseKey)
        Raises:
            CourseDoesNotExistException: The course ID is not valid.
        Returns:
            unicode
        """
        # If a course ID is provided, include the display name of the course
        # in the line item description.
        if course_id is not None:
            course = modulestore().get_course(course_id)
            if course is None:
                msg = u"Could not find a course with the ID '{course_id}'".format(course_id=course_id)
                log.error(msg)
                raise CourseDoesNotExistException(
                    _(u"Could not find a course with the ID '{course_id}'").format(course_id=course_id)
                )
            return _(u"Donation for {course}").format(course=course.display_name)
        # The donation is for the organization as a whole, not a specific course
        else:
            return _(u"Donation for {platform_name}").format(
                platform_name=configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME),
            )
    @property
    def single_item_receipt_context(self):
        """Template context flag telling the receipt page this order holds a donation."""
        return {
            'receipt_has_donation_item': True,
        }
    def analytics_data(self):
        """Simple function used to construct analytics data for the OrderItem.
        If the donation is associated with a course, additional fields will be populated with
        course information. When no name or category is specified by the implementation, the
        platform name is used as a default value for required event fields, to declare that
        the Order is specific to the platform, rather than a specific product name or category.
        Returns
            A dictionary containing analytics data for this OrderItem.
        """
        data = super(Donation, self).analytics_data()
        if self.course_id != CourseKeyField.Empty:
            data['name'] = unicode(self.course_id)
            data['category'] = unicode(self.course_id.org)
        else:
            data['name'] = configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME)
            data['category'] = configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME)
        return data
    @property
    def pdf_receipt_display_name(self):
        """
        How to display this item on a PDF printed receipt file.
        """
        return self._line_item_description(course_id=self.course_id)
|
louyihua/edx-platform
|
lms/djangoapps/shoppingcart/models.py
|
Python
|
agpl-3.0
| 91,614
|
[
"VisIt"
] |
b6e96c1a57c3b7548dc5c9d40ddd185de0139f802dda84156bb9b82cf968c6c1
|
#-------------------------------------------------------------------------------
# . File : QMCallerGaussian.py
# . Program : MolarisTools
# . Copyright : USC, Mikolaj Feliks (2015-2018)
# . License : GNU GPL v3.0 (http://www.gnu.org/licenses/gpl-3.0.en.html)
#-------------------------------------------------------------------------------
import subprocess, os.path, exceptions, collections
from MolarisTools.Utilities import WriteData
from MolarisTools.Parser import GaussianOutputFile
from MolarisTools.QMMM import QMCaller, CS_MULLIKEN, CS_CHELPG, CS_MERZKOLLMAN
# . Simple container for the x, y, z force components acting on one atom or
# . point charge (consumed by QMCallerGaussian.Run).
Force = collections.namedtuple ("Force" , "x y z")
class QMCallerGaussian (QMCaller):
    """A class to provide communication between Molaris and Gaussian.

    An input file is written on construction; Run () executes Gaussian and
    collects the final energy, forces and charges from the output file.
    """
    # . Options specific to Gaussian
    # . Memory is given in GB
    # . env may define variables such as GAUSS_EXEDIR and GAUSS_SCRDIR
    # . restart means to reuse the wavefunction from the checkpoint file
    defaultAttributes = {
        "env"                     :  None ,
        "ncpu"                    :  1 ,
        "memory"                  :  1 ,
        "method"                  :  "B3LYP/6-31G*" ,
        "restart"                 :  False ,
        "extraOptions"            :  None ,
        "fileGaussianError"       :  "job.err" ,
        "fileGaussianInput"       :  "job.inp" ,
        "fileGaussianOutput"      :  "job.log" ,
        "fileGaussianCheckpoint"  :  "job.chk" ,
        "SCFConvergence"          :  10 ,
        "pathGaussian"            :  os.path.join (os.environ["HOME"], "local", "opt", "g03", "g03") ,
    }
    defaultAttributes.update (QMCaller.defaultAttributes)

    def __init__ (self, **keywordArguments):
        """Constructor.

        Raises StandardError when point charges (QM/MM) are requested together
        with a semiempirical method, which is not supported here.
        """
        super (QMCallerGaussian, self).__init__ (**keywordArguments)
        # . Determine if a semiempirical potential is used
        method = self.method[:3]
        if method in ("AM1", "PM3", ) and self.qmmm:
            raise exceptions.StandardError ("Point charges cannot be used with semiempirical methods.")
        # . Reuse the wavefunction only if the checkpoint file already exists
        if self.fileGaussianCheckpoint:
            self.restart = os.path.exists (self.fileGaussianCheckpoint) and self.restart
        else:
            self.restart = False
        # . Prepare a Gaussian input file
        self._WriteInput ()

    def _WriteInput (self):
        """Write a Gaussian input file.

        The file consists of the link0 section, the route line, a title,
        charge/multiplicity, the geometry and, optionally, the COSMO epsilon
        and QM/MM point charges.
        """
        # . Write job control; "%%" in the format strings renders a literal "%"
        data = []
        if self.ncpu > 1:
            data.append ("%%NProcShared=%d\n" % self.ncpu)
        if self.memory:
            data.append ("%%mem=%dgb\n" % self.memory)
        if self.fileGaussianCheckpoint:
            data.append ("%%chk=%s\n" % self.fileGaussianCheckpoint)
        # . Set up a charge scheme
        schemes = {
            CS_MULLIKEN     :  "" ,
            CS_CHELPG       :  "POP=CHELPG" ,
            CS_MERZKOLLMAN  :  "POP=MK" ,
        }
        # . "in" instead of the Python-2-only dict.has_key
        if self.chargeScheme not in schemes:
            raise exceptions.StandardError ("Charge scheme %s is undefined." % self.chargeScheme)
        chargeScheme = schemes[self.chargeScheme]
        # . Include extra options, if any present
        if self.extraOptions:
            if isinstance (self.extraOptions, tuple):
                extraOptions = " ".join (self.extraOptions)
            else:
                extraOptions = self.extraOptions
        else:
            extraOptions = ""
        # . Write header
        if self.qmmm:
            background = "Charge Prop=(Field,Read)"
        elif self.cosmo:
            background = "SCRF=(Solvent=Water,Read)"
        else:
            background = ""
        if self.restart:
            restart = "Guess=Read"
        else:
            restart = ""
        # . 6 is Gaussian's default convergence; only request a non-default one
        if self.SCFConvergence != 6:
            scfConvergence = "SCF=(Conver=%d)" % self.SCFConvergence
        else:
            scfConvergence = ""
        keywords = (
            self.method ,
            "NoSymm" ,
            "Force" ,
            background ,
            restart ,
            chargeScheme ,
            scfConvergence ,
            extraOptions ,
        )
        header = " ".join (keywords)
        data.append ("#P " + header + "\n\n")
        mdstep = ""
        if hasattr (self.molaris, "mdstep"):
            mdstep = " (MD step: %d)" % self.molaris.mdstep
        data.append ("Input file generated by MolarisTools%s.\n\n" % mdstep)
        data.append ("%d %d\n" % (self.charge, self.multiplicity))
        # . Write geometry
        atoms = self.molaris.qatoms + self.molaris.latoms
        for atom in atoms:
            data.append ("%2s %16.10f %16.10f %16.10f\n" % (atom.label, atom.x, atom.y, atom.z))
        data.append ("\n")
        # . If cosmo=True, write epsilon
        if self.cosmo:
            data.append ("eps=%f\n\n" % self.dielectric)
        # . Write point charges
        if self.qmmm:
            pointCharges = self.molaris.patoms + self.molaris.watoms
            for atom in pointCharges:
                data.append ("%16.10f %16.10f %16.10f %16.10f\n" % (atom.x, atom.y, atom.z, atom.charge))
            data.append ("\n")
            # . Write points where the electric field is to be calculated
            for atom in pointCharges:
                data.append ("%16.10f %16.10f %16.10f\n" % (atom.x, atom.y, atom.z))
            data.append ("\n")
        # . Finish up
        WriteData (data, self.fileGaussianInput)

    def Run (self):
        """Run the calculation and parse energies, forces and charges."""
        fileError = open (self.fileGaussianError, "w")
        # . try/finally so the error file is closed even when Gaussian fails
        try:
            if self.env:
                subprocess.check_call ([self.pathGaussian, self.fileGaussianInput], stdout=fileError, stderr=fileError, env=self.env)
            else:
                subprocess.check_call ([self.pathGaussian, self.fileGaussianInput], stdout=fileError, stderr=fileError)
        finally:
            fileError.close ()
        # . Parse the output file
        gaussian = GaussianOutputFile (filename=self.fileGaussianOutput)
        # . Important: if there are point charges, remove their self interaction energy from the final QM energy
        self.Efinal = (gaussian.Efinal - gaussian.Echrg) if self.qmmm else gaussian.Efinal
        # . Include forces on QM atoms
        self.forces = gaussian.forces
        # . Include forces on point charges (force = field * charge)
        if hasattr (gaussian, "pointCharges"):
            mmforces = []
            for pc in gaussian.pointCharges:
                force = Force (
                    x = pc.ex * pc.charge ,
                    y = pc.ey * pc.charge ,
                    z = pc.ez * pc.charge , )
                mmforces.append (force)
            self.mmforces = mmforces
        # . Include charges
        scheme = {
            CS_MULLIKEN     :  gaussian.charges    if hasattr (gaussian, "charges"   ) else [] ,
            CS_CHELPG       :  gaussian.espcharges if hasattr (gaussian, "espcharges") else [] ,
            CS_MERZKOLLMAN  :  gaussian.espcharges if hasattr (gaussian, "espcharges") else [] , }
        self.charges = scheme[self.chargeScheme]
        # . Include timing information
        self.jobtime = gaussian.jobtime
        # . Finish up
        self._Finalize ()
#===============================================================================
# . Main program
#===============================================================================
if __name__ == "__main__":
    # . This module is a library; there is nothing to execute directly.
    pass
|
mfx9/MolarisTools
|
MolarisTools/QMMM/QMCallerGaussian.py
|
Python
|
gpl-3.0
| 7,677
|
[
"Gaussian"
] |
8eb4f29bdecabad18e6679cc2b6bb4961f8e1b9e0d4814e4e24f4e9995804b96
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import unicode_literals
from deepchem.feat import Featurizer
class RawFeaturizer(Featurizer):
  """Pass-through featurizer: return each molecule unchanged or as SMILES."""

  def __init__(self, smiles=False):
    # When True, _featurize returns the SMILES string instead of the Mol object.
    self.smiles = smiles

  def _featurize(self, mol):
    """Return the RDKit Mol itself, or its SMILES string when self.smiles is set."""
    from rdkit import Chem

    if self.smiles:
      return Chem.MolToSmiles(mol)
    return mol
|
ktaneishi/deepchem
|
deepchem/feat/raw_featurizer.py
|
Python
|
mit
| 393
|
[
"RDKit"
] |
3ca9cf586fec67f996d3b5c71bca609f0ad383574ca3bb8c63c4cdac44094c3b
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import packaging.version
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service import (
FeaturestoreOnlineServingServiceAsyncClient,
)
from google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service import (
FeaturestoreOnlineServingServiceClient,
)
from google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service import (
transports,
)
from google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service.transports.base import (
_GOOGLE_AUTH_VERSION,
)
from google.cloud.aiplatform_v1beta1.types import feature_selector
from google.cloud.aiplatform_v1beta1.types import featurestore_online_service
from google.oauth2 import service_account
import google.auth
# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively
# through google-api-core:
# - Delete the auth "less than" test cases
# - Delete these pytest markers (Make the "greater than or equal to" tests the default).
# Skip markers keyed off the installed google-auth version; see the TODO above
# (delete once google-auth >= 1.25.0 is required transitively).
requires_google_auth_lt_1_25_0 = pytest.mark.skipif(
    packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"),
    reason="This test requires google-auth < 1.25.0",
)
requires_google_auth_gte_1_25_0 = pytest.mark.skipif(
    packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"),
    reason="This test requires google-auth >= 1.25.0",
)
def client_cert_source_callback():
    """Dummy mTLS callback returning placeholder certificate and key bytes."""
    return (b"cert bytes", b"key bytes")
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
    """Return a non-localhost stand-in endpoint so mTLS rewriting is observable."""
    if "localhost" in client.DEFAULT_ENDPOINT:
        return "foo.googleapis.com"
    return client.DEFAULT_ENDPOINT
def test__get_default_mtls_endpoint():
    """_get_default_mtls_endpoint should only rewrite *.googleapis.com hosts."""
    convert = FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint
    # None passes through untouched.
    assert convert(None) is None
    # A plain googleapis.com host gains the "mtls." label ...
    assert convert("example.googleapis.com") == "example.mtls.googleapis.com"
    # ... and an already-mtls host is left alone (idempotent).
    assert convert("example.mtls.googleapis.com") == "example.mtls.googleapis.com"
    # Same for sandbox endpoints.
    assert convert("example.sandbox.googleapis.com") == "example.mtls.sandbox.googleapis.com"
    assert convert("example.mtls.sandbox.googleapis.com") == "example.mtls.sandbox.googleapis.com"
    # Non-Google hosts are never rewritten.
    assert convert("api.example.com") == "api.example.com"
@pytest.mark.parametrize(
    "client_class",
    [
        FeaturestoreOnlineServingServiceClient,
        FeaturestoreOnlineServingServiceAsyncClient,
    ],
)
def test_featurestore_online_serving_service_client_from_service_account_info(
    client_class,
):
    """from_service_account_info() should build a client around the parsed credentials."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_info"
    ) as factory:
        factory.return_value = creds
        client = client_class.from_service_account_info({"valid": True})
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        assert client.transport._host == "aiplatform.googleapis.com:443"
@pytest.mark.parametrize(
    "transport_class,transport_name",
    [
        (transports.FeaturestoreOnlineServingServiceGrpcTransport, "grpc"),
        (
            transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
def test_featurestore_online_serving_service_client_service_account_always_use_jwt(
    transport_class, transport_name
):
    """Transports should request self-signed JWTs iff always_use_jwt_access=True."""
    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=True)
        use_jwt.assert_called_once_with(True)
    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=False)
        use_jwt.assert_not_called()
@pytest.mark.parametrize(
    "client_class",
    [
        FeaturestoreOnlineServingServiceClient,
        FeaturestoreOnlineServingServiceAsyncClient,
    ],
)
def test_featurestore_online_serving_service_client_from_service_account_file(
    client_class,
):
    """Both file-based factories should defer to Credentials.from_service_account_file."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_file"
    ) as factory:
        factory.return_value = creds
        # from_service_account_json is an alias of from_service_account_file.
        for make_client in (
            client_class.from_service_account_file,
            client_class.from_service_account_json,
        ):
            client = make_client("dummy/file/path.json")
            assert client.transport._credentials == creds
            assert isinstance(client, client_class)
        assert client.transport._host == "aiplatform.googleapis.com:443"
def test_featurestore_online_serving_service_client_get_transport_class():
    """get_transport_class() should default to gRPC and resolve "grpc" by name."""
    default_transport = FeaturestoreOnlineServingServiceClient.get_transport_class()
    assert default_transport in [
        transports.FeaturestoreOnlineServingServiceGrpcTransport,
    ]
    named_transport = FeaturestoreOnlineServingServiceClient.get_transport_class("grpc")
    assert named_transport == transports.FeaturestoreOnlineServingServiceGrpcTransport
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (
            FeaturestoreOnlineServingServiceClient,
            transports.FeaturestoreOnlineServingServiceGrpcTransport,
            "grpc",
        ),
        (
            FeaturestoreOnlineServingServiceAsyncClient,
            transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
@mock.patch.object(
    FeaturestoreOnlineServingServiceClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(FeaturestoreOnlineServingServiceClient),
)
@mock.patch.object(
    FeaturestoreOnlineServingServiceAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(FeaturestoreOnlineServingServiceAsyncClient),
)
def test_featurestore_online_serving_service_client_client_options(
    client_class, transport_class, transport_name
):
    """client_options and GOOGLE_API_USE_MTLS_ENDPOINT should drive transport setup."""
    # Check that if channel is provided we won't create a new one.
    with mock.patch.object(
        FeaturestoreOnlineServingServiceClient, "get_transport_class"
    ) as gtc:
        transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
        client = client_class(transport=transport)
        gtc.assert_not_called()
    # Check that if channel is provided via str we will create a new one.
    with mock.patch.object(
        FeaturestoreOnlineServingServiceClient, "get_transport_class"
    ) as gtc:
        client = client_class(transport=transport_name)
        gtc.assert_called()
    # Check the case api_endpoint is provided.
    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class()
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class()
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_MTLS_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
    # unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
        with pytest.raises(MutualTLSChannelError):
            client = client_class()
    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
    ):
        with pytest.raises(ValueError):
            client = client_class()
    # Check the case quota_project_id is provided
    options = client_options.ClientOptions(quota_project_id="octopus")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id="octopus",
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,use_client_cert_env",
    [
        (
            FeaturestoreOnlineServingServiceClient,
            transports.FeaturestoreOnlineServingServiceGrpcTransport,
            "grpc",
            "true",
        ),
        (
            FeaturestoreOnlineServingServiceAsyncClient,
            transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
            "true",
        ),
        (
            FeaturestoreOnlineServingServiceClient,
            transports.FeaturestoreOnlineServingServiceGrpcTransport,
            "grpc",
            "false",
        ),
        (
            FeaturestoreOnlineServingServiceAsyncClient,
            transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
            "false",
        ),
    ],
)
@mock.patch.object(
    FeaturestoreOnlineServingServiceClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(FeaturestoreOnlineServingServiceClient),
)
@mock.patch.object(
    FeaturestoreOnlineServingServiceAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(FeaturestoreOnlineServingServiceAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_featurestore_online_serving_service_client_mtls_env_auto(
    client_class, transport_class, transport_name, use_client_cert_env
):
    """With MTLS_ENDPOINT=auto, cert availability plus the env flag pick endpoint/cert."""
    # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
    # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
    # Check the case client_cert_source is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        options = client_options.ClientOptions(
            client_cert_source=client_cert_source_callback
        )
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options)
            if use_client_cert_env == "false":
                expected_client_cert_source = None
                expected_host = client.DEFAULT_ENDPOINT
            else:
                expected_client_cert_source = client_cert_source_callback
                expected_host = client.DEFAULT_MTLS_ENDPOINT
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=expected_host,
                scopes=None,
                client_cert_source_for_mtls=expected_client_cert_source,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case ADC client cert is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=True,
            ):
                with mock.patch(
                    "google.auth.transport.mtls.default_client_cert_source",
                    return_value=client_cert_source_callback,
                ):
                    if use_client_cert_env == "false":
                        expected_host = client.DEFAULT_ENDPOINT
                        expected_client_cert_source = None
                    else:
                        expected_host = client.DEFAULT_MTLS_ENDPOINT
                        expected_client_cert_source = client_cert_source_callback
                    patched.return_value = None
                    client = client_class()
                    patched.assert_called_once_with(
                        credentials=None,
                        credentials_file=None,
                        host=expected_host,
                        scopes=None,
                        client_cert_source_for_mtls=expected_client_cert_source,
                        quota_project_id=None,
                        client_info=transports.base.DEFAULT_CLIENT_INFO,
                        always_use_jwt_access=True,
                    )
    # Check the case client_cert_source and ADC client cert are not provided.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=False,
            ):
                patched.return_value = None
                client = client_class()
                patched.assert_called_once_with(
                    credentials=None,
                    credentials_file=None,
                    host=client.DEFAULT_ENDPOINT,
                    scopes=None,
                    client_cert_source_for_mtls=None,
                    quota_project_id=None,
                    client_info=transports.base.DEFAULT_CLIENT_INFO,
                    always_use_jwt_access=True,
                )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (
            FeaturestoreOnlineServingServiceClient,
            transports.FeaturestoreOnlineServingServiceGrpcTransport,
            "grpc",
        ),
        (
            FeaturestoreOnlineServingServiceAsyncClient,
            transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
def test_featurestore_online_serving_service_client_client_options_scopes(
    client_class, transport_class, transport_name
):
    """User-supplied OAuth scopes should be forwarded to the transport untouched."""
    # Check the case scopes are provided.
    options = client_options.ClientOptions(scopes=["1", "2"],)
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=["1", "2"],
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (
            FeaturestoreOnlineServingServiceClient,
            transports.FeaturestoreOnlineServingServiceGrpcTransport,
            "grpc",
        ),
        (
            FeaturestoreOnlineServingServiceAsyncClient,
            transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
def test_featurestore_online_serving_service_client_client_options_credentials_file(
    client_class, transport_class, transport_name
):
    """A credentials_file option should be handed to the transport for loading."""
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(credentials_file="credentials.json")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
def test_featurestore_online_serving_service_client_client_options_from_dict():
    """A plain dict of client options should behave like a ClientOptions instance."""
    transport_init_path = (
        "google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service"
        ".transports.FeaturestoreOnlineServingServiceGrpcTransport.__init__"
    )
    with mock.patch(transport_init_path) as grpc_transport:
        grpc_transport.return_value = None
        client = FeaturestoreOnlineServingServiceClient(
            client_options={"api_endpoint": "squid.clam.whelk"}
        )
        grpc_transport.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
def test_read_feature_values(
    transport: str = "grpc",
    request_type=featurestore_online_service.ReadFeatureValuesRequest,
):
    """read_feature_values should call the gRPC stub once and return the response type."""
    client = FeaturestoreOnlineServingServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.read_feature_values), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = featurestore_online_service.ReadFeatureValuesResponse()
        response = client.read_feature_values(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == featurestore_online_service.ReadFeatureValuesRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, featurestore_online_service.ReadFeatureValuesResponse)
def test_read_feature_values_from_dict():
    """read_feature_values should also accept a dict-typed request."""
    test_read_feature_values(request_type=dict)
def test_read_feature_values_empty_call():
    """Calling with no request at all should still send the default request proto."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = FeaturestoreOnlineServingServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.read_feature_values), "__call__"
    ) as call:
        client.read_feature_values()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == featurestore_online_service.ReadFeatureValuesRequest()
@pytest.mark.asyncio
async def test_read_feature_values_async(
    transport: str = "grpc_asyncio",
    request_type=featurestore_online_service.ReadFeatureValuesRequest,
):
    """Async variant: verify ReadFeatureValues reaches the stub and returns the response type."""
    client = FeaturestoreOnlineServingServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.read_feature_values), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        # FakeUnaryUnaryCall makes the mocked stub awaitable.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            featurestore_online_service.ReadFeatureValuesResponse()
        )
        response = await client.read_feature_values(request)
    # Establish that the underlying gRPC stub method was called.
    assert len(call.mock_calls)
    _, args, _ = call.mock_calls[0]
    assert args[0] == featurestore_online_service.ReadFeatureValuesRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, featurestore_online_service.ReadFeatureValuesResponse)
@pytest.mark.asyncio
async def test_read_feature_values_async_from_dict():
    """Re-run the async unary test with a dict request to exercise coercion."""
    await test_read_feature_values_async(request_type=dict)
def test_read_feature_values_field_headers():
    """Verify routing metadata (x-goog-request-params) is derived from request.entity_type."""
    client = FeaturestoreOnlineServingServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = featurestore_online_service.ReadFeatureValuesRequest()
    request.entity_type = "entity_type/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.read_feature_values), "__call__"
    ) as call:
        call.return_value = featurestore_online_service.ReadFeatureValuesResponse()
        client.read_feature_values(request)
    # Establish that the underlying gRPC stub method was called.
    assert len(call.mock_calls) == 1
    _, args, _ = call.mock_calls[0]
    assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "entity_type=entity_type/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_read_feature_values_field_headers_async():
    """Async variant: verify routing metadata is derived from request.entity_type."""
    client = FeaturestoreOnlineServingServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = featurestore_online_service.ReadFeatureValuesRequest()
    request.entity_type = "entity_type/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.read_feature_values), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            featurestore_online_service.ReadFeatureValuesResponse()
        )
        await client.read_feature_values(request)
    # Establish that the underlying gRPC stub method was called.
    assert len(call.mock_calls)
    _, args, _ = call.mock_calls[0]
    assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "entity_type=entity_type/value",) in kw["metadata"]
def test_read_feature_values_flattened():
    """Verify flattened keyword arguments are expanded into the request message."""
    client = FeaturestoreOnlineServingServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.read_feature_values), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = featurestore_online_service.ReadFeatureValuesResponse()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.read_feature_values(entity_type="entity_type_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].entity_type == "entity_type_value"
def test_read_feature_values_flattened_error():
    """Verify passing both a request object and flattened fields raises ValueError."""
    client = FeaturestoreOnlineServingServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.read_feature_values(
            featurestore_online_service.ReadFeatureValuesRequest(),
            entity_type="entity_type_value",
        )
@pytest.mark.asyncio
async def test_read_feature_values_flattened_async():
    """Async variant: verify flattened kwargs are expanded into the request message.

    Fix: the original assigned a plain (non-awaitable) response to
    ``call.return_value`` and then immediately overwrote it with the
    ``FakeUnaryUnaryCall`` wrapper; the dead first assignment is removed.
    """
    client = FeaturestoreOnlineServingServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.read_feature_values), "__call__"
    ) as call:
        # Designate an appropriate (awaitable) return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            featurestore_online_service.ReadFeatureValuesResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.read_feature_values(entity_type="entity_type_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].entity_type == "entity_type_value"
@pytest.mark.asyncio
async def test_read_feature_values_flattened_error_async():
    """Async variant: request object plus flattened fields must raise ValueError."""
    client = FeaturestoreOnlineServingServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.read_feature_values(
            featurestore_online_service.ReadFeatureValuesRequest(),
            entity_type="entity_type_value",
        )
def test_streaming_read_feature_values(
    transport: str = "grpc",
    request_type=featurestore_online_service.StreamingReadFeatureValuesRequest,
):
    """Verify the server-streaming call reaches the stub and yields response messages."""
    client = FeaturestoreOnlineServingServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.streaming_read_feature_values), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        # A streaming method returns an iterator of response messages.
        call.return_value = iter(
            [featurestore_online_service.ReadFeatureValuesResponse()]
        )
        response = client.streaming_read_feature_values(request)
    # Establish that the underlying gRPC stub method was called.
    assert len(call.mock_calls) == 1
    _, args, _ = call.mock_calls[0]
    assert (
        args[0] == featurestore_online_service.StreamingReadFeatureValuesRequest()
    )
    # Establish that the response is the type that we expect.
    for message in response:
        assert isinstance(
            message, featurestore_online_service.ReadFeatureValuesResponse
        )
def test_streaming_read_feature_values_from_dict():
    """Re-run the streaming test with a dict request to exercise coercion."""
    test_streaming_read_feature_values(request_type=dict)
def test_streaming_read_feature_values_empty_call():
    """Verify calling the streaming method with no arguments sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = FeaturestoreOnlineServingServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.streaming_read_feature_values), "__call__"
    ) as call:
        client.streaming_read_feature_values()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert (
            args[0] == featurestore_online_service.StreamingReadFeatureValuesRequest()
        )
@pytest.mark.asyncio
async def test_streaming_read_feature_values_async(
    transport: str = "grpc_asyncio",
    request_type=featurestore_online_service.StreamingReadFeatureValuesRequest,
):
    """Async variant: verify the streaming call reaches the stub and messages can be read."""
    client = FeaturestoreOnlineServingServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.streaming_read_feature_values), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        # Fake a UnaryStreamCall whose read() yields one response message.
        call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True)
        call.return_value.read = mock.AsyncMock(
            side_effect=[featurestore_online_service.ReadFeatureValuesResponse()]
        )
        response = await client.streaming_read_feature_values(request)
    # Establish that the underlying gRPC stub method was called.
    assert len(call.mock_calls)
    _, args, _ = call.mock_calls[0]
    assert (
        args[0] == featurestore_online_service.StreamingReadFeatureValuesRequest()
    )
    # Establish that the response is the type that we expect.
    message = await response.read()
    assert isinstance(message, featurestore_online_service.ReadFeatureValuesResponse)
@pytest.mark.asyncio
async def test_streaming_read_feature_values_async_from_dict():
    """Re-run the async streaming test with a dict request to exercise coercion."""
    await test_streaming_read_feature_values_async(request_type=dict)
def test_streaming_read_feature_values_field_headers():
    """Verify routing metadata is derived from request.entity_type for the streaming call."""
    client = FeaturestoreOnlineServingServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = featurestore_online_service.StreamingReadFeatureValuesRequest()
    request.entity_type = "entity_type/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.streaming_read_feature_values), "__call__"
    ) as call:
        call.return_value = iter(
            [featurestore_online_service.ReadFeatureValuesResponse()]
        )
        client.streaming_read_feature_values(request)
    # Establish that the underlying gRPC stub method was called.
    assert len(call.mock_calls) == 1
    _, args, _ = call.mock_calls[0]
    assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "entity_type=entity_type/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_streaming_read_feature_values_field_headers_async():
    """Async variant: verify routing metadata for the streaming call."""
    client = FeaturestoreOnlineServingServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = featurestore_online_service.StreamingReadFeatureValuesRequest()
    request.entity_type = "entity_type/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.streaming_read_feature_values), "__call__"
    ) as call:
        call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True)
        call.return_value.read = mock.AsyncMock(
            side_effect=[featurestore_online_service.ReadFeatureValuesResponse()]
        )
        await client.streaming_read_feature_values(request)
    # Establish that the underlying gRPC stub method was called.
    assert len(call.mock_calls)
    _, args, _ = call.mock_calls[0]
    assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "entity_type=entity_type/value",) in kw["metadata"]
def test_streaming_read_feature_values_flattened():
    """Verify flattened kwargs are expanded into the streaming request message."""
    client = FeaturestoreOnlineServingServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.streaming_read_feature_values), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = iter(
            [featurestore_online_service.ReadFeatureValuesResponse()]
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.streaming_read_feature_values(entity_type="entity_type_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].entity_type == "entity_type_value"
def test_streaming_read_feature_values_flattened_error():
    """Verify request object plus flattened fields raises ValueError (streaming)."""
    client = FeaturestoreOnlineServingServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.streaming_read_feature_values(
            featurestore_online_service.StreamingReadFeatureValuesRequest(),
            entity_type="entity_type_value",
        )
@pytest.mark.asyncio
async def test_streaming_read_feature_values_flattened_async():
    """Async variant: verify flattened kwargs are expanded into the streaming request.

    Fix: the original assigned an ``iter([...])`` to ``call.return_value`` and
    then immediately overwrote it with the ``aio.UnaryStreamCall`` mock; the
    dead first assignment is removed.
    """
    client = FeaturestoreOnlineServingServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.streaming_read_feature_values), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True)
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.streaming_read_feature_values(
            entity_type="entity_type_value",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].entity_type == "entity_type_value"
@pytest.mark.asyncio
async def test_streaming_read_feature_values_flattened_error_async():
    """Async variant: request object plus flattened fields raises ValueError (streaming)."""
    client = FeaturestoreOnlineServingServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.streaming_read_feature_values(
            featurestore_online_service.StreamingReadFeatureValuesRequest(),
            entity_type="entity_type_value",
        )
def test_credentials_transport_error():
    """Verify the client rejects a transport instance combined with credentials/scopes options."""
    # It is an error to provide credentials and a transport instance.
    transport = transports.FeaturestoreOnlineServingServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = FeaturestoreOnlineServingServiceClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport,
        )
    # It is an error to provide a credentials file and a transport instance.
    transport = transports.FeaturestoreOnlineServingServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = FeaturestoreOnlineServingServiceClient(
            client_options={"credentials_file": "credentials.json"},
            transport=transport,
        )
    # It is an error to provide scopes and a transport instance.
    transport = transports.FeaturestoreOnlineServingServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = FeaturestoreOnlineServingServiceClient(
            client_options={"scopes": ["1", "2"]}, transport=transport,
        )
def test_transport_instance():
    """Verify a client adopts a caller-supplied transport instance verbatim."""
    # A client may be instantiated with a custom transport instance.
    transport = transports.FeaturestoreOnlineServingServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    client = FeaturestoreOnlineServingServiceClient(transport=transport)
    assert client.transport is transport
def test_transport_get_channel():
    """Verify both sync and asyncio transports expose a usable grpc_channel."""
    # A client may be instantiated with a custom transport instance.
    transport = transports.FeaturestoreOnlineServingServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    channel = transport.grpc_channel
    assert channel
    transport = transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    channel = transport.grpc_channel
    assert channel
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.FeaturestoreOnlineServingServiceGrpcTransport,
        transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport,
    ],
)
def test_transport_adc(transport_class):
    """Verify each transport falls back to application default credentials."""
    # Test default credentials are used if not provided.
    with mock.patch.object(google.auth, "default") as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class()
        adc.assert_called_once()
def test_transport_grpc_default():
    """Verify the synchronous gRPC transport is the default."""
    # A client should use the gRPC transport by default.
    client = FeaturestoreOnlineServingServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    assert isinstance(
        client.transport, transports.FeaturestoreOnlineServingServiceGrpcTransport,
    )
def test_featurestore_online_serving_service_base_transport_error():
    """Verify that credentials and credentials_file together raise DuplicateCredentialArgs.

    Fix: the original bound the constructor result to an unused local
    ``transport`` (flake8 F841); the assignment is dropped since the
    constructor is expected to raise before returning.
    """
    # Passing both a credentials object and credentials_file should raise an error
    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
        transports.FeaturestoreOnlineServingServiceTransport(
            credentials=ga_credentials.AnonymousCredentials(),
            credentials_file="credentials.json",
        )
def test_featurestore_online_serving_service_base_transport():
    """Verify every RPC method on the abstract base transport raises NotImplementedError."""
    # Instantiate the base transport.
    with mock.patch(
        "google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service.transports.FeaturestoreOnlineServingServiceTransport.__init__"
    ) as Transport:
        Transport.return_value = None
        transport = transports.FeaturestoreOnlineServingServiceTransport(
            credentials=ga_credentials.AnonymousCredentials(),
        )
    # Every method on the transport should just blindly
    # raise NotImplementedError.
    methods = (
        "read_feature_values",
        "streaming_read_feature_values",
    )
    for method in methods:
        with pytest.raises(NotImplementedError):
            getattr(transport, method)(request=object())
@requires_google_auth_gte_1_25_0
def test_featurestore_online_serving_service_base_transport_with_credentials_file():
    """Verify a credentials file is loaded with the expected scopes (google-auth >= 1.25)."""
    # Instantiate the base transport with a credentials file
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch(
        "google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service.transports.FeaturestoreOnlineServingServiceTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.FeaturestoreOnlineServingServiceTransport(
            credentials_file="credentials.json", quota_project_id="octopus",
        )
        load_creds.assert_called_once_with(
            "credentials.json",
            scopes=None,
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id="octopus",
        )
@requires_google_auth_lt_1_25_0
def test_featurestore_online_serving_service_base_transport_with_credentials_file_old_google_auth():
    """Verify credentials-file loading on google-auth < 1.25 (no default_scopes kwarg)."""
    # Instantiate the base transport with a credentials file
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch(
        "google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service.transports.FeaturestoreOnlineServingServiceTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.FeaturestoreOnlineServingServiceTransport(
            credentials_file="credentials.json", quota_project_id="octopus",
        )
        load_creds.assert_called_once_with(
            "credentials.json",
            scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id="octopus",
        )
def test_featurestore_online_serving_service_base_transport_with_adc():
    """Verify the base transport falls back to ADC when no credentials are given."""
    # Test the default credentials are used if credentials and credentials_file are None.
    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
        "google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service.transports.FeaturestoreOnlineServingServiceTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.FeaturestoreOnlineServingServiceTransport()
        adc.assert_called_once()
@requires_google_auth_gte_1_25_0
def test_featurestore_online_serving_service_auth_adc():
    """Verify the client requests ADC with default_scopes (google-auth >= 1.25)."""
    # If no credentials are provided, we should use ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        FeaturestoreOnlineServingServiceClient()
        adc.assert_called_once_with(
            scopes=None,
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id=None,
        )
@requires_google_auth_lt_1_25_0
def test_featurestore_online_serving_service_auth_adc_old_google_auth():
    """Verify the client requests ADC with explicit scopes on google-auth < 1.25."""
    # If no credentials are provided, we should use ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        FeaturestoreOnlineServingServiceClient()
        adc.assert_called_once_with(
            scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id=None,
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.FeaturestoreOnlineServingServiceGrpcTransport,
        transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport,
    ],
)
@requires_google_auth_gte_1_25_0
def test_featurestore_online_serving_service_transport_auth_adc(transport_class):
    """Verify transports forward scopes/quota_project_id to ADC (google-auth >= 1.25)."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        adc.assert_called_once_with(
            scopes=["1", "2"],
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id="octopus",
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.FeaturestoreOnlineServingServiceGrpcTransport,
        transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport,
    ],
)
@requires_google_auth_lt_1_25_0
def test_featurestore_online_serving_service_transport_auth_adc_old_google_auth(
    transport_class,
):
    """Verify transports request ADC with explicit scopes on google-auth < 1.25."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class(quota_project_id="octopus")
        adc.assert_called_once_with(
            scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id="octopus",
        )
@pytest.mark.parametrize(
    "transport_class,grpc_helpers",
    [
        (transports.FeaturestoreOnlineServingServiceGrpcTransport, grpc_helpers),
        (
            transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport,
            grpc_helpers_async,
        ),
    ],
)
def test_featurestore_online_serving_service_transport_create_channel(
    transport_class, grpc_helpers
):
    """Verify the channel is created with the expected host, scopes, and gRPC options."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(
        google.auth, "default", autospec=True
    ) as adc, mock.patch.object(
        grpc_helpers, "create_channel", autospec=True
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        adc.return_value = (creds, None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        create_channel.assert_called_with(
            "aiplatform.googleapis.com:443",
            credentials=creds,
            credentials_file=None,
            quota_project_id="octopus",
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            scopes=["1", "2"],
            default_host="aiplatform.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.FeaturestoreOnlineServingServiceGrpcTransport,
        transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport,
    ],
)
def test_featurestore_online_serving_service_grpc_transport_client_cert_source_for_mtls(
    transport_class,
):
    """Verify mTLS channel setup via ssl_channel_credentials or client_cert_source_for_mtls."""
    cred = ga_credentials.AnonymousCredentials()
    # Check ssl_channel_credentials is used if provided.
    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
        mock_ssl_channel_creds = mock.Mock()
        transport_class(
            host="squid.clam.whelk",
            credentials=cred,
            ssl_channel_credentials=mock_ssl_channel_creds,
        )
        mock_create_channel.assert_called_once_with(
            "squid.clam.whelk:443",
            credentials=cred,
            credentials_file=None,
            scopes=None,
            ssl_credentials=mock_ssl_channel_creds,
            quota_project_id=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
    # is used.
    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
            transport_class(
                credentials=cred,
                client_cert_source_for_mtls=client_cert_source_callback,
            )
            expected_cert, expected_key = client_cert_source_callback()
            mock_ssl_cred.assert_called_once_with(
                certificate_chain=expected_cert, private_key=expected_key
            )
def test_featurestore_online_serving_service_host_no_port():
    """Verify a portless api_endpoint is normalized to host:443."""
    client = FeaturestoreOnlineServingServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=client_options.ClientOptions(
            api_endpoint="aiplatform.googleapis.com"
        ),
    )
    assert client.transport._host == "aiplatform.googleapis.com:443"
def test_featurestore_online_serving_service_host_with_port():
    """Verify an explicit port in api_endpoint is preserved."""
    client = FeaturestoreOnlineServingServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=client_options.ClientOptions(
            api_endpoint="aiplatform.googleapis.com:8000"
        ),
    )
    assert client.transport._host == "aiplatform.googleapis.com:8000"
def test_featurestore_online_serving_service_grpc_transport_channel():
    """Verify a caller-supplied channel is adopted by the sync gRPC transport.

    Fix: ``== None`` replaced with the idiomatic identity check ``is None``
    (PEP 8 / flake8 E711); behavior is unchanged for this attribute.
    """
    channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.FeaturestoreOnlineServingServiceGrpcTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_featurestore_online_serving_service_grpc_asyncio_transport_channel():
    """Verify a caller-supplied channel is adopted by the asyncio gRPC transport.

    Fix: ``== None`` replaced with the idiomatic identity check ``is None``
    (PEP 8 / flake8 E711); behavior is unchanged for this attribute.
    """
    channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.FeaturestoreOnlineServingServiceGrpcTransport,
        transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport,
    ],
)
def test_featurestore_online_serving_service_transport_channel_mtls_with_client_cert_source(
    transport_class,
):
    """Verify deprecated api_mtls_endpoint/client_cert_source still build an mTLS channel."""
    with mock.patch(
        "grpc.ssl_channel_credentials", autospec=True
    ) as grpc_ssl_channel_cred:
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_ssl_cred = mock.Mock()
            grpc_ssl_channel_cred.return_value = mock_ssl_cred
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            cred = ga_credentials.AnonymousCredentials()
            # Using the deprecated arguments must emit DeprecationWarning.
            with pytest.warns(DeprecationWarning):
                with mock.patch.object(google.auth, "default") as adc:
                    adc.return_value = (cred, None)
                    transport = transport_class(
                        host="squid.clam.whelk",
                        api_mtls_endpoint="mtls.squid.clam.whelk",
                        client_cert_source=client_cert_source_callback,
                    )
                    adc.assert_called_once()
            # The cert/key pair below matches what client_cert_source_callback returns.
            grpc_ssl_channel_cred.assert_called_once_with(
                certificate_chain=b"cert bytes", private_key=b"key bytes"
            )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
            assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.FeaturestoreOnlineServingServiceGrpcTransport,
        transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport,
    ],
)
def test_featurestore_online_serving_service_transport_channel_mtls_with_adc(
    transport_class,
):
    """Verify api_mtls_endpoint with ADC-sourced SSL credentials builds an mTLS channel."""
    mock_ssl_cred = mock.Mock()
    with mock.patch.multiple(
        "google.auth.transport.grpc.SslCredentials",
        __init__=mock.Mock(return_value=None),
        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
    ):
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            mock_cred = mock.Mock()
            # Using the deprecated api_mtls_endpoint must emit DeprecationWarning.
            with pytest.warns(DeprecationWarning):
                transport = transport_class(
                    host="squid.clam.whelk",
                    credentials=mock_cred,
                    api_mtls_endpoint="mtls.squid.clam.whelk",
                    client_cert_source=None,
                )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=mock_cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
def test_entity_type_path():
    """Check entity_type_path builds the canonical EntityType resource name."""
    parts = {
        "project": "squid",
        "location": "clam",
        "featurestore": "whelk",
        "entity_type": "octopus",
    }
    expected = (
        "projects/{project}/locations/{location}/featurestores/"
        "{featurestore}/entityTypes/{entity_type}".format(**parts)
    )
    actual = FeaturestoreOnlineServingServiceClient.entity_type_path(
        parts["project"],
        parts["location"],
        parts["featurestore"],
        parts["entity_type"],
    )
    assert actual == expected
def test_parse_entity_type_path():
    """Verify parse_entity_type_path inverts entity_type_path."""
    expected = {
        "project": "oyster",
        "location": "nudibranch",
        "featurestore": "cuttlefish",
        "entity_type": "mussel",
    }
    path = FeaturestoreOnlineServingServiceClient.entity_type_path(**expected)
    # Check that the path construction is reversible.
    actual = FeaturestoreOnlineServingServiceClient.parse_entity_type_path(path)
    assert expected == actual
def test_common_billing_account_path():
    """Check common_billing_account_path builds the expected resource name."""
    account_id = "winkle"
    want = "billingAccounts/{}".format(account_id)
    got = FeaturestoreOnlineServingServiceClient.common_billing_account_path(
        account_id
    )
    assert got == want
def test_parse_common_billing_account_path():
    """Verify parse_common_billing_account_path inverts common_billing_account_path."""
    expected = {
        "billing_account": "nautilus",
    }
    path = FeaturestoreOnlineServingServiceClient.common_billing_account_path(
        **expected
    )
    # Check that the path construction is reversible.
    actual = FeaturestoreOnlineServingServiceClient.parse_common_billing_account_path(
        path
    )
    assert expected == actual
def test_common_folder_path():
    """Check common_folder_path builds the expected resource name."""
    folder_id = "scallop"
    got = FeaturestoreOnlineServingServiceClient.common_folder_path(folder_id)
    assert got == "folders/{}".format(folder_id)
def test_parse_common_folder_path():
    """Verify parse_common_folder_path inverts common_folder_path."""
    expected = {
        "folder": "abalone",
    }
    path = FeaturestoreOnlineServingServiceClient.common_folder_path(**expected)
    # Check that the path construction is reversible.
    actual = FeaturestoreOnlineServingServiceClient.parse_common_folder_path(path)
    assert expected == actual
def test_common_organization_path():
    """Check common_organization_path builds the expected resource name."""
    organization = "squid"
    expected = "organizations/{organization}".format(organization=organization,)
    actual = FeaturestoreOnlineServingServiceClient.common_organization_path(
        organization
    )
    assert expected == actual
def test_parse_common_organization_path():
    """Verify parse_common_organization_path inverts common_organization_path."""
    expected = {
        "organization": "clam",
    }
    path = FeaturestoreOnlineServingServiceClient.common_organization_path(**expected)
    # Check that the path construction is reversible.
    actual = FeaturestoreOnlineServingServiceClient.parse_common_organization_path(path)
    assert expected == actual
def test_common_project_path():
    """Check common_project_path builds the expected resource name."""
    project = "whelk"
    expected = "projects/{project}".format(project=project,)
    actual = FeaturestoreOnlineServingServiceClient.common_project_path(project)
    assert expected == actual
def test_parse_common_project_path():
    """Round-trip a project path through build + parse."""
    want = {"project": "octopus"}
    built = FeaturestoreOnlineServingServiceClient.common_project_path(**want)
    assert FeaturestoreOnlineServingServiceClient.parse_common_project_path(built) == want
def test_common_location_path():
    """Verify common_location_path composes project and location ids."""
    proj, loc = "oyster", "nudibranch"
    want = "projects/%s/locations/%s" % (proj, loc)
    got = FeaturestoreOnlineServingServiceClient.common_location_path(proj, loc)
    assert got == want
def test_parse_common_location_path():
    """Round-trip a location path through build + parse."""
    want = {"project": "cuttlefish", "location": "mussel"}
    built = FeaturestoreOnlineServingServiceClient.common_location_path(**want)
    got = FeaturestoreOnlineServingServiceClient.parse_common_location_path(built)
    assert got == want
def test_client_withDEFAULT_CLIENT_INFO():
    """Client and transport constructors both forward client_info to _prep_wrapped_messages."""
    info = gapic_v1.client_info.ClientInfo()
    patch_target = transports.FeaturestoreOnlineServingServiceTransport

    # Constructing the client should prep the wrapped messages with our info.
    with mock.patch.object(patch_target, "_prep_wrapped_messages") as prep:
        FeaturestoreOnlineServingServiceClient(
            credentials=ga_credentials.AnonymousCredentials(),
            client_info=info,
        )
        prep.assert_called_once_with(info)

    # Constructing the transport directly should do the same.
    with mock.patch.object(patch_target, "_prep_wrapped_messages") as prep:
        transport_cls = FeaturestoreOnlineServingServiceClient.get_transport_class()
        transport_cls(
            credentials=ga_credentials.AnonymousCredentials(),
            client_info=info,
        )
        prep.assert_called_once_with(info)
|
sasha-gitg/python-aiplatform
|
tests/unit/gapic/aiplatform_v1beta1/test_featurestore_online_serving_service.py
|
Python
|
apache-2.0
| 62,346
|
[
"Octopus"
] |
e57972c747389e180e63b207e970e1dd18aecf79c34f187771d58293499ac24b
|
"""
Classes of taxonomy data
"""
import os
import sys
class NoNameFoundError(Exception):
    """Raised when no name of any type is available for a taxonomy entry."""

    def __init__(self, message):
        # Chain through Exception.__init__ so args/str(exc) are populated
        # explicitly instead of relying on BaseException.__new__ behavior.
        super().__init__(message)
        # Keep the .message attribute the original exposed.
        self.message = message
class TaxonNode:
    """A single node of the taxonomy tree (one row of a nodes dump file)."""

    def __init__(self, t=None, p=None, r=None, e=None, d=None, i=None, gc=None,
                 igc=False, mgc=None, imgc=False, gh=False, hs=False, c=None,
                 *others):
        # Raw dump-file columns, stored under readable attribute names.
        self.taxid = t
        self.parent = p
        self.rank = r
        self.embl = e
        self.division = d
        self.inherited = i
        self.geneticCode = gc
        self.inheritedGC = igc
        self.mitochondrialGeneticCode = mgc
        self.inheritedMitochondrialGeneticCode = imgc
        self.GenBankHidden = gh
        self.hiddenSubtree = hs
        self.comments = c
        # Flag any unexpected trailing columns rather than silently dropping them.
        if others:
            print("WARNING: {} :: {}".format(p, others))
class TaxonName:
    """All names recorded for one taxonomy ID (rows of a names dump file)."""

    # name-type string -> (attribute name, accumulates?).  True means the
    # attribute is a list that collects every value seen; False means a
    # single scalar slot that is overwritten.
    _NAME_SLOTS = {
        "acronym": ("acronym", True),
        "anamorph": ("anamorph", True),
        "authority": ("authority", True),
        "blast name": ("blast_name", False),
        "common name": ("common_name", True),
        "equivalent name": ("equivalent_name", True),
        "genbank acronym": ("genbank_acronym", False),
        "genbank anamorph": ("genbank_anamorph", False),
        "genbank common name": ("genbank_common_name", False),
        "genbank synonym": ("genbank_synonym", True),
        "in-part": ("in_part", True),
        "includes": ("includes", True),
        "misnomer": ("misnomer", True),
        "misspelling": ("misspelling", True),
        "scientific name": ("scientific_name", False),
        "synonym": ("synonym", True),
        "teleomorph": ("telemorph", True),
        "type material": ("type_material", True),
    }

    def __init__(self, t=None, n=None, u=None, nc=None):
        self.taxid = t
        self.unique = u
        # Multi-valued name categories start as empty lists.
        for attr, accumulates in self._NAME_SLOTS.values():
            setattr(self, attr, [] if accumulates else None)

    def set_name(self, nametype, nameval):
        """
        Record a name for this tax id.  Allows multiple names for the same ID.
        :param nametype: the type of name
        :param nameval: the name
        """
        slot = self._NAME_SLOTS.get(nametype)
        if slot is None:
            sys.stderr.write("Do not recognise name type |{}|\n".format(nametype))
            return
        attr, accumulates = slot
        if accumulates:
            getattr(self, attr).append(nameval)
        else:
            setattr(self, attr, nameval)

    def get_name(self):
        """
        Return the preferred name for this taxon, trying blast name, then
        scientific name, then common name, then equivalent name.
        """
        for candidate in (self.blast_name, self.scientific_name,
                          self.common_name, self.equivalent_name):
            if candidate:
                return candidate
        raise NoNameFoundError(f"No name was found for taxonomy ID {self.taxid}")
class TaxonDivision:
    """A taxonomy division (one row of a division dump file)."""

    def __init__(self, i=None, c=None, n=None, co=None):
        # i: numeric division id; c: short code; n: display name; co: comments.
        self.divid = i
        self.code = c
        self.name = n
        self.comments = co
|
linsalrob/EdwardsLab
|
taxon/taxonomy/taxonomy.py
|
Python
|
mit
| 4,030
|
[
"BLAST"
] |
696ab896f17c31a8e99d946449be4c2bb25442bbc3fd080a768186bd5cc55b88
|
# gaussfitter.py
# created by Adam Ginsburg (adam.ginsburg@colorado.edu or keflavich@gmail.com) 3/17/08)
# latest version available at http://code.google.com/p/agpy/source/browse/trunk/agpy/gaussfitter.py
import numpy
from numpy.ma import median
from numpy import pi
#from scipy import optimize,stats,pi
from mpfit import mpfit
"""
Note about mpfit/leastsq:
I switched everything over to the Markwardt mpfit routine for a few reasons,
but foremost being the ability to set limits on parameters, not just force them
to be fixed. As far as I can tell, leastsq does not have that capability.
The version of mpfit I use can be found here:
http://code.google.com/p/agpy/source/browse/trunk/mpfit
"""
"""
To do:
-turn into a class instead of a collection of objects
-implement WCS-based gaussian fitting with correct coordinates
"""
def moments(data, circle, rotate, vheight, estimator=median, **kwargs):
    """Estimate (height, amplitude, x, y, width_x, width_y, rotation angle)
    for a 2D distribution from its moments.  Depending on the flags, only a
    subset of the above is returned.

    If using masked arrays, pass estimator=numpy.ma.median
    """
    total = numpy.abs(data).sum()
    # python convention: numpy.indices returns (row, col) grids
    Y, X = numpy.indices(data.shape)
    y = numpy.argmax((X * numpy.abs(data)).sum(axis=1) / total)
    x = numpy.argmax((Y * numpy.abs(data)).sum(axis=0) / total)
    profile_x = data[int(y), :]
    # FIRST moment, not second!
    width_x = numpy.sqrt(
        numpy.abs((numpy.arange(profile_x.size) - y) * profile_x).sum()
        / numpy.abs(profile_x).sum())
    profile_y = data[:, int(x)]
    width_y = numpy.sqrt(
        numpy.abs((numpy.arange(profile_y.size) - x) * profile_y).sum()
        / numpy.abs(profile_y).sum())
    width = (width_x + width_y) / 2.
    height = estimator(data.ravel())
    amplitude = data.max() - height
    if any(numpy.isnan(v) for v in (width_y, width_x, height, amplitude)):
        raise ValueError("something is nan")
    params = [amplitude, x, y]
    if vheight == 1:
        params.insert(0, height)
    if circle == 0:
        params += [width_x, width_y]
        if rotate == 1:
            # rotation "moment" is just zero... also, circles don't rotate.
            params.append(0.)
    else:
        params.append(width)
    return params
def twodgaussian(inpars, circle=False, rotate=True, vheight=True, shape=None):
    """Return a 2d gaussian function (or image) of the form:
        x' = numpy.cos(rota) * x - numpy.sin(rota) * y
        y' = numpy.sin(rota) * x + numpy.cos(rota) * y
        (rota in degrees)
        g = b + a * exp(-(((x-center_x)/width_x)**2 +
                          ((y-center_y)/width_y)**2) / 2)

    inpars is a flat sequence: (height, amplitude, center_x, center_y,
    width_x, width_y, rota), with leading/trailing entries dropped according
    to the flags:
        circle=0  - elliptical gaussian (default); circle=1 uses one width
        rotate=1  - allow rotation (default); rotate=0 drops the last param
        vheight=1 - variable baseline (default); vheight=0 drops the first
        shape     - if set to a 2-element shape, return the rendered image
                    instead of the function
    """
    original_pars = inpars
    stack = list(inpars)
    if vheight == 1:
        height = float(stack.pop(0))
    else:
        height = float(0)
    # NB: center_y is consumed before center_x
    amplitude = float(stack.pop(0))
    center_y = float(stack.pop(0))
    center_x = float(stack.pop(0))
    if circle == 1:
        width_x = width_y = float(stack.pop(0))
        rotate = 0  # a circle has no orientation
    else:
        width_x = float(stack.pop(0))
        width_y = float(stack.pop(0))
    if rotate == 1:
        rota = pi / 180. * float(stack.pop(0))
        rcen_x = center_x * numpy.cos(rota) - center_y * numpy.sin(rota)
        rcen_y = center_x * numpy.sin(rota) + center_y * numpy.cos(rota)
    else:
        rcen_x = center_x
        rcen_y = center_y
    if stack:
        raise ValueError("There are still input parameters:" + str(stack) +
                         " and you've input: " + str(original_pars) +
                         " circle=%d, rotate=%d, vheight=%d" % (circle, rotate, vheight))

    def rotgauss(x, y):
        # Rotate the evaluation coordinates into the gaussian's frame.
        if rotate == 1:
            xp = x * numpy.cos(rota) - y * numpy.sin(rota)
            yp = x * numpy.sin(rota) + y * numpy.cos(rota)
        else:
            xp = x
            yp = y
        return height + amplitude * numpy.exp(
            -(((rcen_x - xp) / width_x) ** 2 +
              ((rcen_y - yp) / width_y) ** 2) / 2.)

    if shape is not None:
        return rotgauss(*numpy.indices(shape))
    return rotgauss
def gaussfit(data, err=None, params=(), autoderiv=True, return_all=False, circle=False,
             fixed=None, limitedmin=[False, False, False, False, True, True, True],
             limitedmax=[False, False, False, False, False, False, True],
             usemoment=numpy.array([], dtype='bool'),
             minpars=numpy.repeat(0, 7), maxpars=[0, 0, 0, 0, 0, 0, 360],
             rotate=1, vheight=1, quiet=True, returnmp=False,
             returnfitimage=False, **kwargs):
    """
    Gaussian fitter with the ability to fit a variety of different forms of
    2-dimensional gaussian.

    Input Parameters:
        data - 2-dimensional data array
        err=None - error array with same size as data array
        params=[] - initial input parameters for Gaussian function
            (height, amplitude, x, y, width_x, width_y, rota); if not given
            they are estimated from the moments of the data (no rotation)
        autoderiv=1 - use mpfit's automatic derivatives (the analytic
            alternative is not implemented)
        return_all=0 - return only the fit parameters; 1 - also return errors
        returnfitimage - return (best fit params, best fit image)
        returnmp - return the full mpfit struct
        circle=0 - elliptical gaussian (default); circle=1 uses one width
        rotate=1 - allow rotation (expects the angle in DEGREES)
        vheight=1 - allow a variable additive baseline
        usemoment - boolean array choosing which parameters to replace with
            moment estimates
        fixed=None - per-parameter "hold fixed" flags; defaults to all-False

    Output:
        mp.params (and optionally errors / the mpfit struct / the model
        image).  Warning: the rotation angle is NOT necessarily in [0, 360).
    """
    # Fresh flags each call: the old default (a module-level numpy array)
    # was mutated below (fixed[0] = 1), leaking state into later calls.
    if fixed is None:
        fixed = numpy.repeat(False, 7)
    usemoment = numpy.array(usemoment, dtype='bool')
    params = numpy.array(params, dtype='float')
    if usemoment.any() and len(params) == len(usemoment):
        moment = numpy.array(moments(data, circle, rotate, vheight, **kwargs), dtype='float')
        params[usemoment] = moment[usemoment]
    elif len(params) == 0:
        # length test only: ``params == []`` on a numpy array is an
        # elementwise comparison, not the emptiness check intended
        params = (moments(data, circle, rotate, vheight, **kwargs))
    if vheight == 0:
        # fit with a baseline parameter present but pinned at zero
        vheight = 1
        params = numpy.concatenate([[0], params])
        fixed[0] = 1
    # mpfit will fail if it is given a start parameter outside the allowed range:
    for i in range(len(params)):
        if params[i] > maxpars[i] and limitedmax[i]:
            params[i] = maxpars[i]
        if params[i] < minpars[i] and limitedmin[i]:
            params[i] = minpars[i]

    def mpfitfun(data, err):
        # Residual function in the form mpfit expects: [status, residuals].
        if err is None:
            def f(p, fjac=None):
                return [0, numpy.ravel(data - twodgaussian(p, circle, rotate, vheight)
                                       (*numpy.indices(data.shape)))]
        else:
            def f(p, fjac=None):
                return [0, numpy.ravel((data - twodgaussian(p, circle, rotate, vheight)
                                        (*numpy.indices(data.shape))) / err)]
        return f

    parinfo = [
        {'n': 1, 'value': params[1], 'limits': [minpars[1], maxpars[1]], 'limited': [limitedmin[1], limitedmax[1]], 'fixed': fixed[1], 'parname': "AMPLITUDE", 'error': 0},
        {'n': 2, 'value': params[2], 'limits': [minpars[2], maxpars[2]], 'limited': [limitedmin[2], limitedmax[2]], 'fixed': fixed[2], 'parname': "XSHIFT", 'error': 0},
        {'n': 3, 'value': params[3], 'limits': [minpars[3], maxpars[3]], 'limited': [limitedmin[3], limitedmax[3]], 'fixed': fixed[3], 'parname': "YSHIFT", 'error': 0},
        {'n': 4, 'value': params[4], 'limits': [minpars[4], maxpars[4]], 'limited': [limitedmin[4], limitedmax[4]], 'fixed': fixed[4], 'parname': "XWIDTH", 'error': 0}]
    if vheight == 1:
        parinfo.insert(0, {'n': 0, 'value': params[0], 'limits': [minpars[0], maxpars[0]], 'limited': [limitedmin[0], limitedmax[0]], 'fixed': fixed[0], 'parname': "HEIGHT", 'error': 0})
    if circle == 0:
        parinfo.append({'n': 5, 'value': params[5], 'limits': [minpars[5], maxpars[5]], 'limited': [limitedmin[5], limitedmax[5]], 'fixed': fixed[5], 'parname': "YWIDTH", 'error': 0})
        if rotate == 1:
            parinfo.append({'n': 6, 'value': params[6], 'limits': [minpars[6], maxpars[6]], 'limited': [limitedmin[6], limitedmax[6]], 'fixed': fixed[6], 'parname': "ROTATION", 'error': 0})

    if autoderiv == 0:
        # the analytic derivative, while not terribly difficult, is less
        # efficient and useful. I only bothered putting it here because I was
        # instructed to do so for a class project - please ask if you would
        # like this feature implemented
        raise ValueError("I'm sorry, I haven't implemented this feature yet.")
    else:
        mp = mpfit(mpfitfun(data, err), parinfo=parinfo, quiet=quiet)

    if returnmp:
        returns = (mp)
    elif return_all == 0:
        returns = mp.params
    elif return_all == 1:
        returns = mp.params, mp.perror
    if returnfitimage:
        fitimage = twodgaussian(mp.params, circle, rotate, vheight)(*numpy.indices(data.shape))
        returns = (returns, fitimage)
    return returns
def onedmoments(Xax, data, vheight=True, estimator=median, negamp=None,
                veryverbose=False, **kwargs):
    """Returns (height, amplitude, x, width_x) — the gaussian parameters of a
    1D distribution estimated from its moments.  Depending on the input
    parameters, only a subset of the above is returned.

    If using masked arrays, pass estimator=numpy.ma.median
    'estimator' is used to measure the background level (height)

    negamp can be used to force the peak negative (True), positive (False),
    or it will be "autodetected" (negamp=None)
    """
    dx = numpy.mean(Xax[1:] - Xax[:-1])  # assume a regular grid
    integral = (data * dx).sum()
    height = estimator(data)

    # try to figure out whether pos or neg based on the minimum width of the pos/neg peaks
    Lpeakintegral = integral - height * len(Xax) * dx - (data[data > height] * dx).sum()
    Lamplitude = data.min() - height
    Lwidth_x = 0.5 * (numpy.abs(Lpeakintegral / Lamplitude))
    Hpeakintegral = integral - height * len(Xax) * dx - (data[data < height] * dx).sum()
    Hamplitude = data.max() - height
    Hwidth_x = 0.5 * (numpy.abs(Hpeakintegral / Hamplitude))
    Lstddev = Xax[data < data.mean()].std()
    Hstddev = Xax[data > data.mean()].std()
    if negamp:  # can force the guess to be negative
        xcen, amplitude, width_x = Xax[numpy.argmin(data)], Lamplitude, Lwidth_x
    elif negamp is None:
        if Hstddev < Lstddev:
            xcen, amplitude, width_x = Xax[numpy.argmax(data)], Hamplitude, Hwidth_x
        else:
            xcen, amplitude, width_x = Xax[numpy.argmin(data)], Lamplitude, Lwidth_x
    else:  # if negamp==False, make positive
        xcen, amplitude, width_x = Xax[numpy.argmax(data)], Hamplitude, Hwidth_x

    if veryverbose:
        # print() call form: the Python-2 print statement used here before
        # is a syntax error on Python 3
        print("negamp: %s amp,width,cen Lower: %g, %g Upper: %g, %g Center: %g" %
              (negamp, Lamplitude, Lwidth_x, Hamplitude, Hwidth_x, xcen))
    mylist = [amplitude, xcen, width_x]
    if numpy.isnan(width_x) or numpy.isnan(height) or numpy.isnan(amplitude):
        raise ValueError("something is nan")
    if vheight:
        mylist = [height] + mylist
    return mylist
def onedgaussian(x, H, A, dx, w):
    """Evaluate the 1-d gaussian H + A*exp(-(x-dx)**2 / (2*w**2)) at x."""
    offset_sq = (x - dx) ** 2
    return H + A * numpy.exp(-offset_sq / (2 * w ** 2))
def onedgaussfit(xax, data, err=None,
                 params=[0, 1, 0, 1], fixed=None,
                 limitedmin=[False, False, False, True],
                 limitedmax=[False, False, False, False], minpars=[0, 0, 0, 0],
                 maxpars=[0, 0, 0, 0], quiet=True, shh=True,
                 veryverbose=False,
                 vheight=True, negamp=False,
                 usemoments=False):
    """
    Fit a single 1-d gaussian with mpfit.

    Inputs:
       xax - x axis (None -> 0..len(data)-1)
       data - y axis
       err - error corresponding to data
       params - Fit parameters: Height of background, Amplitude, Shift, Width
       fixed - Is parameter fixed?  (None -> all free)
       limitedmin/minpars - set lower limits on each parameter (default: width>0)
       limitedmax/maxpars - set upper limits on each parameter
       quiet - should MPFIT output each iteration?
       shh - output final parameters?
       usemoments - replace default parameters with moments

    Returns:
       Fit parameters, Model, Fit errors, chi2
    """
    # Fresh list each call: the old mutable default was modified in place
    # (fixed[0] = True below), leaking state into subsequent calls.
    if fixed is None:
        fixed = [False, False, False, False]

    def mpfitfun(x, y, err):
        # Residual function in the [status, residuals] form mpfit expects.
        if err is None:
            def f(p, fjac=None):
                return [0, (y - onedgaussian(x, *p))]
        else:
            def f(p, fjac=None):
                return [0, (y - onedgaussian(x, *p)) / err]
        return f

    # ``is None``: ``xax == None`` is an elementwise comparison for arrays
    # and its truth value is ambiguous.
    if xax is None:
        xax = numpy.arange(len(data))
    if vheight is False:
        height = params[0]
        fixed[0] = True
    if usemoments:
        params = onedmoments(xax, data, vheight=vheight, negamp=negamp, veryverbose=veryverbose)
        if vheight is False:
            params = [height] + params
    if veryverbose:
        print("OneD moments: h: %g a: %g c: %g w: %g" % tuple(params))
    parinfo = [{'n': 0, 'value': params[0], 'limits': [minpars[0], maxpars[0]], 'limited': [limitedmin[0], limitedmax[0]], 'fixed': fixed[0], 'parname': "HEIGHT", 'error': 0},
               {'n': 1, 'value': params[1], 'limits': [minpars[1], maxpars[1]], 'limited': [limitedmin[1], limitedmax[1]], 'fixed': fixed[1], 'parname': "AMPLITUDE", 'error': 0},
               {'n': 2, 'value': params[2], 'limits': [minpars[2], maxpars[2]], 'limited': [limitedmin[2], limitedmax[2]], 'fixed': fixed[2], 'parname': "SHIFT", 'error': 0},
               {'n': 3, 'value': params[3], 'limits': [minpars[3], maxpars[3]], 'limited': [limitedmin[3], limitedmax[3]], 'fixed': fixed[3], 'parname': "WIDTH", 'error': 0}]
    mp = mpfit(mpfitfun(xax, data, err), parinfo=parinfo, quiet=quiet)
    mpp = mp.params
    mpperr = mp.perror
    chi2 = mp.fnorm

    if mp.status == 0:
        raise Exception(mp.errmsg)
    if (not shh) or veryverbose:
        print("Fit status: ", mp.status)
        for i, p in enumerate(mpp):
            parinfo[i]['value'] = p
            print(parinfo[i]['parname'], p, " +/- ", mpperr[i])
        print("Chi2: ", mp.fnorm, " Reduced Chi2: ", mp.fnorm / len(data), " DOF:", len(data) - len(mpp))
    return mpp, onedgaussian(xax, *mpp), mpperr, chi2
def onedloggaussian(x, H, A, dx, w):
    """Gaussian in log10(x): H + A*exp(-(log10(x)-dx)**2 / (2*w**2))."""
    log_offset_sq = (numpy.log10(x) - dx) ** 2
    return H + A * numpy.exp(-log_offset_sq / (2 * w ** 2))
def onedloggaussfit(xax, data, err=None,
                    params=[0, 1, 0, 1], fixed=None,
                    limitedmin=[False, False, False, True],
                    limitedmax=[False, False, False, False], minpars=[0, 0, 0, 0],
                    maxpars=[0, 0, 0, 0], quiet=True, shh=True,
                    veryverbose=False,
                    vheight=True, negamp=False,
                    usemoments=False):
    """
    Fit a single gaussian in log10(x) with mpfit.

    Inputs:
       xax - x axis (None -> 0..len(data)-1)
       data - y axis
       err - error corresponding to data
       params - Fit parameters: Height of background, Amplitude, Shift, Width
       fixed - Is parameter fixed?  (None -> all free)
       limitedmin/minpars - set lower limits on each parameter (default: width>0)
       limitedmax/maxpars - set upper limits on each parameter
       quiet - should MPFIT output each iteration?
       shh - output final parameters?
       usemoments - replace default parameters with moments

    Returns:
       Fit parameters, Model, Fit errors, chi2
    """
    # Fresh list each call: the old mutable default was modified in place
    # (fixed[0] = True below), leaking state into subsequent calls.
    if fixed is None:
        fixed = [False, False, False, False]

    def mpfitfun(x, y, err):
        # Residual function in the [status, residuals] form mpfit expects.
        if err is None:
            def f(p, fjac=None):
                return [0, (y - onedloggaussian(x, *p))]
        else:
            def f(p, fjac=None):
                return [0, (y - onedloggaussian(x, *p)) / err]
        return f

    # ``is None``: ``xax == None`` is an elementwise comparison for arrays
    # and its truth value is ambiguous.
    if xax is None:
        xax = numpy.arange(len(data))
    if vheight is False:
        height = params[0]
        fixed[0] = True
    if usemoments:
        params = onedmoments(xax, data, vheight=vheight, negamp=negamp, veryverbose=veryverbose)
        if vheight is False:
            params = [height] + params
    if veryverbose:
        print("OneD moments: h: %g a: %g c: %g w: %g" % tuple(params))
    parinfo = [{'n': 0, 'value': params[0], 'limits': [minpars[0], maxpars[0]], 'limited': [limitedmin[0], limitedmax[0]], 'fixed': fixed[0], 'parname': "HEIGHT", 'error': 0},
               {'n': 1, 'value': params[1], 'limits': [minpars[1], maxpars[1]], 'limited': [limitedmin[1], limitedmax[1]], 'fixed': fixed[1], 'parname': "AMPLITUDE", 'error': 0},
               {'n': 2, 'value': params[2], 'limits': [minpars[2], maxpars[2]], 'limited': [limitedmin[2], limitedmax[2]], 'fixed': fixed[2], 'parname': "SHIFT", 'error': 0},
               {'n': 3, 'value': params[3], 'limits': [minpars[3], maxpars[3]], 'limited': [limitedmin[3], limitedmax[3]], 'fixed': fixed[3], 'parname': "WIDTH", 'error': 0}]
    mp = mpfit(mpfitfun(xax, data, err), parinfo=parinfo, quiet=quiet)
    mpp = mp.params
    mpperr = mp.perror
    chi2 = mp.fnorm

    if mp.status == 0:
        raise Exception(mp.errmsg)
    if (not shh) or veryverbose:
        print("Fit status: ", mp.status)
        for i, p in enumerate(mpp):
            parinfo[i]['value'] = p
            print(parinfo[i]['parname'], p, " +/- ", mpperr[i])
        print("Chi2: ", mp.fnorm, " Reduced Chi2: ", mp.fnorm / len(data), " DOF:", len(data) - len(mpp))
    return mpp, onedloggaussian(xax, *mpp), mpperr, chi2
def n_gaussian(pars=None, a=None, dx=None, sigma=None):
    """
    Return a function that sums over N gaussians, where N is the length of
    a,dx,sigma *OR* N = len(pars) / 3.

    The background "height" is assumed to be zero (you must "baseline" your
    spectrum before fitting)

    pars  - a list with len(pars) = 3n, assuming a,dx,sigma repeated
    dx    - offset (velocity center) values
    sigma - line widths
    a     - amplitudes
    """
    # Guard pars for None: the old code called len(pars) unconditionally, so
    # the documented a/dx/sigma calling form crashed with a TypeError.
    if pars is not None and len(pars) % 3 == 0:
        a = [pars[ii] for ii in range(0, len(pars), 3)]
        dx = [pars[ii] for ii in range(1, len(pars), 3)]
        sigma = [pars[ii] for ii in range(2, len(pars), 3)]
    elif not (len(dx) == len(sigma) == len(a)):
        raise ValueError("Wrong array lengths! dx: %i sigma: %i a: %i" % (len(dx), len(sigma), len(a)))

    def g(x):
        # Sum of the component gaussians evaluated on x.
        v = numpy.zeros(len(x))
        for i in range(len(dx)):
            v += a[i] * numpy.exp(-(x - dx[i]) ** 2 / (2.0 * sigma[i] ** 2))
        return v
    return g
def multigaussfit(xax, data, ngauss=1, err=None, params=[1, 0, 1],
                  fixed=[False, False, False], limitedmin=[False, False, True],
                  limitedmax=[False, False, False], minpars=[0, 0, 0], maxpars=[0, 0, 0],
                  quiet=True, shh=True, veryverbose=False):
    """
    An improvement on onedgaussfit.  Lets you fit multiple gaussians.

    Inputs:
       xax - x axis (None -> 0..len(data)-1)
       data - y axis
       ngauss - How many gaussians to fit?  Default 1
       err - error corresponding to data

     These parameters need to have length = 3*ngauss.  If ngauss > 1 and
     length = 3, they will be replicated ngauss times, otherwise they will
     be reset to defaults:
       params - Fit parameters: [amplitude, offset, width] * ngauss
              If len(params) % 3 == 0, ngauss will be set to len(params) / 3
       fixed - Is parameter fixed?
       limitedmin/minpars - set lower limits on each parameter (default: width>0)
       limitedmax/maxpars - set upper limits on each parameter
       quiet - should MPFIT output each iteration?
       shh - output final parameters?

    Returns:
       Fit parameters, Model, Fit errors, chi2
    """
    # Work on copies: the original mutated the default argument lists in
    # place (``parlist *= ngauss`` / ``parlist[:] = ...``), which corrupted
    # the defaults for every subsequent call.
    params = params.tolist() if isinstance(params, numpy.ndarray) else list(params)
    fixed = list(fixed)
    limitedmin = list(limitedmin)
    limitedmax = list(limitedmax)
    minpars = list(minpars)
    maxpars = list(maxpars)

    # Floor division: on Python 3 ``/`` would make ngauss a float and break
    # the list replication below.
    if len(params) != ngauss and (len(params) // 3) > ngauss:
        ngauss = len(params) // 3

    # make sure all various things are the right length; if they're not, fix them using the defaults
    for parlist in (params, fixed, limitedmin, limitedmax, minpars, maxpars):
        if len(parlist) != 3 * ngauss:
            # if you leave the defaults, or enter something that can be multiplied by 3 to get to the
            # right number of gaussians, it will just replicate
            if len(parlist) == 3:
                parlist *= ngauss
            elif parlist == params:
                parlist[:] = [1, 0, 1] * ngauss
            elif parlist == fixed or parlist == limitedmax:
                parlist[:] = [False, False, False] * ngauss
            elif parlist == limitedmin:
                parlist[:] = [False, False, True] * ngauss
            elif parlist == minpars or parlist == maxpars:
                parlist[:] = [0, 0, 0] * ngauss

    def mpfitfun(x, y, err):
        # Residual function in the [status, residuals] form mpfit expects.
        if err is None:
            def f(p, fjac=None):
                return [0, (y - n_gaussian(pars=p)(x))]
        else:
            def f(p, fjac=None):
                return [0, (y - n_gaussian(pars=p)(x)) / err]
        return f

    # ``is None``: ``xax == None`` is an elementwise comparison for arrays
    # and its truth value is ambiguous.
    if xax is None:
        xax = numpy.arange(len(data))

    parnames = {0: "AMPLITUDE", 1: "SHIFT", 2: "WIDTH"}
    parinfo = [{'n': ii, 'value': params[ii],
                'limits': [minpars[ii], maxpars[ii]],
                'limited': [limitedmin[ii], limitedmax[ii]], 'fixed': fixed[ii],
                'parname': parnames[ii % 3] + str(ii % 3), 'error': ii}
               for ii in range(len(params))]

    if veryverbose:
        print("GUESSES: ")
        print("\n".join(["%s: %s" % (p['parname'], p['value']) for p in parinfo]))

    mp = mpfit(mpfitfun(xax, data, err), parinfo=parinfo, quiet=quiet)
    mpp = mp.params
    mpperr = mp.perror
    chi2 = mp.fnorm

    if mp.status == 0:
        raise Exception(mp.errmsg)
    if not shh:
        print("Final fit values: ")
        for i, p in enumerate(mpp):
            parinfo[i]['value'] = p
            print(parinfo[i]['parname'], p, " +/- ", mpperr[i])
        print("Chi2: ", mp.fnorm, " Reduced Chi2: ", mp.fnorm / len(data), " DOF:", len(data) - len(mpp))
    return mpp, n_gaussian(pars=mpp)(xax), mpperr, chi2
def collapse_gaussfit(cube, xax=None, axis=2, negamp=False, usemoments=True, nsigcut=1.0, mppsigcut=1.0,
                      return_errors=False, **kwargs):
    """Fit a 1-d gaussian to every spectrum of a data cube whose peak exceeds
    nsigcut times the (median) per-spectrum noise, and collapse the results
    into parameter maps.

    Returns (width, offset, amplitude, chi2) arrays, plus the per-parameter
    error arrays when return_errors is set.
    """
    import time
    std_coll = cube.std(axis=axis)
    std_coll[std_coll == 0] = numpy.nan  # must eliminate all-zero spectra
    # nan-safe selection: nan != nan, so this keeps only finite values
    mean_std = median(std_coll[std_coll == std_coll])
    if axis > 0:
        # spectra along axis 0 from here on
        cube = cube.swapaxes(0, axis)
    width_arr = numpy.zeros(cube.shape[1:]) + numpy.nan
    amp_arr = numpy.zeros(cube.shape[1:]) + numpy.nan
    chi2_arr = numpy.zeros(cube.shape[1:]) + numpy.nan
    offset_arr = numpy.zeros(cube.shape[1:]) + numpy.nan
    width_err = numpy.zeros(cube.shape[1:]) + numpy.nan
    amp_err = numpy.zeros(cube.shape[1:]) + numpy.nan
    offset_err = numpy.zeros(cube.shape[1:]) + numpy.nan
    if xax is None:
        xax = numpy.arange(cube.shape[0])
    starttime = time.time()
    print("Cube shape: ", cube.shape)
    if negamp:
        extremum = numpy.min
    else:
        extremum = numpy.max
    print("Fitting a total of %i spectra with peak signal above %f" %
          ((numpy.abs(extremum(cube, axis=0)) > (mean_std * nsigcut)).sum(), mean_std * nsigcut))
    for i in range(cube.shape[1]):
        t0 = time.time()
        nspec = (numpy.abs(extremum(cube[:, i, :], axis=0)) > (mean_std * nsigcut)).sum()
        # end=" ": port of the Python-2 trailing-comma (no newline) print
        print("Working on row %d with %d spectra to fit" % (i, nspec), end=" ")
        for j in range(cube.shape[2]):
            if numpy.abs(extremum(cube[:, i, j])) > (mean_std * nsigcut):
                mpp, gfit, mpperr, chi2 = onedgaussfit(xax, cube[:, i, j], err=numpy.ones(cube.shape[0]) * mean_std, negamp=negamp, usemoments=usemoments, **kwargs)
                # only keep fits whose amplitude is significant
                if numpy.abs(mpp[1]) > (mpperr[1] * mppsigcut):
                    width_arr[i, j] = mpp[3]
                    offset_arr[i, j] = mpp[2]
                    chi2_arr[i, j] = chi2
                    amp_arr[i, j] = mpp[1]
                    width_err[i, j] = mpperr[3]
                    offset_err[i, j] = mpperr[2]
                    amp_err[i, j] = mpperr[1]
        dt = time.time() - t0
        if nspec > 0:
            print("in %f seconds (average: %f)" % (dt, dt / float(nspec)))
        else:
            print("in %f seconds" % (dt))
    print("Total time %f seconds" % (time.time() - starttime))

    if return_errors:
        return width_arr, offset_arr, amp_arr, width_err, offset_err, amp_err, chi2_arr
    else:
        return width_arr, offset_arr, amp_arr, chi2_arr
|
chrox/RealTimeElectrophy
|
Experimenter/DataProcessing/Fitting/gaussfitter.py
|
Python
|
bsd-2-clause
| 26,671
|
[
"Gaussian"
] |
4cf256a467c1edc543d0699f301197c9977b399a311e10715ec437c64f9b1ef9
|
from rdkit import DataStructs
from rdkit import RDConfig
import unittest, os
def feq(a, b, tol=1e-4):
    """Approximate float equality: True when |a - b| < tol."""
    delta = abs(a - b)
    return delta < tol
class TestCase(unittest.TestCase):
def setUp(self):
self.dirname = os.path.join(RDConfig.RDBaseDir, 'Code', 'DataStructs', 'testData')
self.filename = os.path.join(self.dirname, 'zim.head100.fpb')
self.fpbr = DataStructs.FPBReader(self.filename)
self.fpbr.Init()
def test1Basics(self):
self.assertEqual(len(self.fpbr), 100)
self.assertEqual(self.fpbr.GetNumBits(), 2048)
self.assertEqual(self.fpbr.GetId(0), "ZINC00902219")
self.assertEqual(self.fpbr.GetId(3), "ZINC04803506")
fp = self.fpbr.GetFP(0)
self.assertEqual(fp.GetNumBits(), 2048)
self.assertEqual(fp.GetNumOnBits(), 17)
obs = (1, 80, 183, 222, 227, 231, 482, 650, 807, 811, 831, 888, 1335, 1411, 1664, 1820, 1917)
obl = tuple(fp.GetOnBits())
self.assertEqual(obs, obl)
# test operator[]
fp, nm = self.fpbr[0]
self.assertEqual(nm, "ZINC00902219")
self.assertEqual(fp.GetNumOnBits(), 17)
def test2Tanimoto(self):
bv = self.fpbr.GetBytes(0)
self.assertAlmostEqual(self.fpbr.GetTanimoto(0, bv), 1.0, 4)
self.assertAlmostEqual(self.fpbr.GetTanimoto(1, bv), 0.3704, 4)
tpl = self.fpbr.GetTanimotoNeighbors(bv)
self.assertEqual(len(tpl), 1)
self.assertEqual(tpl[0][1], 0)
self.assertAlmostEqual(tpl[0][0], 1., 4)
tpl = self.fpbr.GetTanimotoNeighbors(bv, threshold=0.3)
self.assertEqual(len(tpl), 5)
self.assertEqual(tpl[0][1], 0)
self.assertAlmostEqual(tpl[0][0], 1., 4)
self.assertEqual(tpl[1][1], 1)
self.assertAlmostEqual(tpl[1][0], 0.3704, 4)
def test3Tversky(self):
bv = self.fpbr.GetBytes(0)
self.assertAlmostEqual(self.fpbr.GetTversky(0, bv, 1, 1), 1.0, 4)
self.assertAlmostEqual(self.fpbr.GetTversky(1, bv, 1, 1), 0.3704, 4)
tpl = self.fpbr.GetTverskyNeighbors(bv, 1, 1)
self.assertEqual(len(tpl), 1)
self.assertEqual(tpl[0][1], 0)
self.assertAlmostEqual(tpl[0][0], 1., 4)
tpl = self.fpbr.GetTverskyNeighbors(bv, 1, 1, threshold=0.3)
self.assertEqual(len(tpl), 5)
self.assertEqual(tpl[0][1], 0)
self.assertAlmostEqual(tpl[0][0], 1., 4)
self.assertEqual(tpl[1][1], 1)
self.assertAlmostEqual(tpl[1][0], 0.3704, 4)
def test4Contains(self):
bv = self.fpbr.GetBytes(0)
nbrs = self.fpbr.GetContainingNeighbors(bv)
self.assertEqual(len(nbrs), 1)
self.assertEqual(nbrs[0], 0)
bv = self.fpbr.GetBytes(1)
nbrs = self.fpbr.GetContainingNeighbors(bv)
self.assertEqual(len(nbrs), 4)
self.assertEqual(nbrs, (1, 2, 3, 4))
def test5Contains(self):
" an example based on substructure screening "
filename = os.path.join(RDConfig.RDBaseDir, 'Code', 'DataStructs', 'testData',
'zinc_all_clean.100.patt1k.fpb')
fpbr = DataStructs.FPBReader(filename)
fpbr.Init()
bytes = b'\x00\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00\x00\x00\x00\x00\x000\x00@\x00 \x00\x00 \x00\x00\x02@\x00\x00\x00\x00\x00\x00\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00`\x07\x00\x04\x00"\x14\x02\x00\x00"\x00\x00\x00\x00\x08\x00\x80\x00\x00@\x00@\x00\x80\x00\x00\x00\x00B\x00\x00\x80\x00\x80\x08\x00\x04\x00@\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00 \x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x80\x04\x00\x00\x0c\x00\x00\x00@\x88\x10\x10\x00\x00\x88\x00@'
nbrs = fpbr.GetContainingNeighbors(bytes)
self.assertEqual(len(nbrs), 9)
ids = sorted(fpbr.GetId(x) for x in nbrs)
self.assertEqual(ids, ['ZINC00000562', 'ZINC00000843', 'ZINC00000969', 'ZINC00001484',
'ZINC00001585', 'ZINC00002094', 'ZINC00004739', 'ZINC00005235',
'ZINC00006300'])
def test6MultiFPBReaderTani(self):
    """Tanimoto neighbor search across four child readers, serial and threaded."""
    dataDir = os.path.join(RDConfig.RDBaseDir, 'Code', 'DataStructs', 'testData')
    reader = DataStructs.MultiFPBReader()
    for idx in (1, 2, 3, 4):
        child = DataStructs.FPBReader(
            os.path.join(dataDir, "zinc_random200.%d.patt.fpb" % idx))
        # AddReader returns the running count of child readers
        self.assertEqual(reader.AddReader(child), idx)
    reader.Init()
    self.assertEqual(reader.GetNumBits(), 1024)
    self.assertEqual(len(reader), 4)
    fps = "0000000000404000100000001000040000300040222000002004000240000020000000"+\
          "8200010200000090000024040860070044003214820000220401054008018000226000"+\
          "4800800140000042000080008008020482400000200410800000300430200800400000"+\
          "0000080a0000800400010c800200648818100010880040"
    probe = DataStructs.BitVectToBinaryText(DataStructs.CreateFromFPSText(fps))
    # expected (score, fingerprint index, reader index) triples, best first
    expected = [(0.66412, 0, 3), (0.65289, 1, 2), (0.64341, 2, 1),
                (0.61940, 1, 0), (0.61905, 0, 0), (0.61344, 0, 1)]
    # serial and multi-threaded searches must agree (numThreads is a no-op
    # if the RDKit wasn't compiled with thread support)
    for nbrs in (reader.GetTanimotoNeighbors(probe, threshold=0.6),
                 reader.GetTanimotoNeighbors(probe, threshold=0.6, numThreads=4)):
        self.assertEqual(len(nbrs), len(expected))
        for got, want in zip(nbrs, expected):
            self.assertAlmostEqual(got[0], want[0], 4)
            self.assertEqual(got[1], want[1])
            self.assertEqual(got[2], want[2])
def test7MultiFPBReaderContains(self):
    """Containment search across four child readers, serial and threaded."""
    dataDir = os.path.join(RDConfig.RDBaseDir, 'Code', 'DataStructs', 'testData')
    reader = DataStructs.MultiFPBReader()
    for idx in (1, 2, 3, 4):
        child = DataStructs.FPBReader(
            os.path.join(dataDir, "zinc_random200.%d.patt.fpb" % idx))
        self.assertEqual(reader.AddReader(child), idx)
    reader.Init()
    self.assertEqual(reader.GetNumBits(), 1024)
    self.assertEqual(len(reader), 4)
    fps = "40081010824820021000500010110410003000402b20285000a4040240010030050000"+\
          "080001420040009000003d04086007080c03b31d920004220400074008098010206080"+\
          "00488001080000c64002a00080000200024c2000602410049200340820200002400010"+\
          "02200106090401056801080182006088101000088a0048"
    probe = DataStructs.BitVectToBinaryText(DataStructs.CreateFromFPSText(fps))
    # expected (fingerprint index, reader index) pairs
    expected = [(160, 0), (163, 0), (170, 0), (180, 2), (182, 3),
                (185, 0), (189, 0), (192, 3), (193, 0)]
    # serial and multi-threaded searches must agree
    for nbrs in (reader.GetContainingNeighbors(probe),
                 reader.GetContainingNeighbors(probe, numThreads=4)):
        self.assertEqual(len(nbrs), len(expected))
        for got, want in zip(nbrs, expected):
            self.assertEqual(got[0], want[0])
            self.assertEqual(got[1], want[1])
def test8MultiFPBReaderContainsInitOnSearch(self):
    """With initOnSearch=True the readers are initialized lazily; no explicit Init() call."""
    dataDir = os.path.join(RDConfig.RDBaseDir, 'Code', 'DataStructs', 'testData')
    reader = DataStructs.MultiFPBReader(initOnSearch=True)
    for idx in (1, 2, 3, 4):
        child = DataStructs.FPBReader(
            os.path.join(dataDir, "zinc_random200.%d.patt.fpb" % idx))
        self.assertEqual(reader.AddReader(child), idx)
    fps = "40081010824820021000500010110410003000402b20285000a4040240010030050000"+\
          "080001420040009000003d04086007080c03b31d920004220400074008098010206080"+\
          "00488001080000c64002a00080000200024c2000602410049200340820200002400010"+\
          "02200106090401056801080182006088101000088a0048"
    probe = DataStructs.BitVectToBinaryText(DataStructs.CreateFromFPSText(fps))
    expected = [(160, 0), (163, 0), (170, 0), (180, 2), (182, 3),
                (185, 0), (189, 0), (192, 3), (193, 0)]
    nbrs = reader.GetContainingNeighbors(probe, numThreads=4)
    self.assertEqual(len(nbrs), len(expected))
    for got, want in zip(nbrs, expected):
        self.assertEqual(got[0], want[0])
        self.assertEqual(got[1], want[1])
def test9MultiFPBReaderEdges(self):
    """Edge case: searching a MultiFPBReader with no child readers yields nothing.

    Fix: the original assigned an unused local (``basen``); removed.
    """
    reader = DataStructs.MultiFPBReader()
    reader.Init()
    fps = "0000000000404000100000001000040000300040222000002004000240000020000000"+\
          "8200010200000090000024040860070044003214820000220401054008018000226000"+\
          "4800800140000042000080008008020482400000200410800000300430200800400000"+\
          "0000080a0000800400010c800200648818100010880040"
    probe = DataStructs.BitVectToBinaryText(DataStructs.CreateFromFPSText(fps))
    nbrs = reader.GetTanimotoNeighbors(probe, threshold=0.6)
    self.assertEqual(len(nbrs), 0)
# Run the FPB reader test suite when invoked directly.
if __name__ == '__main__':
    unittest.main()
|
rvianello/rdkit
|
Code/DataStructs/Wrap/testFPB.py
|
Python
|
bsd-3-clause
| 11,861
|
[
"RDKit"
] |
8df5a368dcd2d58950e85020b8c815a2ae75687741e9d14345f1593563ea646a
|
"""
It is used to test client->db-> service.
It requires the Monitoring service to be running and installed (so discoverable in the .cfg),
and this monitoring service should be connecting to an ElasticSeach instance
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=invalid-name,wrong-import-position
import time
import json
from datetime import datetime
import pytest
from DIRAC.tests.Utilities.utils import find_all
from DIRAC.Core.Utilities.DIRACScript import DIRACScript as Script
Script.parseCommandLine()
from DIRAC import gLogger
from DIRAC.MonitoringSystem.Client.MonitoringClient import MonitoringClient
from DIRAC.Core.Tornado.Client.ClientSelector import TransferClientSelector as TransferClient
from DIRAC.Core.Utilities.JEncode import strToIntDict
#############################################
# Verbose output helps diagnose integration failures against the live service.
gLogger.setLevel("DEBUG")
# Shared client used by every test below.
client = MonitoringClient()
# fixture for preparation + teardown
@pytest.fixture
def putAndDelete():
    """Load the WMSHistory test data into the index, yield, then drop today's index."""
    dataFile = find_all("WMSHistory_testData.json", "../", "tests/Integration/Monitoring")[0]
    with open(dataFile) as handle:
        records = json.load(handle)
    # put
    res = client.addRecords("wmshistory_index", "WMSHistory", records)
    assert res["OK"]
    assert res["Value"] == len(records)
    time.sleep(5)
    yield putAndDelete
    # from here on is teardown: delete the per-day index created above
    indexName = "%s-%s" % ("wmshistory_index", datetime.today().strftime("%Y-%m-%d"))
    client.deleteIndex(indexName)
#############################################
# actual tests
#############################################
def test_listReports(putAndDelete):
    """The WMSHistory monitoring type exposes exactly the expected reports."""
    res = client.listReports("WMSHistory")
    assert res["OK"], res["Message"]
    assert res["Value"] == ["AverageNumberOfJobs", "NumberOfJobs", "NumberOfReschedules"]
def test_listUniqueKeyValues(putAndDelete):
    """Every WMSHistory grouping key is present, each with an empty value list."""
    res = client.listUniqueKeyValues("WMSHistory")
    assert res["OK"], res["Message"]
    expectedKeys = ["Status", "JobSplitType", "MinorStatus", "Site",
                    "ApplicationStatus", "User", "JobGroup", "UserGroup"]
    for key in expectedKeys:
        assert key in res["Value"]
    assert res["Value"] == {key: [] for key in expectedKeys}
def test_generateDelayedPlot(putAndDelete):
    """generateDelayedPlot succeeds for a NumberOfJobs plot grouped by Site."""
    reportArgs = (
        "WMSHistory",
        "NumberOfJobs",
        datetime(2016, 3, 16, 12, 30, 0, 0),
        datetime(2016, 3, 17, 19, 29, 0, 0),
        {"grouping": ["Site"]},
        "Site",
        {},
    )
    res = client.generateDelayedPlot(*reportArgs)
    assert res["OK"], res["Message"]
    # NOTE: checking the exact plot payload and retrieving it via
    # TransferClient('Monitoring/Monitoring').receiveFile() used to be done
    # here as well; that part of the test is currently disabled.
def test_getReport(putAndDelete):
    """getReport returns the raw NumberOfJobs-by-Site series for the test window."""
    params = (
        "WMSHistory",
        "NumberOfJobs",
        datetime(2016, 3, 16, 12, 30, 0, 0),
        datetime(2016, 3, 17, 19, 29, 0, 0),
        {"grouping": ["Site"]},
        "Site",
        {},
    )
    result = client.getReport(*params)
    assert result["OK"], result["Message"]
    # The service serializes integer timestamp keys as strings; convert back
    # before comparing with the expected literal below.
    result["Value"]["data"] = {site: strToIntDict(value) for site, value in result["Value"]["data"].items()}
    # None means "no data point in that 1800s bucket" for sites that report
    # only sparse samples.
    assert result["Value"] == {
        "data": {
            u"Multiple": {1458198000: 227.0},
            u"LCG.RRCKI.ru": {1458225000: 3.0},
            u"LCG.IHEP.su": {1458217800: 18.0},
            u"LCG.CNAF.it": {
                1458144000: None,
                1458172800: None,
                1458194400: None,
                1458145800: None,
                1458189000: None,
                1458147600: None,
                1458178200: None,
                1458183600: None,
                1458212400: None,
                1458149400: None,
                1458207000: None,
                1458151200: None,
                1458169200: None,
                1458201600: None,
                1458153000: None,
                1458196200: None,
                1458154800: None,
                1458174600: None,
                1458190800: None,
                1458156600: None,
                1458185400: None,
                1458214200: None,
                1458158400: None,
                1458180000: None,
                1458216000: None,
                1458208800: None,
                1458160200: None,
                1458203400: None,
                1458162000: None,
                1458142200: None,
                1458198000: None,
                1458163800: None,
                1458192600: None,
                1458165600: None,
                1458176400: None,
                1458187200: None,
                1458167400: None,
                1458210600: None,
                1458140400: 4.0,
                1458181800: None,
                1458205200: None,
                1458171000: None,
                1458217800: 22.0,
                1458199800: None,
            },
            u"LCG.NIKHEF.nl": {1458217800: 27.0},
            u"LCG.Bari.it": {1458221400: 34.0},
            u"Group.RAL.uk": {1458140400: 34.0},
            u"LCG.DESYZN.de": {1458225000: 43.0},
            u"LCG.RAL.uk": {
                1458144000: None,
                1458158400: None,
                1458194400: None,
                1458145800: None,
                1458223200: None,
                1458189000: None,
                1458221400: None,
                1458225000: 5.0,
                1458147600: None,
                1458135000: None,
                1458183600: None,
                1458212400: None,
                1458149400: None,
                1458178200: None,
                1458207000: None,
                1458151200: None,
                1458169200: None,
                1458172800: None,
                1458219600: None,
                1458201600: None,
                1458153000: None,
                1458196200: None,
                1458154800: None,
                1458160200: None,
                1458190800: None,
                1458156600: None,
                1458185400: None,
                1458214200: None,
                1458129600: 2.0,
                1458165600: None,
                1458180000: None,
                1458216000: None,
                1458208800: None,
                1458131400: None,
                1458174600: None,
                1458203400: None,
                1458162000: None,
                1458171000: None,
                1458198000: None,
                1458163800: None,
                1458192600: None,
                1458136800: None,
                1458133200: None,
                1458187200: None,
                1458167400: None,
                1458181800: None,
                1458210600: None,
                1458140400: None,
                1458138600: None,
                1458176400: None,
                1458205200: None,
                1458142200: None,
                1458217800: None,
                1458199800: None,
            },
            u"LCG.PIC.es": {1458129600: 1.0},
            u"LCG.GRIDKA.de": {1458129600: 2.0},
            u"LCG.Bristol.uk": {1458221400: 9.0},
            u"LCG.CERN.ch": {1458140400: 120.0},
            u"LCG.Bologna.it": {1458221400: 1.0},
        },
        "granularity": 1800,
    }
def test_getLastDayData(putAndDelete):
    """getLastDayData returns two records carrying the full WMSHistory field set."""
    selection = {"Status": "Running", "Site": "LCG.NIKHEF.nl"}
    res = client.getLastDayData("WMSHistory", selection)
    assert res["OK"], res["Message"]
    assert len(res["Value"]) == 2
    expectedFields = [
        u"Status",
        u"Jobs",
        u"JobSplitType",
        u"timestamp",
        u"MinorStatus",
        u"Site",
        u"Reschedules",
        u"ApplicationStatus",
        u"User",
        u"JobGroup",
        u"UserGroup",
    ]
    assert sorted(res["Value"][0]) == sorted(expectedFields)
|
ic-hep/DIRAC
|
tests/Integration/Monitoring/Test_MonitoringSystem.py
|
Python
|
gpl-3.0
| 8,716
|
[
"DIRAC"
] |
6edffc73eea2bce63eae2cebc65a406da4edcc8063265e42746241d5af985daa
|
from sqlalchemy import *
from migrate import *
import datetime
now = datetime.datetime.utcnow
# Need our custom types, but don't import anything else from model
from galaxy.model.custom_types import *
import logging
log = logging.getLogger( __name__ )
# NOTE(review): ``migrate_engine`` is not defined in this file -- presumably it
# is injected into the module namespace by sqlalchemy-migrate when the script
# is executed; verify against the migration framework version in use.
metadata = MetaData( migrate_engine )
# New table in changeset 1568:0b022adfdc34
MetadataFile_table = Table( "metadata_file", metadata,
    Column( "id", Integer, primary_key=True ),
    Column( "name", TEXT ),
    Column( "hda_id", Integer, ForeignKey( "history_dataset_association.id" ), index=True, nullable=True ),
    Column( "create_time", DateTime, default=now ),
    Column( "update_time", DateTime, index=True, default=now, onupdate=now ),
    Column( "deleted", Boolean, index=True, default=False ),
    Column( "purged", Boolean, index=True, default=False ) )
def upgrade():
    """Create the metadata_file table against the current database."""
    metadata.reflect()
    MetadataFile_table.create()
def downgrade():
    """Drop the metadata_file table (reverses upgrade)."""
    metadata.reflect()
    MetadataFile_table.drop()
|
volpino/Yeps-EURAC
|
lib/galaxy/model/migrate/versions/0002_metadata_file_table.py
|
Python
|
mit
| 971
|
[
"Galaxy"
] |
1e0170502414d39a278295a4272bb5654f9c127f603f5b166ec0daed5f616a39
|
import tensorflow as tf
import numpy as np
import autoencoder
import datasets
# #################### #
# Flags definition #
# #################### #
flags = tf.app.flags
FLAGS = flags.FLAGS

# Global configuration
flags.DEFINE_string('model_name', '', 'Model name.')
flags.DEFINE_string('dataset', 'mnist', 'Which dataset to use. ["mnist", "cifar10"]')
flags.DEFINE_string('cifar_dir', '', 'Path to the cifar 10 dataset directory.')
flags.DEFINE_integer('seed', -1, 'Seed for the random generators (>= 0). Useful for testing hyperparameters.')
flags.DEFINE_boolean('restore_previous_model', False, 'If true, restore previous model corresponding to model name.')
flags.DEFINE_boolean('encode_train', False, 'Whether to encode and store the training set.')
flags.DEFINE_boolean('encode_valid', False, 'Whether to encode and store the validation set.')
flags.DEFINE_boolean('encode_test', False, 'Whether to encode and store the test set.')

# Stacked Denoising Autoencoder specific parameters
flags.DEFINE_integer('n_components', 256, 'Number of hidden units in the dae.')
flags.DEFINE_string('corr_type', 'none', 'Type of input corruption. ["none", "masking", "salt_and_pepper", "gaussian"]')
flags.DEFINE_float('corr_frac', 0., 'Fraction of the input to corrupt.')
flags.DEFINE_integer('xavier_init', 1, 'Value for the constant in xavier weights initialization.')
flags.DEFINE_string('enc_act_func', 'tanh', 'Activation function for the encoder. ["sigmoid", "tanh"]')
flags.DEFINE_string('dec_act_func', 'none', 'Activation function for the decoder. ["sigmoid", "tanh", "none"]')
flags.DEFINE_string('main_dir', 'dae/', 'Directory to store data relative to the algorithm.')
flags.DEFINE_string('loss_func', 'mean_squared', 'Loss function. ["mean_squared" or "cross_entropy"]')
flags.DEFINE_integer('verbose', 0, 'Level of verbosity. 0 - silent, 1 - print accuracy.')
flags.DEFINE_integer('weight_images', 0, 'Number of weight images to generate.')
flags.DEFINE_string('opt', 'gradient_descent', '["gradient_descent", "ada_grad", "momentum"]')
flags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.')
flags.DEFINE_float('momentum', 0.5, 'Momentum parameter.')
flags.DEFINE_integer('num_epochs', 10, 'Number of epochs.')
flags.DEFINE_integer('batch_size', 10, 'Size of each mini-batch.')

# Validate the flag values before doing any work.
# NOTE(review): the asserts accept 'relu' for the activation functions even
# though the flag help strings above do not mention it -- confirm which is
# authoritative, and that 'none' is intentionally rejected for enc_act_func.
assert FLAGS.dataset in ['mnist', 'cifar10']
assert FLAGS.enc_act_func in ['sigmoid', 'tanh', 'relu']
assert FLAGS.dec_act_func in ['sigmoid', 'tanh', 'relu', 'none']
assert FLAGS.corr_type in ['masking', 'salt_and_pepper', 'gaussian', 'none']
assert 0. <= FLAGS.corr_frac <= 1.
assert FLAGS.loss_func in ['cross_entropy', 'mean_squared']
assert FLAGS.opt in ['gradient_descent', 'ada_grad', 'momentum']
if __name__ == '__main__':
    # Labels default to None: the mnist branch loads the data unsupervised, so
    # trY/vlY/teY were previously undefined on that path and the
    # dae.transform(...) calls below raised a NameError.  dae.transform must
    # therefore accept None labels -- TODO confirm against autoencoder module.
    trY = vlY = teY = None
    if FLAGS.dataset == 'mnist':
        # ################# #
        #   MNIST Dataset   #
        # ################# #
        trX, vlX, teX = datasets.load_mnist_dataset(mode='unsupervised')
    elif FLAGS.dataset == 'cifar10':
        # ################### #
        #   Cifar10 Dataset   #
        # ################### #
        trX, trY, teX, teY = datasets.load_cifar10_dataset(FLAGS.cifar_dir, mode='supervised')
        # Validation set is the first half of the test set
        vlX = teX[:5000]
        vlY = teY[:5000]
        print(len(teY))
        print(len(vlY))
    else:  # cannot be reached (the asserts above restrict FLAGS.dataset), just for completeness
        trX = None
        vlX = None
        teX = None

    # Create the object from the command-line flags.
    dae = autoencoder.DenoisingAutoencoder(
        seed=FLAGS.seed, model_name=FLAGS.model_name, n_components=FLAGS.n_components,
        enc_act_func=FLAGS.enc_act_func, dec_act_func=FLAGS.dec_act_func, xavier_init=FLAGS.xavier_init,
        corr_type=FLAGS.corr_type, corr_frac=FLAGS.corr_frac, dataset=FLAGS.dataset,
        loss_func=FLAGS.loss_func, main_dir=FLAGS.main_dir, opt=FLAGS.opt,
        learning_rate=FLAGS.learning_rate, momentum=FLAGS.momentum,
        verbose=FLAGS.verbose, num_epochs=FLAGS.num_epochs, batch_size=FLAGS.batch_size)

    # Fit the model (validated against the test set, as in the original script).
    dae.fit(trX, teX, restore_previous_model=FLAGS.restore_previous_model)

    # Encode each split and optionally store it.
    dae.transform(trX, trY, name='train', save=FLAGS.encode_train)
    dae.transform(vlX, vlY, name='validation', save=FLAGS.encode_valid)
    dae.transform(teX, teY, name='test', save=FLAGS.encode_test)

    # Persist the learned parameters alongside the encoded data.
    params = dae.get_model_parameters()
    np.save(dae.data_dir + dae.model_name + '-encw', params['enc_w'])
    np.save(dae.data_dir + dae.model_name + '-encb', params['enc_b'])
    np.save(dae.data_dir + dae.model_name + '-decb', params['dec_b'])

    # Save the first-layer weights as images.  32x32 matches cifar10 images;
    # for mnist the receptive fields are 28x28 -- TODO confirm intended size.
    dae.get_weights_as_images(32, 32, max_images=FLAGS.weight_images)
|
HoliestCow/ece692_deeplearning
|
project3/run_autoencoder.py
|
Python
|
mit
| 5,200
|
[
"Gaussian"
] |
250f9b03e53f646831481a4b98ab46282cb063208e0b4a9585d54b4c0ed32ca7
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright © 2016 Matthew Stone <mstone5@mgh.harvard.edu>
# Distributed under terms of the MIT license.
"""
"""
import sys
import argparse
import re
from collections import Counter
import numpy as np
import pysam
from rpc import ReadPair
META = """##fileformat=VCFv4.2
##source=RPC
##INFO=<ID=SVTYPE,Number=1,Type=String,Description="Type of structural variant">
##INFO=<ID=SVLEN,Number=.,Type=Integer,Description="Difference in length between REF and ALT alleles">
##INFO=<ID=CHR2,Number=1,Type=String,Description="Mate chromosome in interchromosomal events">
##INFO=<ID=END,Number=1,Type=Integer,Description="End position of the variant described in this record">
##INFO=<ID=STRANDS,Number=.,Type=String,Description="Strand orientation of the adjacency in BEDPE format (DEL:+-, DUP:-+, INV:++/--)">
##INFO=<ID=IMPRECISE,Number=0,Type=Flag,Description="Imprecise structural variation">
##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">
##FORMAT=<ID=PE,Number=1,Type=Integer,Description="Number of paired-end reads supporting the variant">
##FORMAT=<ID=MAPQ,Number=1,Type=Float,Description="Average MapQ of reads in cluster">
##FORMAT=<ID=UNIQ,Number=1,Type=Float,Description="Cluster uniqueness = (# unique mapping positions) / (# total reads)">
##FORMAT=<ID=ALTREF,Number=1,Type=Float,Description="Alt/ref ratio = (2 * cluster size) / ((2 * cluster size) + (local coverage at A) + (local coverage at B))">
##FORMAT=<ID=GCOV,Number=1,Type=Float,Description="Global coverage ratio = (cluster size) / (median library coverage)">
##FORMAT=<ID=MAPQA,Number=1,Type=Float,Description="Average MapQ of reads at A">
##FORMAT=<ID=MAPQB,Number=1,Type=Float,Description="Average MapQ of reads at B">
##FORMAT=<ID=UNIQA,Number=1,Type=Float,Description="Uniqueness of mapping positions at A">
##FORMAT=<ID=UNIQB,Number=1,Type=Float,Description="Uniqueness of mapping positions at B">
##FORMAT=<ID=ALTREFA,Number=1,Type=Float,Description="Alt/ref ratio = (cluster size) / (cluster size + local coverage at A)">
##FORMAT=<ID=ALTREFB,Number=1,Type=Float,Description="Alt/ref ratio = (cluster size) / (cluster size + local coverage at B)">
"""
HEADER = "#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\t{SAMPLE}\n"
def RPCParser(filename=None, fsock=None, prefix=''):
    """
    Parse read pairs from RPC-formatted output.

    Clusters are separated by blank lines; only clusters whose both ends map
    to chromosomes 1-22/X/Y are yielded.

    Fixes over the original: the file opened from *filename* is now closed
    when the generator finishes, and a leading/duplicate blank line no longer
    raises IndexError on an empty cluster.

    Parameters
    ----------
    filename : str
    fsock : handle
    prefix : str
        prepended (with '_') to each cluster name

    Yields
    ------
    RPCluster
    """
    if fsock:
        rpcfile = fsock
        close_when_done = False
    elif filename:
        rpcfile = open(filename)
        close_when_done = True
    else:
        raise Exception('Must specify filename or fsock')

    blank_exp = re.compile(r'^\s*$')
    cluster = []
    cname = ''
    CHROMS = [str(x) for x in range(1, 23)] + 'X Y'.split()
    try:
        for line in rpcfile:
            if blank_exp.match(line):
                # Guard: skip empty clusters caused by leading/consecutive blanks.
                if cluster:
                    full_name = '_'.join([prefix, cname]).strip('_')
                    if cluster[0].chrA in CHROMS and cluster[0].chrB in CHROMS:
                        yield RPCluster(cluster, full_name)
                    cluster = []
            else:
                cluster.append(ReadPair.from_rpc(line))
                cname = line.split()[0]
        # Flush the final cluster if the file didn't end with a blank line.
        if len(cluster) > 0:
            full_name = '_'.join([prefix, cname]).strip('_')
            if cluster[0].chrA in CHROMS and cluster[0].chrB in CHROMS:
                yield RPCluster(cluster, full_name)
    finally:
        if close_when_done:
            rpcfile.close()
class RPCluster(object):
    """A cluster of read pairs supporting one candidate structural variant.

    Computes position, SV type, and QC metrics from the pairs and renders
    the cluster as a single VCF record via :meth:`vcf`.
    """

    def __init__(self, pairs, name):
        self.pairs = pairs
        # All pairs in a cluster must share both anchor chromosomes.
        if len(set(rp.chrA for rp in self.pairs)) > 1:
            raise Exception('Mismatched chromosomes')
        if len(set(rp.chrB for rp in self.pairs)) > 1:
            raise Exception('Mismatched chromosomes')
        self.CHROM = self.pairs[0].chrA
        self.chrB = self.pairs[0].chrB
        # Multiset of strand orientations ('+-', '++', ...) across the cluster.
        self.strands = Counter([pair.strands for pair in self.pairs])
        self.POS, self.END = self._get_pos()
        self.ID = name
        self.REF = 'N'
        self.QUAL = '.'
        self.FILTER = '.'
        # PE: paired-end read support == cluster size.
        self.PE = len(self.pairs)
        # Local helper: fraction of distinct mapping positions in *starts*.
        def _uniq(starts):
            return len(set(starts)) / len(starts)
        self.uniqA = _uniq([pair.read1.reference_start for pair in self.pairs])
        self.uniqB = _uniq([pair.read2.reference_start for pair in self.pairs])
        self.UNIQ = _uniq([pair.read1.reference_start for pair in self.pairs] +
                          [pair.read2.reference_start for pair in self.pairs])
        self.mapqA = np.mean([pair.read1.mapq for pair in self.pairs])
        self.mapqB = np.mean([pair.read2.mapq for pair in self.pairs])
        self.MAPQ = np.mean([pair.read1.mapq for pair in self.pairs] +
                            [pair.read2.mapq for pair in self.pairs])

    @property
    def ALT(self):
        """VCF ALT field: breakend notation for BND, symbolic allele otherwise."""
        if self.SVTYPE == 'BND':
            if len(self.strands.keys()) == 1:
                strand = list(self.strands.keys())[0]
                # VCF 4.2 single-breakend bracket notation per orientation.
                if strand == '++':
                    alt = 'N]{chrB}:{end}]'
                elif strand == '+-':
                    alt = 'N[{chrB}:{end}['
                elif strand == '-+':
                    alt = ']{chrB}:{end}]N'
                elif strand == '--':
                    alt = '[{chrB}:{end}[N'
            else:
                alt = '<{chrB}:{end}>'
            return alt.format(chrB=self.chrB, end=self.END)
        else:
            return '<' + self.SVTYPE + '>'

    def _get_pos(self):
        """Derive (POS, END) from read coordinates according to the SV type."""
        posA = [rp.read1.reference_start for rp in self.pairs]
        plusA = [rp.read1.reference_start for rp in self.pairs
                 if rp.strands.startswith('+')]
        minusA = [rp.read1.reference_start for rp in self.pairs
                  if rp.strands.startswith('-')]
        posB = [rp.read2.reference_start for rp in self.pairs]
        plusB = [rp.read2.reference_start for rp in self.pairs
                 if rp.strands.startswith('+')]
        minusB = [rp.read2.reference_start for rp in self.pairs
                  if rp.strands.startswith('-')]
        svtype = self.SVTYPE
        if svtype == 'DEL':
            POS, END = max(posA), min(posB)
        elif svtype == 'DUP':
            POS, END = min(posA), max(posB)
        elif svtype == 'INV':
            # Average ++ and -- strand positions
            POS = np.mean([max(plusA), min(minusA)])
            END = np.mean([max(plusB), min(minusB)])
        elif svtype == 'BND':
            if len(self.strands.keys()) == 1:
                strand = list(self.strands.keys())[0]
                if strand == '++':
                    POS, END = max(posA), max(posB)
                elif strand == '+-':
                    POS, END = max(posA), min(posB)
                elif strand == '-+':
                    POS, END = min(posA), max(posB)
                elif strand == '--':
                    POS, END = min(posA), min(posB)
            else:
                # Mixed orientations: average the per-strand extremes.
                As = []
                if plusA:
                    As.append(max(plusA))
                if minusA:
                    As.append(min(minusA))
                POS = np.mean(As)
                Bs = []
                if plusB:
                    Bs.append(max(plusB))
                if minusB:
                    Bs.append(min(minusB))
                END = np.mean(Bs)
        return int(POS), int(END)

    @property
    def SVTYPE(self):
        """Classify the cluster: DEL (+-), DUP (-+), INV (++ and --), else BND."""
        if self.chrB != self.CHROM:
            return 'BND'
        if len(self.strands.keys()) == 1:
            strand = list(self.strands.keys())[0]
            if strand == '++':
                return 'BND'
            elif strand == '--':
                return 'BND'
            elif strand == '+-':
                return 'DEL'
            elif strand == '-+':
                return 'DUP'
            else:
                raise Exception('Invalid strand')
        else:
            strands = list(self.strands.keys())
            if len(strands) > 2:
                return 'BND'
            elif '++' in strands and '--' in strands:
                return 'INV'
            else:
                return 'BND'

    @property
    def INFO(self):
        """Render the VCF INFO field (SVTYPE, SVLEN, END, STRANDS, IMPRECISE)."""
        strands = sorted(self.strands.items(), key=lambda tup: tup[0])
        strands = ['{0}:{1}'.format(s, c) for s, c in strands]
        strands = ','.join(strands)
        if self.SVTYPE == 'BND':
            INFO = ('SVTYPE={svtype};SVLEN={svlen};CHR2={chr2};END={end};'
                    'STRANDS={strands};IMPRECISE')
            INFO = INFO.format(svtype=self.SVTYPE, svlen=(self.POS-self.END),
                               end=self.END, strands=strands, chr2=self.chrB)
        else:
            INFO = ('SVTYPE={svtype};SVLEN={svlen};END={end};'
                    'STRANDS={strands};IMPRECISE')
            INFO = INFO.format(svtype=self.SVTYPE, svlen=(self.POS-self.END),
                               end=self.END, strands=strands)
        return INFO

    def vcf(self, sample, mean_cov, cov_matrix):
        """Render this cluster as one tab-separated VCF record line.

        sample: sample column name to look up in the coverage matrix header.
        mean_cov: library-wide coverage used for the GCOV ratio.
        cov_matrix: tabix-indexed per-position coverage matrix
        (pysam.TabixFile-like: .header and .fetch).
        """
        gcov_ratio = self.PE / mean_cov
        header = next(cov_matrix.header).decode('utf-8')
        cov_idx = header.split().index(sample)
        ref = 0
        # Local coverage at each breakpoint feeds the ALTREF ratios.
        localA = next(cov_matrix.fetch(self.CHROM, self.POS, self.POS+1))
        localA = int(localA.split()[cov_idx])
        ref = ref + localA
        localA = self.PE / float(self.PE + localA)
        localB = next(cov_matrix.fetch(self.chrB, self.END, self.END+1))
        localB = int(localB.split()[cov_idx])
        ref = ref + localB
        localB = self.PE / float(self.PE + localB)
        ALTREF = (2 * self.PE) / float(2 * self.PE + ref)
        record = '\t'.join('{CHROM} {POS} {ID} {REF} {ALT} {QUAL} {FILTER} '
                           '{INFO} {FORMAT} {CALL}'.split())
        FORMAT = ('GT:PE:MAPQ:UNIQ:ALTREF:GCOV:'
                  'MAPQA:MAPQB:UNIQA:UNIQB:ALTREFA:ALTREFB')
        CALL = ('{GT}:{PE}:{MAPQ:.01f}:{UNIQ:.03f}:{ALTREF:.03f}:{GCOV:.03f}:'
                '{MAPQA:.01f}:{MAPQB:.01f}:'
                '{UNIQA:.03f}:{UNIQB:.03f}:'
                '{ALTREFA:.03f}:{ALTREFB:.03f}')
        CALL = CALL.format(GT='./.',
                           PE=self.PE,
                           MAPQ=self.MAPQ,
                           UNIQ=self.UNIQ,
                           ALTREF=ALTREF,
                           GCOV=gcov_ratio,
                           MAPQA=self.mapqA,
                           MAPQB=self.mapqB,
                           UNIQA=self.uniqA,
                           UNIQB=self.uniqB,
                           ALTREFA=localA,
                           ALTREFB=localB)
        record = record.format(CHROM=self.CHROM,
                               POS=self.POS,
                               ID=self.ID,
                               REF=self.REF,
                               ALT=self.ALT,
                               QUAL=self.QUAL,
                               FILTER=self.FILTER,
                               INFO=self.INFO,
                               FORMAT=FORMAT,
                               CALL=CALL)
        return record
def main():
    """CLI entry point: convert four per-type RPC cluster files into one VCF."""
    parser = argparse.ArgumentParser(description="")
    parser.add_argument('-r', '--rpcfiles', nargs=4, help='del dup inv tloc')
    parser.add_argument('vcf', type=argparse.FileType('w'),
                        nargs='?', default=sys.stdout)
    parser.add_argument('-s', '--sample')
    parser.add_argument('-m', '--cov_matrix', type=pysam.TabixFile)
    parser.add_argument('-c', '--gcov', type=int)
    args = parser.parse_args()

    # Emit the VCF meta-information and column header first.
    args.vcf.write(META)
    args.vcf.write(HEADER.format(SAMPLE=args.sample))

    # The four input files are expected in this fixed order.
    for svtype, rpcfile in zip('del dup inv tloc'.split(), args.rpcfiles):
        prefix = '_'.join([args.sample, svtype])
        for cluster in RPCParser(filename=rpcfile, prefix=prefix):
            record = cluster.vcf(args.sample, args.gcov, args.cov_matrix)
            args.vcf.write(record + '\n')
# Script entry point.
if __name__ == '__main__':
    main()
|
talkowski-lab/Holmes
|
readpaircluster/rpc2vcf.py
|
Python
|
mit
| 11,914
|
[
"pysam"
] |
5ccf3415da47945156f88045cea4db8e6836b828550f4a9fc2f77ac4b67657d2
|
#!/usr/bin/env python3
#
# Argument Clinic
# Copyright 2012-2013 by Larry Hastings.
# Licensed to the PSF under a contributor agreement.
#
import abc
import ast
import collections
import contextlib
import copy
import cpp
import functools
import hashlib
import inspect
import io
import itertools
import os
import pprint
import re
import shlex
import string
import sys
import tempfile
import textwrap
import traceback
import types
from types import *
NoneType = type(None)
# TODO:
#
# soon:
#
# * allow mixing any two of {positional-only, positional-or-keyword,
# keyword-only}
# * dict constructor uses positional-only and keyword-only
# * max and min use positional only with an optional group
# and keyword-only
#
version = '1'

_empty = inspect._empty
_void = inspect._void

NoneType = type(None)  # NOTE(review): duplicates the identical assignment near the imports above
class Unspecified:
    """Sentinel type whose single instance marks "no value was supplied"."""

    def __repr__(self):
        return '<Unspecified>'


unspecified = Unspecified()
class Null:
    """Sentinel type whose single instance stands in for C NULL."""

    def __repr__(self):
        return '<Null>'


NULL = Null()
class Unknown:
    """Sentinel type whose single instance marks an undetermined value."""

    def __repr__(self):
        return '<Unknown>'


unknown = Unknown()

# Marker line separating the signature from the docstring body.
sig_end_marker = '--'
_text_accumulator_nt = collections.namedtuple("_text_accumulator", "text append output")
def _text_accumulator():
text = []
def output():
s = ''.join(text)
text.clear()
return s
return _text_accumulator_nt(text, text.append, output)
text_accumulator_nt = collections.namedtuple("text_accumulator", "text append")
def text_accumulator():
"""
Creates a simple text accumulator / joiner.
Returns a pair of callables:
append, output
"append" appends a string to the accumulator.
"output" returns the contents of the accumulator
joined together (''.join(accumulator)) and
empties the accumulator.
"""
text, append, output = _text_accumulator()
return text_accumulator_nt(append, output)
def warn_or_fail(fail=False, *args, filename=None, line_number=None):
    """Print a "Warning"/"Error" message built from *args*; exit(-1) on error.

    When filename/line_number are not given, they default to the state of the
    module-level global ``clinic`` (assigned elsewhere in this file).
    """
    joined = " ".join([str(a) for a in args])
    add, output = text_accumulator()
    if fail:
        add("Error")
    else:
        add("Warning")
    # ``clinic`` is a module-level global set elsewhere in this file.
    if clinic:
        if filename is None:
            filename = clinic.filename
        if getattr(clinic, 'block_parser', None) and (line_number is None):
            line_number = clinic.block_parser.line_number
    if filename is not None:
        add(' in file "' + filename + '"')
    if line_number is not None:
        add(" on line " + str(line_number))
    add(':\n')
    add(joined)
    print(output())
    if fail:
        sys.exit(-1)
def warn(*args, filename=None, line_number=None):
    """Print a warning message without terminating the process."""
    return warn_or_fail(False, *args, filename=filename, line_number=line_number)

def fail(*args, filename=None, line_number=None):
    """Print an error message and exit(-1)."""
    return warn_or_fail(True, *args, filename=filename, line_number=line_number)
def quoted_for_c_string(s):
    """Escape backslashes and both quote characters for use in a C string.

    A single translate() pass avoids the ordering concern of chained
    replace() calls (backslash first).
    """
    table = str.maketrans({
        '\\': '\\\\',
        '"': '\\"',
        "'": "\\'",
    })
    return s.translate(table)
def c_repr(s):
    """Wrap s in double quotes (s must already be escaped for C)."""
    return '"{}"'.format(s)
# Bound method: returns a Match object (truthy) for a legal C identifier.
is_legal_c_identifier = re.compile('^[A-Za-z_][A-Za-z0-9_]*$').match

def is_legal_py_identifier(s):
    """True if every dotted component of s is a legal C-style identifier."""
    return all(map(is_legal_c_identifier, s.split('.')))
# C keywords are perfectly legal Python identifiers, but they can't be
# used as C variable names.  ensure_legal_c_identifier() (below) appends
# "_value" to a parameter name that collides with one of these.
c_keywords = set("""
asm auto break case char const continue default do double
else enum extern float for goto if inline int long
register return short signed sizeof static struct switch
typedef typeof union unsigned void volatile while
""".strip().split())
def ensure_legal_c_identifier(s):
    """Validate s as a C identifier; rename C keywords to "<kw>_value"."""
    # for now, just complain if what we're given isn't legal
    if not is_legal_c_identifier(s):
        fail("Illegal C identifier: {}".format(s))
    # a C keyword can't serve as a variable name; pick something else
    return s + "_value" if s in c_keywords else s
def rstrip_lines(s):
    """Strip trailing whitespace from every line of s (no trailing newline added)."""
    return '\n'.join(line.rstrip() for line in s.split('\n'))
def format_escape(s):
    """Double every curly brace so s survives a later format_map() pass."""
    return s.replace('{', '{{').replace('}', '}}')
def linear_format(s, **kwargs):
    """
    str.format-like substitution with whole-line semantics:
      * A substituted field must sit alone on its line (optionally
        preceded by whitespace, which becomes the indent).
      * An empty substitution value removes the line entirely.
      * A field not present in kwargs passes through unmodified.
      * A non-empty value is right-stripped per line, indented to the
        source line's indent, and followed by a newline.
    """
    chunks = []
    for line in s.split('\n'):
        prefix, brace, rest = line.partition('{')
        if not brace:
            chunks.append(line)
            chunks.append('\n')
            continue
        field, brace, suffix = rest.partition('}')
        if not brace or field not in kwargs:
            # not a recognized replacement line; pass through verbatim
            chunks.append(line)
            chunks.append('\n')
            continue
        if suffix:
            fail("Text found after {" + field + "} block marker! It must be on a line by itself.")
        if prefix.strip():
            fail("Non-whitespace characters found before {" + field + "} block marker! It must be on a line by itself.")
        replacement = kwargs[field]
        if not replacement:
            # empty value: the whole source line disappears
            continue
        stripped = '\n'.join(l.rstrip() for l in replacement.split('\n'))
        chunks.append(textwrap.indent(stripped, prefix))
        chunks.append('\n')
    # drop the final newline added by the loop
    return ''.join(chunks)[:-1]
def indent_all_lines(s, prefix):
    """
    Prepend 'prefix' to every line of 's', including blank interior
    lines (unlike textwrap.indent).  A trailing empty line (after a
    final newline) is left alone; a blank s comes back unchanged.
    """
    lines = s.split('\n')
    tail = lines.pop()
    pieces = ['{}{}\n'.format(prefix, line) for line in lines]
    if tail:
        pieces.append(prefix + tail)
    return ''.join(pieces)
def suffix_all_lines(s, suffix):
    """
    Append 'suffix' to every line of 's'.  A trailing empty line
    (after a final newline) is left alone; a blank s comes back
    unchanged.
    """
    lines = s.split('\n')
    tail = lines.pop()
    pieces = ['{}{}\n'.format(line, suffix) for line in lines]
    if tail:
        pieces.append(tail + suffix)
    return ''.join(pieces)
def version_splitter(s):
    """Split a version string into a tuple of integers.

    The letters a, b and c map to -3, -2 and -1 respectively, which
    permits Python-style version strings such as "1.4b3".
    Raises ValueError on anything else.
    """
    result = []
    digits = []
    def emit():
        # each dot/letter must be preceded by at least one digit
        if not digits:
            raise ValueError('Unsupported version string: ' + repr(s))
        result.append(int(''.join(digits)))
        del digits[:]
    for ch in s:
        if ch.isdigit():
            digits.append(ch)
        elif ch == '.':
            emit()
        elif ch in 'abc':
            emit()
            result.append('abc'.index(ch) - 3)
        else:
            raise ValueError('Illegal character ' + repr(ch) + ' in version string ' + repr(s))
    emit()
    return tuple(result)
def version_comparitor(version1, version2):
    """Compare two version strings; return -1, 0 or 1 (cmp()-style).

    Components are produced by version_splitter(); missing trailing
    components are padded with 0, so "1.4" compares equal to "1.4.0".
    """
    # (fixed: the old code wrapped this in enumerate() and never used the index)
    iterator = itertools.zip_longest(version_splitter(version1), version_splitter(version2), fillvalue=0)
    for a, b in iterator:
        if a < b:
            return -1
        if a > b:
            return 1
    return 0
class CRenderData:
    """Per-function accumulator for the C code fragments produced by
    converters; CLanguage's templates join these lists into final code."""

    def __init__(self):

        # The C statements to declare variables.
        # Should be full lines with \n eol characters.
        self.declarations = []

        # The C statements required to initialize the variables before the parse call.
        # Should be full lines with \n eol characters.
        self.initializers = []

        # The C statements needed to dynamically modify the values
        # parsed by the parse call, before calling the impl.
        self.modifications = []

        # The entries for the "keywords" array for PyArg_ParseTuple.
        # Should be individual strings representing the names.
        self.keywords = []

        # The "format units" for PyArg_ParseTuple.
        # Should be individual strings that will get
        # joined into the final format string (see render_function).
        self.format_units = []

        # The varargs arguments for PyArg_ParseTuple.
        self.parse_arguments = []

        # The parameter declarations for the impl function.
        self.impl_parameters = []

        # The arguments to the impl function at the time it's called.
        self.impl_arguments = []

        # For return converters: the name of the variable that
        # should receive the value returned by the impl.
        self.return_value = "return_value"

        # For return converters: the code to convert the return
        # value from the parse function.  This is also where
        # you should check the _return_value for errors, and
        # "goto exit" if there are any.
        self.return_conversion = []

        # The C statements required to clean up after the impl call.
        self.cleanup = []
class FormatCounterFormatter(string.Formatter):
    """
    Tallies how often each replacement-field name occurs in a format
    string.  After evaluating "string {a}, {b}, {c}, {a}" the "counts"
    attribute is Counter({'a': 2, 'b': 1, 'c': 1}).
    """

    def __init__(self):
        self.counts = collections.Counter()

    def get_value(self, key, args, kwargs):
        # record the reference, substitute nothing
        self.counts[key] += 1
        return ''
class Language(metaclass=abc.ABCMeta):
    """Abstract description of how Clinic blocks are embedded in a host file.

    Subclasses define the four marker-line templates below (each must use
    the {dsl_name} replacement field exactly once — see validate()) and
    implement render().
    """

    start_line = ""
    body_prefix = ""
    stop_line = ""
    checksum_line = ""

    def __init__(self, filename):
        pass

    @abc.abstractmethod
    def render(self, clinic, signatures):
        pass

    def parse_line(self, line):
        # optional hook: called for every line the block parser consumes
        # (see CLanguage, which feeds lines to a C-preprocessor monitor)
        pass

    def validate(self):
        """Sanity-check the marker-line templates' replacement fields."""
        def assert_only_one(attr, *additional_fields):
            """
            Ensures that the string found at getattr(self, attr)
            contains exactly one formatter replacement string for
            each valid field.  The list of valid fields is
            ['dsl_name'] extended by additional_fields.
            e.g.
                self.fmt = "{dsl_name} {a} {b}"
                # this passes
                self.assert_only_one('fmt', 'a', 'b')
                # this fails, the format string has a {b} in it
                self.assert_only_one('fmt', 'a')
                # this fails, the format string doesn't have a {c} in it
                self.assert_only_one('fmt', 'a', 'b', 'c')
                # this fails, the format string has two {a}s in it,
                # it must contain exactly one
                self.fmt2 = '{dsl_name} {a} {a}'
                self.assert_only_one('fmt2', 'a')
            """
            fields = ['dsl_name']
            fields.extend(additional_fields)
            line = getattr(self, attr)
            fcf = FormatCounterFormatter()
            fcf.format(line)
            # NOTE: local_fail reads "name" from the enclosing for-loops
            # below via late binding — it must only be called inside them.
            def local_fail(should_be_there_but_isnt):
                if should_be_there_but_isnt:
                    fail("{} {} must contain {{{}}} exactly once!".format(
                        self.__class__.__name__, attr, name))
                else:
                    fail("{} {} must not contain {{{}}}!".format(
                        self.__class__.__name__, attr, name))
            for name, count in fcf.counts.items():
                if name in fields:
                    if count > 1:
                        local_fail(True)
                else:
                    local_fail(False)
            for name in fields:
                if fcf.counts.get(name) != 1:
                    local_fail(True)

        assert_only_one('start_line')
        assert_only_one('stop_line')

        # checksum_line may carry either an {arguments} or a {checksum} field
        field = "arguments" if "{arguments}" in self.checksum_line else "checksum"
        assert_only_one('checksum_line', field)
class PythonLanguage(Language):
    """Marker-line templates for Clinic blocks embedded in Python files."""

    language = 'Python'
    start_line = "#/*[{dsl_name} input]"
    body_prefix = "#"
    stop_line = "#[{dsl_name} start generated code]*/"
    checksum_line = "#/*[{dsl_name} end generated code: {arguments}]*/"
def permute_left_option_groups(l):
    """
    Given [1, 2, 3], should yield:
       ()
       (3,)
       (2, 3)
       (1, 2, 3)
    (i.e. ever-longer suffixes of the concatenated groups.)
    """
    groups = list(l)
    for i in range(len(groups), -1, -1):
        yield tuple(itertools.chain.from_iterable(groups[i:]))
def permute_right_option_groups(l):
    """
    Given [1, 2, 3], should yield:
      ()
      (1,)
      (1, 2)
      (1, 2, 3)
    (i.e. ever-longer prefixes of the concatenated groups.)
    """
    groups = list(l)
    for i in range(len(groups) + 1):
        yield tuple(itertools.chain.from_iterable(groups[:i]))
def permute_optional_groups(left, required, right):
    """
    Computes the set of acceptable argument lists for the provided
    iterables of argument groups, returned as a tuple of tuples sorted
    by length.  (Despite the original comment this is not a generator.)

    Algorithm: prefer left options over right options.
    If required is empty, left must also be empty.
    """
    required = tuple(required)
    if not required:
        assert not left
    # (fixed: removed a dead "result = []" local that was never used)
    accumulator = []
    counts = set()
    for r in permute_right_option_groups(right):
        for l in permute_left_option_groups(left):
            t = l + required + r
            # keep only the first (left-preferring) list of each length
            if len(t) in counts:
                continue
            counts.add(len(t))
            accumulator.append(t)

    accumulator.sort(key=len)
    return tuple(accumulator)
def strip_leading_and_trailing_blank_lines(s):
    """Drop blank lines at both ends of s (interior blanks are kept)."""
    lines = s.rstrip().split('\n')
    while lines and not lines[0].strip():
        del lines[0]
    return '\n'.join(lines)
@functools.lru_cache()
def normalize_snippet(s, *, indent=0):
    """
    Reformats s:
        * removes leading and trailing blank lines
        * ensures that it does not end with a newline
        * dedents so the first nonwhite character on any line is at
          column "indent"
    Results are memoized (lru_cache), so identical snippets are cheap.
    """
    lines = s.rstrip().split('\n')
    while lines and not lines[0].strip():
        del lines[0]
    text = textwrap.dedent('\n'.join(lines))
    if indent:
        text = textwrap.indent(text, ' ' * indent)
    return text
def wrap_declarations(text, length=78):
    """
    A simple-minded text wrapper for C function declarations.
    It views a declaration line as looking like this:
        xxxxxxxx(xxxxxxxxx,xxxxxxxxx)
    If called with length=30, it would wrap that line into
        xxxxxxxx(xxxxxxxxx,
                 xxxxxxxxx)
    (If the declaration has zero or one parameters, this
    function won't wrap it.)
    If this doesn't work properly, it's probably better to
    start from scratch with a more sophisticated algorithm,
    rather than try and improve/debug this dumb little function.
    """
    lines = []
    for line in text.split('\n'):
        prefix, _, after_l_paren = line.partition('(')
        if not after_l_paren:
            # no parenthesis: not a declaration, pass through untouched
            lines.append(line)
            continue
        parameters, _, after_r_paren = after_l_paren.partition(')')
        if not _:
            lines.append(line)
            continue
        if ',' not in parameters:
            # zero or one parameter: nothing to wrap
            lines.append(line)
            continue
        parameters = [x.strip() + ", " for x in parameters.split(',')]
        prefix += "("
        if len(prefix) < length:
            # continuation lines align under the opening parenthesis
            spaces = " " * len(prefix)
        else:
            spaces = " " * 4
        while parameters:
            line = prefix
            first = True
            while parameters:
                # always place at least one parameter per line ("first"),
                # then keep adding while the line stays within "length"
                if (not first and
                    (len(line) + len(parameters[0]) > length)):
                    break
                line += parameters.pop(0)
                first = False
            if not parameters:
                # last line: drop the trailing ", " and close the paren
                line = line.rstrip(", ") + ")" + after_r_paren
            lines.append(line.rstrip())
            prefix = spaces
    return "\n".join(lines)
class CLanguage(Language):
    """Marker-line templates and code generation for Clinic blocks in C files."""
    # (fixed: removed a dead 'body_prefix = "#"' that was immediately
    # shadowed by the 'body_prefix = ""' below)

    language = 'C'
    start_line = "/*[{dsl_name} input]"
    body_prefix = ""
    stop_line = "[{dsl_name} start generated code]*/"
    checksum_line = "/*[{dsl_name} end generated code: {arguments}]*/"
    def __init__(self, filename):
        """Set up a C preprocessor monitor for *filename*."""
        super().__init__(filename)
        # cpp.Monitor tracks #if/#ifdef nesting so generated code can be
        # wrapped in the same conditionals (see self.cpp.condition() use
        # in output_templates)
        self.cpp = cpp.Monitor(filename)
        # route preprocessor complaints through Clinic's fail()
        self.cpp.fail = fail
    def parse_line(self, line):
        """Feed every scanned source line to the preprocessor monitor."""
        self.cpp.writeline(line)
def render(self, clinic, signatures):
function = None
for o in signatures:
if isinstance(o, Function):
if function:
fail("You may specify at most one function per block.\nFound a block containing at least two:\n\t" + repr(function) + " and " + repr(o))
function = o
return self.render_function(clinic, function)
    def docstring_for_c_string(self, f):
        """Render f.docstring as a multi-line, properly escaped C string literal."""
        text, add, output = _text_accumulator()
        # turn docstring into a properly quoted C string
        for line in f.docstring.split('\n'):
            add('"')
            add(quoted_for_c_string(line))
            add('\\n"\n')

        # each line pushed three entries above, so text[-2] is the escaped
        # content of the final docstring line
        if text[-2] == sig_end_marker:
            # If we only have a signature, add the blank line that the
            # __text_signature__ getter expects to be there.
            add('"\\n"')
        else:
            # drop the trailing '\n"\n' entry and close the literal
            text.pop()
            add('"')
        return ''.join(text)
    def output_templates(self, f):
        """Choose and build all C code templates for function f.

        Returns a dict of named template strings (docstring prototype and
        definition, methoddef define, parser prototype and definition,
        impl prototype and definition, cpp guards); non-empty values are
        wrapped in newlines.  The parsing strategy (METH_NOARGS, METH_O,
        METH_FASTCALL, METH_VARARGS, keywords, option groups) is selected
        from the function's parameters.
        """
        parameters = list(f.parameters.values())
        assert parameters
        assert isinstance(parameters[0].converter, self_converter)
        # self is not rendered like ordinary parameters
        del parameters[0]
        converters = [p.converter for p in parameters]
        has_option_groups = parameters and (parameters[0].group or parameters[-1].group)
        default_return_converter = (not f.return_converter or
            f.return_converter.type == 'PyObject *')
        positional = parameters and parameters[-1].is_positional_only()
        all_boring_objects = False # yes, this will be false if there are 0 parameters, it's fine
        first_optional = len(parameters)
        # "boring" means: every parameter is a plain object converter with
        # format unit 'O' (eligible for PyArg_UnpackTuple / _PyArg_UnpackStack)
        for i, p in enumerate(parameters):
            c = p.converter
            if type(c) != object_converter:
                break
            if c.format_unit != 'O':
                break
            if p.default is not unspecified:
                first_optional = min(first_optional, i)
        else:
            all_boring_objects = True
        new_or_init = f.kind in (METHOD_NEW, METHOD_INIT)
        meth_o = (len(parameters) == 1 and
              parameters[0].is_positional_only() and
              not converters[0].is_optional() and
              not new_or_init)
        # we have to set these things before we're done:
        #
        # docstring_prototype
        # docstring_definition
        # impl_prototype
        # methoddef_define
        # parser_prototype
        # parser_definition
        # impl_definition
        # cpp_if
        # cpp_endif
        # methoddef_ifndef
        return_value_declaration = "PyObject *return_value = NULL;"
        methoddef_define = normalize_snippet("""
            #define {methoddef_name} \\
                {{"{name}", (PyCFunction){c_basename}, {methoddef_flags}, {c_basename}__doc__}},
            """)
        if new_or_init and not f.docstring:
            docstring_prototype = docstring_definition = ''
        else:
            docstring_prototype = normalize_snippet("""
                PyDoc_VAR({c_basename}__doc__);
                """)
            docstring_definition = normalize_snippet("""
                PyDoc_STRVAR({c_basename}__doc__,
                {docstring});
                """)
        impl_definition = normalize_snippet("""
            static {impl_return_type}
            {c_basename}_impl({impl_parameters})
            """)
        impl_prototype = parser_prototype = parser_definition = None
        parser_prototype_keyword = normalize_snippet("""
            static PyObject *
            {c_basename}({self_type}{self_name}, PyObject *args, PyObject *kwargs)
            """)
        parser_prototype_varargs = normalize_snippet("""
            static PyObject *
            {c_basename}({self_type}{self_name}, PyObject *args)
            """)
        parser_prototype_fastcall = normalize_snippet("""
            static PyObject *
            {c_basename}({self_type}{self_name}, PyObject *const *args, Py_ssize_t nargs)
            """)
        parser_prototype_fastcall_keywords = normalize_snippet("""
            static PyObject *
            {c_basename}({self_type}{self_name}, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames)
            """)
        # parser_body_fields remembers the fields passed in to the
        # previous call to parser_body. this is used for an awful hack.
        parser_body_fields = ()
        def parser_body(prototype, *fields):
            # wrap the given parsing statements in the standard function
            # scaffolding (declarations before, impl call / cleanup after)
            nonlocal parser_body_fields
            add, output = text_accumulator()
            add(prototype)
            parser_body_fields = fields
            fields = list(fields)
            fields.insert(0, normalize_snippet("""
                {{
                    {return_value_declaration}
                    {declarations}
                    {initializers}
                """) + "\n")
            # just imagine--your code is here in the middle
            fields.append(normalize_snippet("""
                    {modifications}
                    {return_value} = {c_basename}_impl({impl_arguments});
                    {return_conversion}
                {exit_label}
                    {cleanup}
                    return return_value;
                }}
                """))
            for field in fields:
                add('\n')
                add(field)
            return output()
        def insert_keywords(s):
            # prepend the _keywords/_parser statics used by the
            # keyword-aware argument parsing APIs
            return linear_format(s, declarations=
                'static const char * const _keywords[] = {{{keywords}, NULL}};\n'
                'static _PyArg_Parser _parser = {{"{format_units}:{name}", _keywords, 0}};\n'
                '{declarations}')
        if not parameters:
            # no parameters, METH_NOARGS
            flags = "METH_NOARGS"
            parser_prototype = normalize_snippet("""
                static PyObject *
                {c_basename}({self_type}{self_name}, PyObject *Py_UNUSED(ignored))
                """)
            parser_definition = parser_prototype
            if default_return_converter:
                parser_definition = parser_prototype + '\n' + normalize_snippet("""
                    {{
                        return {c_basename}_impl({impl_arguments});
                    }}
                    """)
            else:
                parser_definition = parser_body(parser_prototype)
        elif meth_o:
            flags = "METH_O"
            if (isinstance(converters[0], object_converter) and
                converters[0].format_unit == 'O'):
                meth_o_prototype = normalize_snippet("""
                    static PyObject *
                    {c_basename}({impl_parameters})
                    """)
                if default_return_converter:
                    # maps perfectly to METH_O, doesn't need a return converter.
                    # so we skip making a parse function
                    # and call directly into the impl function.
                    impl_prototype = parser_prototype = parser_definition = ''
                    impl_definition = meth_o_prototype
                else:
                    # SLIGHT HACK
                    # use impl_parameters for the parser here!
                    parser_prototype = meth_o_prototype
                    parser_definition = parser_body(parser_prototype)
            else:
                argname = 'arg'
                if parameters[0].name == argname:
                    argname += '_'
                parser_prototype = normalize_snippet("""
                    static PyObject *
                    {c_basename}({self_type}{self_name}, PyObject *%s)
                    """ % argname)
                parser_definition = parser_body(parser_prototype, normalize_snippet("""
                    if (!PyArg_Parse(%s, "{format_units}:{name}", {parse_arguments})) {{
                        goto exit;
                    }}
                    """ % argname, indent=4))
        elif has_option_groups:
            # positional parameters with option groups
            # (we have to generate lots of PyArg_ParseTuple calls
            #  in a big switch statement)
            flags = "METH_VARARGS"
            parser_prototype = parser_prototype_varargs
            parser_definition = parser_body(parser_prototype, '    {option_group_parsing}')
        elif positional and all_boring_objects:
            # positional-only, but no option groups,
            # and nothing but normal objects:
            # PyArg_UnpackTuple!
            if not new_or_init:
                flags = "METH_FASTCALL"
                parser_prototype = parser_prototype_fastcall
                parser_definition = parser_body(parser_prototype, normalize_snippet("""
                    if (!_PyArg_UnpackStack(args, nargs, "{name}",
                        {unpack_min}, {unpack_max},
                        {parse_arguments})) {{
                        goto exit;
                    }}
                    """, indent=4))
            else:
                flags = "METH_VARARGS"
                parser_prototype = parser_prototype_varargs
                parser_definition = parser_body(parser_prototype, normalize_snippet("""
                    if (!PyArg_UnpackTuple(args, "{name}",
                        {unpack_min}, {unpack_max},
                        {parse_arguments})) {{
                        goto exit;
                    }}
                    """, indent=4))
        elif positional:
            if not new_or_init:
                # positional-only, but no option groups
                # we only need one call to _PyArg_ParseStack
                flags = "METH_FASTCALL"
                parser_prototype = parser_prototype_fastcall
                parser_definition = parser_body(parser_prototype, normalize_snippet("""
                    if (!_PyArg_ParseStack(args, nargs, "{format_units}:{name}",
                        {parse_arguments})) {{
                        goto exit;
                    }}
                    """, indent=4))
            else:
                # positional-only, but no option groups
                # we only need one call to PyArg_ParseTuple
                flags = "METH_VARARGS"
                parser_prototype = parser_prototype_varargs
                parser_definition = parser_body(parser_prototype, normalize_snippet("""
                    if (!PyArg_ParseTuple(args, "{format_units}:{name}",
                        {parse_arguments})) {{
                        goto exit;
                    }}
                    """, indent=4))
        elif not new_or_init:
            flags = "METH_FASTCALL|METH_KEYWORDS"
            parser_prototype = parser_prototype_fastcall_keywords
            body = normalize_snippet("""
                if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser,
                    {parse_arguments})) {{
                    goto exit;
                }}
                """, indent=4)
            parser_definition = parser_body(parser_prototype, body)
            parser_definition = insert_keywords(parser_definition)
        else:
            # positional-or-keyword arguments
            flags = "METH_VARARGS|METH_KEYWORDS"
            parser_prototype = parser_prototype_keyword
            body = normalize_snippet("""
                if (!_PyArg_ParseTupleAndKeywordsFast(args, kwargs, &_parser,
                    {parse_arguments})) {{
                    goto exit;
                }}
                """, indent=4)
            parser_definition = parser_body(parser_prototype, body)
            parser_definition = insert_keywords(parser_definition)
        if new_or_init:
            # tp_new / tp_init slots: no methoddef entry, fixed signatures,
            # and explicit rejection of unexpected args/kwargs below
            methoddef_define = ''
            if f.kind == METHOD_NEW:
                parser_prototype = parser_prototype_keyword
            else:
                return_value_declaration = "int return_value = -1;"
                parser_prototype = normalize_snippet("""
                    static int
                    {c_basename}({self_type}{self_name}, PyObject *args, PyObject *kwargs)
                    """)
            fields = list(parser_body_fields)
            parses_positional = 'METH_NOARGS' not in flags
            parses_keywords = 'METH_KEYWORDS' in flags
            if parses_keywords:
                assert parses_positional
            if not parses_keywords:
                fields.insert(0, normalize_snippet("""
                    if ({self_type_check}!_PyArg_NoKeywords("{name}", kwargs)) {{
                        goto exit;
                    }}
                    """, indent=4))
                if not parses_positional:
                    fields.insert(0, normalize_snippet("""
                        if ({self_type_check}!_PyArg_NoPositional("{name}", args)) {{
                            goto exit;
                        }}
                        """, indent=4))
            parser_definition = parser_body(parser_prototype, *fields)
            if parses_keywords:
                parser_definition = insert_keywords(parser_definition)
        if f.methoddef_flags:
            flags += '|' + f.methoddef_flags
        methoddef_define = methoddef_define.replace('{methoddef_flags}', flags)
        methoddef_ifndef = ''
        conditional = self.cpp.condition()
        if not conditional:
            cpp_if = cpp_endif = ''
        else:
            cpp_if = "#if " + conditional
            cpp_endif = "#endif /* " + conditional + " */"
            # emit a fallback empty #define once per symbol so callers can
            # always reference {methoddef_name} even when compiled out
            if methoddef_define and f.full_name not in clinic.ifndef_symbols:
                clinic.ifndef_symbols.add(f.full_name)
                methoddef_ifndef = normalize_snippet("""
                    #ifndef {methoddef_name}
                        #define {methoddef_name}
                    #endif /* !defined({methoddef_name}) */
                    """)
        # add ';' to the end of parser_prototype and impl_prototype
        # (they mustn't be None, but they could be an empty string.)
        assert parser_prototype is not None
        if parser_prototype:
            assert not parser_prototype.endswith(';')
            parser_prototype += ';'
        if impl_prototype is None:
            impl_prototype = impl_definition
        if impl_prototype:
            impl_prototype += ";"
        parser_definition = parser_definition.replace("{return_value_declaration}", return_value_declaration)
        d = {
            "docstring_prototype" : docstring_prototype,
            "docstring_definition" : docstring_definition,
            "impl_prototype" : impl_prototype,
            "methoddef_define" : methoddef_define,
            "parser_prototype" : parser_prototype,
            "parser_definition" : parser_definition,
            "impl_definition" : impl_definition,
            "cpp_if" : cpp_if,
            "cpp_endif" : cpp_endif,
            "methoddef_ifndef" : methoddef_ifndef,
        }
        # make sure we didn't forget to assign something,
        # and wrap each non-empty value in \n's
        d2 = {}
        for name, value in d.items():
            assert value is not None, "got a None value for template " + repr(name)
            if value:
                value = '\n' + value + '\n'
            d2[name] = value
        return d2
@staticmethod
def group_to_variable_name(group):
adjective = "left_" if group < 0 else "right_"
return "group_" + adjective + str(abs(group))
    def render_option_group_parsing(self, f, template_dict):
        """Generate the switch-on-argument-count parsing code for option
        groups and store it in template_dict['option_group_parsing']."""
        # positional only, grouped, optional arguments!
        # can be optional on the left or right.
        # here's an example:
        #
        # [ [ [ A1 A2 ] B1 B2 B3 ] C1 C2 ] D1 D2 D3 [ E1 E2 E3 [ F1 F2 F3 ] ]
        #
        # Here group D are required, and all other groups are optional.
        # (Group D's "group" is actually None.)
        # We can figure out which sets of arguments we have based on
        # how many arguments are in the tuple.
        #
        # Note that you need to count up on both sides. For example,
        # you could have groups C+D, or C+D+E, or C+D+E+F.
        #
        # What if the number of arguments leads us to an ambiguous result?
        # Clinic prefers groups on the left. So in the above example,
        # five arguments would map to B+C, not C+D.
        add, output = text_accumulator()
        parameters = list(f.parameters.values())
        if isinstance(parameters[0].converter, self_converter):
            del parameters[0]
        groups = []
        group = None
        left = []
        right = []
        required = []
        last = unspecified
        # bucket consecutive parameters by their group id:
        # negative ids -> left of required, 0 -> required, positive -> right
        for p in parameters:
            group_id = p.group
            if group_id != last:
                last = group_id
                group = []
                if group_id < 0:
                    left.append(group)
                elif group_id == 0:
                    group = required
                else:
                    right.append(group)
            group.append(p)
        count_min = sys.maxsize
        count_max = -1
        add("switch (PyTuple_GET_SIZE(args)) {\n")
        for subset in permute_optional_groups(left, required, right):
            count = len(subset)
            count_min = min(count_min, count)
            count_max = max(count_max, count)
            if count == 0:
                add(""" case 0:
                break;
            """)
                continue
            group_ids = {p.group for p in subset} # eliminate duplicates
            d = {}
            d['count'] = count
            d['name'] = f.name
            d['format_units'] = "".join(p.converter.format_unit for p in subset)
            parse_arguments = []
            for p in subset:
                p.converter.parse_argument(parse_arguments)
            d['parse_arguments'] = ", ".join(parse_arguments)
            # group 0 (required) has no boolean flag variable
            group_ids.discard(0)
            lines = [self.group_to_variable_name(g) + " = 1;" for g in group_ids]
            lines = "\n".join(lines)
            s = """
    case {count}:
        if (!PyArg_ParseTuple(args, "{format_units}:{name}", {parse_arguments})) {{
            goto exit;
        }}
        {group_booleans}
        break;
"""[1:]
            s = linear_format(s, group_booleans=lines)
            s = s.format_map(d)
            add(s)
        add(" default:\n")
        s = ' PyErr_SetString(PyExc_TypeError, "{} requires {} to {} arguments");\n'
        add(s.format(f.full_name, count_min, count_max))
        add(' goto exit;\n')
        add("}")
        # escape braces: the result is substituted into a format_map template
        template_dict['option_group_parsing'] = format_escape(output())
def render_function(self, clinic, f):
if not f:
return ""
add, output = text_accumulator()
data = CRenderData()
assert f.parameters, "We should always have a 'self' at this point!"
parameters = f.render_parameters
converters = [p.converter for p in parameters]
templates = self.output_templates(f)
f_self = parameters[0]
selfless = parameters[1:]
assert isinstance(f_self.converter, self_converter), "No self parameter in " + repr(f.full_name) + "!"
last_group = 0
first_optional = len(selfless)
positional = selfless and selfless[-1].is_positional_only()
new_or_init = f.kind in (METHOD_NEW, METHOD_INIT)
default_return_converter = (not f.return_converter or
f.return_converter.type == 'PyObject *')
has_option_groups = False
# offset i by -1 because first_optional needs to ignore self
for i, p in enumerate(parameters, -1):
c = p.converter
if (i != -1) and (p.default is not unspecified):
first_optional = min(first_optional, i)
# insert group variable
group = p.group
if last_group != group:
last_group = group
if group:
group_name = self.group_to_variable_name(group)
data.impl_arguments.append(group_name)
data.declarations.append("int " + group_name + " = 0;")
data.impl_parameters.append("int " + group_name)
has_option_groups = True
c.render(p, data)
if has_option_groups and (not positional):
fail("You cannot use optional groups ('[' and ']')\nunless all parameters are positional-only ('/').")
# HACK
# when we're METH_O, but have a custom return converter,
# we use "impl_parameters" for the parsing function
# because that works better. but that means we must
# suppress actually declaring the impl's parameters
# as variables in the parsing function. but since it's
# METH_O, we have exactly one anyway, so we know exactly
# where it is.
if ("METH_O" in templates['methoddef_define'] and
'{impl_parameters}' in templates['parser_prototype']):
data.declarations.pop(0)
template_dict = {}
full_name = f.full_name
template_dict['full_name'] = full_name
if new_or_init:
name = f.cls.name
else:
name = f.name
template_dict['name'] = name
if f.c_basename:
c_basename = f.c_basename
else:
fields = full_name.split(".")
if fields[-1] == '__new__':
fields.pop()
c_basename = "_".join(fields)
template_dict['c_basename'] = c_basename
methoddef_name = "{}_METHODDEF".format(c_basename.upper())
template_dict['methoddef_name'] = methoddef_name
template_dict['docstring'] = self.docstring_for_c_string(f)
template_dict['self_name'] = template_dict['self_type'] = template_dict['self_type_check'] = ''
f_self.converter.set_template_dict(template_dict)
f.return_converter.render(f, data)
template_dict['impl_return_type'] = f.return_converter.type
template_dict['declarations'] = format_escape("\n".join(data.declarations))
template_dict['initializers'] = "\n\n".join(data.initializers)
template_dict['modifications'] = '\n\n'.join(data.modifications)
template_dict['keywords'] = '"' + '", "'.join(data.keywords) + '"'
template_dict['format_units'] = ''.join(data.format_units)
template_dict['parse_arguments'] = ', '.join(data.parse_arguments)
template_dict['impl_parameters'] = ", ".join(data.impl_parameters)
template_dict['impl_arguments'] = ", ".join(data.impl_arguments)
template_dict['return_conversion'] = format_escape("".join(data.return_conversion).rstrip())
template_dict['cleanup'] = format_escape("".join(data.cleanup))
template_dict['return_value'] = data.return_value
# used by unpack tuple code generator
ignore_self = -1 if isinstance(converters[0], self_converter) else 0
unpack_min = first_optional
unpack_max = len(selfless)
template_dict['unpack_min'] = str(unpack_min)
template_dict['unpack_max'] = str(unpack_max)
if has_option_groups:
self.render_option_group_parsing(f, template_dict)
# buffers, not destination
for name, destination in clinic.destination_buffers.items():
template = templates[name]
if has_option_groups:
template = linear_format(template,
option_group_parsing=template_dict['option_group_parsing'])
template = linear_format(template,
declarations=template_dict['declarations'],
return_conversion=template_dict['return_conversion'],
initializers=template_dict['initializers'],
modifications=template_dict['modifications'],
cleanup=template_dict['cleanup'],
)
# Only generate the "exit:" label
# if we have any gotos
need_exit_label = "goto exit;" in template
template = linear_format(template,
exit_label="exit:" if need_exit_label else ''
)
s = template.format_map(template_dict)
# mild hack:
# reflow long impl declarations
if name in {"impl_prototype", "impl_definition"}:
s = wrap_declarations(s)
if clinic.line_prefix:
s = indent_all_lines(s, clinic.line_prefix)
if clinic.line_suffix:
s = suffix_all_lines(s, clinic.line_suffix)
destination.append(s)
return clinic.get_destination('block').dump()
@contextlib.contextmanager
def OverrideStdioWith(stdout):
    """Context manager: temporarily rebind sys.stdout to *stdout*.

    On exit it asserts that nothing inside the with-block rebound
    sys.stdout again, then restores the previous stream.
    """
    previous = sys.stdout
    sys.stdout = stdout
    try:
        yield
    finally:
        assert sys.stdout is stdout
        sys.stdout = previous
def create_regex(before, after, word=True, whole_line=True):
    """Compile a regex matching marker lines of the form before<name>after.

    The capture group is \\w+ when word is true, otherwise .+; the whole
    pattern is anchored with ^...$ when whole_line is true.
    """
    capture = r"\w+" if word else ".+"
    body = re.escape(before) + '(' + capture + ')' + re.escape(after)
    if whole_line:
        body = '^' + body + '$'
    return re.compile(body)
class Block:
    r"""
    One chunk of text embedded in another file.

    If dsl_name is None this is verbatim text straight from the file and
    "input" is the only non-false member.  Otherwise it is a Clinic block
    and the members mean:

      input       str with embedded '\n'; for a Clinic block this is the
                  original text with body_prefix and redundant leading
                  whitespace removed.
      dsl_name    str or None; the text between the square brackets on
                  the block's start line.
      signatures  list (possibly empty) which may contain clinic.Module,
                  clinic.Class and clinic.Function objects, at most one
                  of each.
      output      str or None; this block's output, with embedded '\n'.
      indent      leading whitespace found on every input line, measured
                  *after* removing any body_prefix.
      preindent   whitespace found in front of every input line *before*
                  the body_prefix; must be empty when body_prefix is.

    Example (with '_' standing for a space) of a block in a Python file:
        ____#/*[python]
        ____#__for a in range(20):
        ____#____print(a)
        ____#[python]*/
    Here "preindent" is "____" and "indent" is "__".
    """

    def __init__(self, input, dsl_name=None, signatures=None, output=None, indent='', preindent=''):
        assert isinstance(input, str)
        self.input = input
        self.dsl_name = dsl_name
        self.signatures = signatures or []
        self.output = output
        self.indent = indent
        self.preindent = preindent

    def __repr__(self):
        dsl_name = self.dsl_name or "text"
        def summarize(s):
            # clip long reprs; s[0] re-closes with the matching quote char
            text = repr(s)
            if len(text) > 30:
                text = text[:26] + "..." + text[0]
            return text
        return "<Block {} input={} output={}>".format(
            dsl_name, summarize(self.input), summarize(self.output))
class BlockParser:
"""
Block-oriented parser for Argument Clinic.
Iterator, yields Block objects.
"""
    def __init__(self, input, language, *, verify=True):
        """
        "input" should be a str object
        with embedded \n characters.
        "language" should be a Language object.
        """
        language.validate()
        # lines are stored reversed so _line() can pop() from the end in O(1)
        self.input = collections.deque(reversed(input.splitlines(keepends=True)))
        self.block_start_line_number = self.line_number = 0
        self.language = language
        before, _, after = language.start_line.partition('{dsl_name}')
        assert _ == '{dsl_name}'
        # "find" variant is unanchored; start_re matches a whole line
        self.find_start_re = create_regex(before, after, whole_line=False)
        self.start_re = create_regex(before, after)
        self.verify = verify
        # cached checksum regex, keyed by dsl name (see parse_clinic_block)
        self.last_checksum_re = None
        self.last_dsl_name = None
        # dsl_name is set when a verbatim block ends at a start line
        self.dsl_name = None
        self.first_block = True
def __iter__(self):
return self
def __next__(self):
while True:
if not self.input:
raise StopIteration
if self.dsl_name:
return_value = self.parse_clinic_block(self.dsl_name)
self.dsl_name = None
self.first_block = False
return return_value
block = self.parse_verbatim_block()
if self.first_block and not block.input:
continue
self.first_block = False
return block
def is_start_line(self, line):
match = self.start_re.match(line.lstrip())
return match.group(1) if match else None
def _line(self, lookahead=False):
self.line_number += 1
line = self.input.pop()
if not lookahead:
self.language.parse_line(line)
return line
def parse_verbatim_block(self):
add, output = text_accumulator()
self.block_start_line_number = self.line_number
while self.input:
line = self._line()
dsl_name = self.is_start_line(line)
if dsl_name:
self.dsl_name = dsl_name
break
add(line)
return Block(output())
def parse_clinic_block(self, dsl_name):
input_add, input_output = text_accumulator()
self.block_start_line_number = self.line_number + 1
stop_line = self.language.stop_line.format(dsl_name=dsl_name)
body_prefix = self.language.body_prefix.format(dsl_name=dsl_name)
def is_stop_line(line):
# make sure to recognize stop line even if it
# doesn't end with EOL (it could be the very end of the file)
if not line.startswith(stop_line):
return False
remainder = line[len(stop_line):]
return (not remainder) or remainder.isspace()
# consume body of program
while self.input:
line = self._line()
if is_stop_line(line) or self.is_start_line(line):
break
if body_prefix:
line = line.lstrip()
assert line.startswith(body_prefix)
line = line[len(body_prefix):]
input_add(line)
# consume output and checksum line, if present.
if self.last_dsl_name == dsl_name:
checksum_re = self.last_checksum_re
else:
before, _, after = self.language.checksum_line.format(dsl_name=dsl_name, arguments='{arguments}').partition('{arguments}')
assert _ == '{arguments}'
checksum_re = create_regex(before, after, word=False)
self.last_dsl_name = dsl_name
self.last_checksum_re = checksum_re
# scan forward for checksum line
output_add, output_output = text_accumulator()
arguments = None
while self.input:
line = self._line(lookahead=True)
match = checksum_re.match(line.lstrip())
arguments = match.group(1) if match else None
if arguments:
break
output_add(line)
if self.is_start_line(line):
break
output = output_output()
if arguments:
d = {}
for field in shlex.split(arguments):
name, equals, value = field.partition('=')
if not equals:
fail("Mangled Argument Clinic marker line: {!r}".format(line))
d[name.strip()] = value.strip()
if self.verify:
if 'input' in d:
checksum = d['output']
input_checksum = d['input']
else:
checksum = d['checksum']
input_checksum = None
computed = compute_checksum(output, len(checksum))
if checksum != computed:
fail("Checksum mismatch!\nExpected: {}\nComputed: {}\n"
"Suggested fix: remove all generated code including "
"the end marker,\n"
"or use the '-f' option."
.format(checksum, computed))
else:
# put back output
output_lines = output.splitlines(keepends=True)
self.line_number -= len(output_lines)
self.input.extend(reversed(output_lines))
output = None
return Block(input_output(), dsl_name, output=output)
class BlockPrinter:
    """
    Writes Blocks back out as text, regenerating the start/stop and
    checksum marker lines for clinic-owned blocks.
    """

    def __init__(self, language, f=None):
        self.language = language
        self.f = f or io.StringIO()

    def print_block(self, block):
        """
        Render *block* to the output stream.

        Verbatim text blocks (no dsl_name) are written unchanged.  Clinic
        blocks are re-emitted as: start line, (prefixed) input body, stop
        line, generated output, and a checksum line covering both input
        and output.
        """
        input = block.input
        output = block.output
        dsl_name = block.dsl_name
        write = self.f.write

        # dsl_name and output must be both present or both absent.
        # (PEP 8: compare against None with 'is', not '=='.)
        assert not ((dsl_name is None) ^ (output is None)), "you must specify dsl_name and output together, dsl_name " + repr(dsl_name)

        if not dsl_name:
            write(input)
            return

        write(self.language.start_line.format(dsl_name=dsl_name))
        write("\n")

        body_prefix = self.language.body_prefix.format(dsl_name=dsl_name)
        if not body_prefix:
            write(input)
        else:
            # Re-apply the language's comment prefix to every body line.
            for line in input.split('\n'):
                write(body_prefix)
                write(line)
                write("\n")

        write(self.language.stop_line.format(dsl_name=dsl_name))
        write("\n")

        input = ''.join(block.input)
        output = ''.join(block.output)
        if output:
            if not output.endswith('\n'):
                output += '\n'
            write(output)

        arguments="output={} input={}".format(compute_checksum(output, 16), compute_checksum(input, 16))
        write(self.language.checksum_line.format(dsl_name=dsl_name, arguments=arguments))
        write("\n")

    def write(self, text):
        self.f.write(text)
class BufferSeries:
    """
    Behaves like a "defaultlist".
    When you ask for an index that doesn't exist yet,
    the object grows the list until that item exists.
    So o[n] will always work.

    Supports negative indices for actual items.
    e.g. o[-1] is an element immediately preceding o[0].
    """

    def __init__(self):
        # _start is the (possibly negative) logical index of _array[0].
        self._start = 0
        self._array = []
        self._constructor = _text_accumulator

    def __getitem__(self, i):
        # Translate the logical index into a physical offset into _array.
        i -= self._start
        if i < 0:
            # Grow the array on the left so that logical index i exists.
            self._start += i
            prefix = [self._constructor() for x in range(-i)]
            self._array = prefix + self._array
            i = 0
        # Grow the array on the right as needed.
        while i >= len(self._array):
            self._array.append(self._constructor())
        return self._array[i]

    def clear(self):
        # Empty every accumulator, keeping the series structure intact.
        for ta in self._array:
            ta._text.clear()

    def dump(self):
        # Concatenate the contents of every accumulator, left to right.
        texts = [ta.output() for ta in self._array]
        return "".join(texts)
class Destination:
    """A named sink for generated output: a growable buffer series, a
    separate file, or a bit bucket ('suppress')."""

    def __init__(self, name, type, clinic, *args):
        self.name = name
        self.type = type
        self.clinic = clinic
        valid_types = ('buffer', 'file', 'suppress')
        if type not in valid_types:
            fail("Invalid destination type " + repr(type) + " for " + name + " , must be " + ', '.join(valid_types))
        # 'file' destinations take exactly one extra argument (a filename
        # template); the other types take none.
        extra_arguments = 1 if type == "file" else 0
        if len(args) < extra_arguments:
            fail("Not enough arguments for destination " + name + " new " + type)
        if len(args) > extra_arguments:
            fail("Too many arguments for destination " + name + " new " + type)
        if type == 'file':
            # Expand the filename template with pieces of clinic.filename.
            d = {}
            filename = clinic.filename
            d['path'] = filename
            dirname, basename = os.path.split(filename)
            if not dirname:
                dirname = '.'
            d['dirname'] = dirname
            d['basename'] = basename
            # NOTE(review): splitext() is applied to the full path, not just
            # the basename, so 'basename_root' still includes the directory
            # part -- confirm this is intended by template users.
            d['basename_root'], d['basename_extension'] = os.path.splitext(filename)
            self.filename = args[0].format_map(d)
        self.buffers = BufferSeries()

    def __repr__(self):
        if self.type == 'file':
            file_repr = " " + repr(self.filename)
        else:
            file_repr = ''
        return "".join(("<Destination ", self.name, " ", self.type, file_repr, ">"))

    def clear(self):
        # Only buffer destinations can be cleared.
        if self.type != 'buffer':
            fail("Can't clear destination" + self.name + " , it's not of type buffer")
        self.buffers.clear()

    def dump(self):
        return self.buffers.dump()
# maps strings to Language objects.
# "languages" maps the name of the language ("C", "Python").
# "extensions" maps the file extension ("c", "py").
languages = { 'C': CLanguage, 'Python': PythonLanguage }
extensions = { name: CLanguage for name in "c cc cpp cxx h hh hpp hxx".split() }
extensions['py'] = PythonLanguage


# maps strings to callables.
# these callables must be of the form:
#   def foo(name, default, *, ...)
# The callable may have any number of keyword-only parameters.
# The callable must return a CConverter object.
# The callable should not call builtins.print.
converters = {}

# maps strings to callables.
# these callables follow the same rules as those for "converters" above.
# note however that they will never be called with keyword-only parameters.
legacy_converters = {}


# maps strings to callables.
# these callables must be of the form:
#   def foo(*, ...)
# The callable may have any number of keyword-only parameters.
# The callable must return a CConverter object.
# The callable should not call builtins.print.
return_converters = {}

# The currently-active Clinic instance (set by Clinic.__init__).
clinic = None
class Clinic:
    """Drives processing of one file: owns the per-DSL parsers, the output
    destinations and presets, and the module/class namespace tree."""

    presets_text = """
preset block
everything block
methoddef_ifndef buffer 1
docstring_prototype suppress
parser_prototype suppress
cpp_if suppress
cpp_endif suppress
preset original
everything block
methoddef_ifndef buffer 1
docstring_prototype suppress
parser_prototype suppress
cpp_if suppress
cpp_endif suppress
preset file
everything file
methoddef_ifndef file 1
docstring_prototype suppress
parser_prototype suppress
impl_definition block
preset buffer
everything buffer
methoddef_ifndef buffer 1
impl_definition block
docstring_prototype suppress
impl_prototype suppress
parser_prototype suppress
preset partial-buffer
everything buffer
methoddef_ifndef buffer 1
docstring_prototype block
impl_prototype suppress
methoddef_define block
parser_prototype block
impl_definition block
"""

    def __init__(self, language, printer=None, *, force=False, verify=True, filename=None):
        # maps strings to Parser objects.
        # (instantiated from the "parsers" global.)
        self.parsers = {}
        self.language = language
        if printer:
            fail("Custom printers are broken right now")
        self.printer = printer or BlockPrinter(language)
        self.verify = verify
        self.force = force
        self.filename = filename
        self.modules = collections.OrderedDict()
        self.classes = collections.OrderedDict()
        self.functions = []

        self.line_prefix = self.line_suffix = ''

        # Built-in destinations; the 'file' destination exists only when a
        # filename was supplied (its path is derived from that filename).
        self.destinations = {}
        self.add_destination("block", "buffer")
        self.add_destination("suppress", "suppress")
        self.add_destination("buffer", "buffer")
        if filename:
            self.add_destination("file", "file", "{dirname}/clinic/{basename}.h")

        # Default routing for each kind of generated text.
        d = self.get_destination_buffer
        self.destination_buffers = collections.OrderedDict((
            ('cpp_if', d('file')),
            ('docstring_prototype', d('suppress')),
            ('docstring_definition', d('file')),
            ('methoddef_define', d('file')),
            ('impl_prototype', d('file')),
            ('parser_prototype', d('suppress')),
            ('parser_definition', d('file')),
            ('cpp_endif', d('file')),
            ('methoddef_ifndef', d('file', 1)),
            ('impl_definition', d('block')),
        ))

        self.destination_buffers_stack = []
        self.ifndef_symbols = set()

        # Parse presets_text into named preset dicts
        # (preset name -> {buffer kind -> destination buffer}).
        self.presets = {}
        preset = None
        for line in self.presets_text.strip().split('\n'):
            line = line.strip()
            if not line:
                continue
            name, value, *options = line.split()
            if name == 'preset':
                self.presets[value] = preset = collections.OrderedDict()
                continue
            if len(options):
                index = int(options[0])
            else:
                index = 0
            buffer = self.get_destination_buffer(value, index)
            if name == 'everything':
                # Route every kind of text to this buffer.
                for name in self.destination_buffers:
                    preset[name] = buffer
                continue
            assert name in self.destination_buffers
            preset[name] = buffer

        # Publish this instance as the active module-level clinic.
        global clinic
        clinic = self

    def add_destination(self, name, type, *args):
        if name in self.destinations:
            fail("Destination already exists: " + repr(name))
        self.destinations[name] = Destination(name, type, self, *args)

    def get_destination(self, name):
        d = self.destinations.get(name)
        if not d:
            fail("Destination does not exist: " + repr(name))
        return d

    def get_destination_buffer(self, name, item=0):
        # Returns the text accumulator at index *item* of destination *name*.
        d = self.get_destination(name)
        return d.buffers[item]

    def parse(self, input):
        """Process *input* and return the complete rewritten file text."""
        printer = self.printer
        self.block_parser = BlockParser(input, self.language, verify=self.verify)
        for block in self.block_parser:
            dsl_name = block.dsl_name
            if dsl_name:
                # Lazily instantiate one parser per DSL.
                if dsl_name not in self.parsers:
                    assert dsl_name in parsers, "No parser to handle {!r} block.".format(dsl_name)
                    self.parsers[dsl_name] = parsers[dsl_name](self)
                parser = self.parsers[dsl_name]
                try:
                    parser.parse(block)
                except Exception:
                    fail('Exception raised during parsing:\n' +
                         traceback.format_exc().rstrip())
            printer.print_block(block)

        second_pass_replacements = {}

        # these are destinations not buffers
        for name, destination in self.destinations.items():
            if destination.type == 'suppress':
                continue
            output = destination.dump()

            if output:
                block = Block("", dsl_name="clinic", output=output)

                if destination.type == 'buffer':
                    # Leftover buffered text is flushed to the main output.
                    block.input = "dump " + name + "\n"
                    warn("Destination buffer " + repr(name) + " not empty at end of file, emptying.")
                    printer.write("\n")
                    printer.print_block(block)
                    continue

                if destination.type == 'file':
                    try:
                        dirname = os.path.dirname(destination.filename)
                        try:
                            os.makedirs(dirname)
                        except FileExistsError:
                            if not os.path.isdir(dirname):
                                fail("Can't write to destination {}, "
                                     "can't make directory {}!".format(
                                        destination.filename, dirname))
                        if self.verify:
                            # Refuse to overwrite a file a human has edited:
                            # a clinic-owned file is exactly one 'preserve' block.
                            with open(destination.filename, "rt") as f:
                                parser_2 = BlockParser(f.read(), language=self.language)
                                blocks = list(parser_2)
                                if (len(blocks) != 1) or (blocks[0].input != 'preserve\n'):
                                    fail("Modified destination file " + repr(destination.filename) + ", not overwriting!")
                    except FileNotFoundError:
                        pass

                    block.input = 'preserve\n'
                    printer_2 = BlockPrinter(self.language)
                    printer_2.print_block(block)
                    with open(destination.filename, "wt") as f:
                        f.write(printer_2.f.getvalue())
                    continue

        text = printer.f.getvalue()

        if second_pass_replacements:
            # Re-parse the rendered text and substitute deferred replacements
            # inside clinic-generated output.
            printer_2 = BlockPrinter(self.language)
            parser_2 = BlockParser(text, self.language)
            changed = False
            for block in parser_2:
                if block.dsl_name:
                    for id, replacement in second_pass_replacements.items():
                        if id in block.output:
                            changed = True
                            block.output = block.output.replace(id, replacement)
                printer_2.print_block(block)
            if changed:
                text = printer_2.f.getvalue()

        return text

    def _module_and_class(self, fields):
        """
        fields should be an iterable of field names.
        returns a tuple of (module, class).
        the module object could actually be self (a clinic object).
        this function is only ever used to find the parent of where
        a new class/module should go.
        """
        in_classes = False
        parent = module = self
        cls = None
        so_far = []

        for field in fields:
            so_far.append(field)
            if not in_classes:
                child = parent.modules.get(field)
                if child:
                    parent = module = child
                    continue
                # No such submodule: from here on, look up classes only.
                in_classes = True
            if not hasattr(parent, 'classes'):
                return module, cls
            child = parent.classes.get(field)
            if not child:
                fail('Parent class or module ' + '.'.join(so_far) + " does not exist.")
            cls = parent = child

        return module, cls
def parse_file(filename, *, force=False, verify=True, output=None, encoding='utf-8'):
    """Run Argument Clinic over a single file.

    The file's Language is chosen by its extension.  The rewritten text is
    written via a temp file + os.replace (atomic on POSIX) to *output*, or
    back to *filename* by default.  Nothing is written when the text is
    unchanged, unless *force* is true.
    """
    extension = os.path.splitext(filename)[1][1:]
    if not extension:
        fail("Can't extract file type for file " + repr(filename))

    try:
        language = extensions[extension](filename)
    except KeyError:
        fail("Can't identify file type for file " + repr(filename))

    with open(filename, 'r', encoding=encoding) as f:
        raw = f.read()

    # exit quickly if there are no clinic markers in the file
    find_start_re = BlockParser("", language).find_start_re
    if not find_start_re.search(raw):
        return

    clinic = Clinic(language, force=force, verify=verify, filename=filename)
    cooked = clinic.parse(raw)
    if (cooked == raw) and not force:
        return

    directory = os.path.dirname(filename) or '.'

    with tempfile.TemporaryDirectory(prefix="clinic", dir=directory) as tmpdir:
        # Renamed from 'bytes' so the builtin isn't shadowed.
        data = cooked.encode(encoding)
        tmpfilename = os.path.join(tmpdir, os.path.basename(filename))
        with open(tmpfilename, "wb") as f:
            f.write(data)
        os.replace(tmpfilename, output or filename)
def compute_checksum(input, length=None):
    """Return the SHA-1 hex digest of *input* (None is treated as ""),
    truncated to *length* characters when a truthy length is given."""
    text = input or ''
    digest = hashlib.sha1(text.encode('utf-8')).hexdigest()
    return digest[:length] if length else digest
class PythonParser:
    """Handles 'python' blocks: executes the block's input as Python code
    and captures anything printed to stdout as the block's output."""

    def __init__(self, clinic):
        pass

    def parse(self, block):
        s = io.StringIO()
        with OverrideStdioWith(s):
            # NOTE: exec() of block contents is by design -- clinic runs
            # only over trusted source files -- but never feed untrusted
            # input through this parser.
            exec(block.input)
        block.output = s.getvalue()
class Module:
    """A Python module as seen by Argument Clinic.

    Acts as a namespace node: it may contain nested modules, classes,
    and functions.
    """

    def __init__(self, name, module=None):
        self.name = name
        # The enclosing module (if any) doubles as this node's parent.
        self.module = self.parent = module
        self.modules = collections.OrderedDict()
        self.classes = collections.OrderedDict()
        self.functions = []

    def __repr__(self):
        return "<clinic.Module {!r} at {}>".format(self.name, id(self))
class Class:
    """A Python class as seen by Argument Clinic.

    May carry the C typedef and PyTypeObject names used when generating
    code for its methods.
    """

    def __init__(self, name, module=None, cls=None, typedef=None, type_object=None):
        self.name = name
        self.module = module
        self.cls = cls
        self.typedef = typedef
        self.type_object = type_object
        # A nested class hangs off its enclosing class, otherwise its module.
        self.parent = cls or module
        self.classes = collections.OrderedDict()
        self.functions = []

    def __repr__(self):
        return "<clinic.Class {!r} at {}>".format(self.name, id(self))
# Special methods Argument Clinic cannot process: functions with these
# names are rejected rather than given a generated signature.
unsupported_special_methods = set("""
__abs__
__add__
__and__
__bytes__
__call__
__complex__
__delitem__
__divmod__
__eq__
__float__
__floordiv__
__ge__
__getattr__
__getattribute__
__getitem__
__gt__
__hash__
__iadd__
__iand__
__ifloordiv__
__ilshift__
__imatmul__
__imod__
__imul__
__index__
__int__
__invert__
__ior__
__ipow__
__irshift__
__isub__
__iter__
__itruediv__
__ixor__
__le__
__len__
__lshift__
__lt__
__matmul__
__mod__
__mul__
__neg__
__new__
__next__
__or__
__pos__
__pow__
__radd__
__rand__
__rdivmod__
__repr__
__rfloordiv__
__rlshift__
__rmatmul__
__rmod__
__rmul__
__ror__
__rpow__
__rrshift__
__rshift__
__rsub__
__rtruediv__
__rxor__
__setattr__
__setitem__
__str__
__sub__
__truediv__
__xor__
""".strip().split())

# The "kind" of a function.  Each constant is simply its own name as a str.
INVALID, CALLABLE, STATIC_METHOD, CLASS_METHOD, METHOD_INIT, METHOD_NEW = """
INVALID, CALLABLE, STATIC_METHOD, CLASS_METHOD, METHOD_INIT, METHOD_NEW
""".replace(",", "").strip().split()
class Function:
    """
    Mutable duck type for inspect.Function.

    docstring - a str containing
        * embedded line breaks
        * text outdented to the left margin
        * no trailing whitespace.
    It will always be true that
        (not docstring) or ((not docstring[0].isspace()) and (docstring.rstrip() == docstring))
    """

    def __init__(self, parameters=None, *, name,
                 module, cls=None, c_basename=None,
                 full_name=None,
                 return_converter, return_annotation=_empty,
                 docstring=None, kind=CALLABLE, coexist=False,
                 docstring_only=False):
        self.parameters = parameters or collections.OrderedDict()
        self.return_annotation = return_annotation
        self.name = name
        self.full_name = full_name
        self.module = module
        self.cls = cls
        # Methods belong to their class; plain functions to their module.
        self.parent = cls or module
        self.c_basename = c_basename
        self.return_converter = return_converter
        self.docstring = docstring or ''
        self.kind = kind
        self.coexist = coexist
        self.self_converter = None
        # docstring_only means "don't generate a machine-readable
        # signature, just a normal docstring".  it's True for
        # functions with optional groups because we can't represent
        # those accurately with inspect.Signature in 3.4.
        self.docstring_only = docstring_only

        self.rendered_parameters = None

    __render_parameters__ = None

    @property
    def render_parameters(self):
        # Lazily computed, cached copies of the parameters; each copy's
        # converter gets a pre_render() call before use.
        if not self.__render_parameters__:
            self.__render_parameters__ = l = []
            for p in self.parameters.values():
                p = p.copy()
                p.converter.pre_render()
                l.append(p)
        return self.__render_parameters__

    @property
    def methoddef_flags(self):
        # METH_* flags for the generated PyMethodDef entry, or None for
        # __init__/__new__ (which have no PyMethodDef).
        if self.kind in (METHOD_INIT, METHOD_NEW):
            return None
        flags = []
        if self.kind == CLASS_METHOD:
            flags.append('METH_CLASS')
        elif self.kind == STATIC_METHOD:
            flags.append('METH_STATIC')
        else:
            assert self.kind == CALLABLE, "unknown kind: " + repr(self.kind)
        if self.coexist:
            flags.append('METH_COEXIST')
        return '|'.join(flags)

    def __repr__(self):
        return '<clinic.Function ' + self.name + '>'

    def copy(self, **overrides):
        # Duplicate this function, applying **overrides; parameters are
        # copied too, so their converters point at the new function.
        kwargs = {
            'name': self.name, 'module': self.module, 'parameters': self.parameters,
            'cls': self.cls, 'c_basename': self.c_basename,
            'full_name': self.full_name,
            'return_converter': self.return_converter, 'return_annotation': self.return_annotation,
            'docstring': self.docstring, 'kind': self.kind, 'coexist': self.coexist,
            'docstring_only': self.docstring_only,
        }
        kwargs.update(overrides)
        f = Function(**kwargs)

        parameters = collections.OrderedDict()
        for name, value in f.parameters.items():
            value = value.copy(function=f)
            parameters[name] = value
        f.parameters = parameters
        return f
class Parameter:
    """
    Mutable duck type of inspect.Parameter.
    """

    def __init__(self, name, kind, *, default=_empty,
                 function, converter, annotation=_empty,
                 docstring=None, group=0):
        self.name = name
        self.kind = kind
        self.default = default
        self.function = function
        self.converter = converter
        self.annotation = annotation
        self.docstring = docstring or ''
        self.group = group

    def __repr__(self):
        return '<clinic.Parameter {}>'.format(self.name)

    def is_keyword_only(self):
        return self.kind == inspect.Parameter.KEYWORD_ONLY

    def is_positional_only(self):
        return self.kind == inspect.Parameter.POSITIONAL_ONLY

    def copy(self, **overrides):
        """Return a duplicate of this parameter, with **overrides applied."""
        kwargs = dict(
            name=self.name, kind=self.kind, default=self.default,
            function=self.function, converter=self.converter,
            annotation=self.annotation, docstring=self.docstring,
            group=self.group,
        )
        kwargs.update(overrides)
        if 'converter' not in overrides:
            # A converter references its function, so a copied parameter
            # needs its own converter bound to the (possibly overridden)
            # function.
            fresh_converter = copy.copy(self.converter)
            fresh_converter.function = kwargs['function']
            kwargs['converter'] = fresh_converter
        return Parameter(**kwargs)
class LandMine:
    # try to access any
    """Attribute-access booby trap: reading any attribute other than
    __repr__/__message__ reports an error via fail() with the stored
    message.  Used to catch converters touching self.function too early."""

    def __init__(self, message):
        self.__message__ = message

    def __repr__(self):
        return '<LandMine ' + repr(self.__message__) + ">"

    def __getattribute__(self, name):
        # Allow only the two attributes needed to report the problem.
        if name in ('__repr__', '__message__'):
            return super().__getattribute__(name)
        # raise RuntimeError(repr(name))
        fail("Stepped on a land mine, trying to access attribute " + repr(name) + ":\n" + self.__message__)
def add_c_converter(f, name=None):
    """Register converter callable *f* in the global converters table.

    The registration key is *name* (or f.__name__) with its '_converter'
    suffix removed; callables not ending in '_converter' are returned
    unregistered.
    """
    suffix = '_converter'
    name = name or f.__name__
    if not name.endswith(suffix):
        return f
    converters[name[:-len(suffix)]] = f
    return f
def add_default_legacy_c_converter(cls):
    """Register *cls* under its default format unit in legacy_converters."""
    # Never register the generic 'O&' / empty units, and never stomp on an
    # existing registration (in case you subclass).
    unit = cls.format_unit
    if unit not in ('O&', '') and unit not in legacy_converters:
        legacy_converters[unit] = cls
    return cls
def add_legacy_c_converter(format_unit, **kwargs):
    """
    Adds a legacy converter.
    """
    def closure(f):
        # Bind any extra keyword arguments into the registered callable;
        # the decorated function itself is returned unchanged.
        registered = functools.partial(f, **kwargs) if kwargs else f
        if format_unit:
            legacy_converters[format_unit] = registered
        return f
    return closure
class CConverterAutoRegister(type):
    """Metaclass that registers every CConverter subclass at class-creation
    time, both by class name and by default format unit."""
    def __init__(cls, name, bases, classdict):
        add_c_converter(cls)
        add_default_legacy_c_converter(cls)
class CConverter(metaclass=CConverterAutoRegister):
    """
    For the init function, self, name, function, and default
    must be keyword-or-positional parameters. All other
    parameters must be keyword-only.
    """

    # The C name to use for this variable.
    name = None

    # The Python name to use for this variable.
    py_name = None

    # The C type to use for this variable.
    # 'type' should be a Python string specifying the type, e.g. "int".
    # If this is a pointer type, the type string should end with ' *'.
    type = None

    # The Python default value for this parameter, as a Python value.
    # Or the magic value "unspecified" if there is no default.
    # Or the magic value "unknown" if this value is a cannot be evaluated
    # at Argument-Clinic-preprocessing time (but is presumed to be valid
    # at runtime).
    default = unspecified

    # If not None, default must be isinstance() of this type.
    # (You can also specify a tuple of types.)
    default_type = None

    # "default" converted into a C value, as a string.
    # Or None if there is no default.
    c_default = None

    # "default" converted into a Python value, as a string.
    # Or None if there is no default.
    py_default = None

    # The default value used to initialize the C variable when
    # there is no default, but not specifying a default may
    # result in an "uninitialized variable" warning.  This can
    # easily happen when using option groups--although
    # properly-written code won't actually use the variable,
    # the variable does get passed in to the _impl.  (Ah, if
    # only dataflow analysis could inline the static function!)
    #
    # This value is specified as a string.
    # Every non-abstract subclass should supply a valid value.
    c_ignored_default = 'NULL'

    # The C converter *function* to be used, if any.
    # (If this is not None, format_unit must be 'O&'.)
    converter = None

    # Should Argument Clinic add a '&' before the name of
    # the variable when passing it into the _impl function?
    impl_by_reference = False

    # Should Argument Clinic add a '&' before the name of
    # the variable when passing it into PyArg_ParseTuple (AndKeywords)?
    parse_by_reference = True

    #############################################################
    #############################################################
    ## You shouldn't need to read anything below this point to ##
    ## write your own converter functions.                     ##
    #############################################################
    #############################################################

    # The "format unit" to specify for this variable when
    # parsing arguments using PyArg_ParseTuple (AndKeywords).
    # Custom converters should always use the default value of 'O&'.
    format_unit = 'O&'

    # What encoding do we want for this variable?  Only used
    # by format units starting with 'e'.
    encoding = None

    # Should this object be required to be a subclass of a specific type?
    # If not None, should be a string representing a pointer to a
    # PyTypeObject (e.g. "&PyUnicode_Type").
    # Only used by the 'O!' format unit (and the "object" converter).
    subclass_of = None

    # Do we want an adjacent '_length' variable for this variable?
    # Only used by format units ending with '#'.
    length = False

    # Should we show this parameter in the generated
    # __text_signature__? This is *almost* always True.
    # (It's only False for __new__, __init__, and METH_STATIC functions.)
    show_in_signature = True

    # Overrides the name used in a text signature.
    # The name used for a "self" parameter must be one of
    # self, type, or module; however users can set their own.
    # This lets the self_converter overrule the user-settable
    # name, *just* for the text signature.
    # Only set by self_converter.
    signature_name = None

    # keep in sync with self_converter.__init__!
    def __init__(self, name, py_name, function, default=unspecified, *, c_default=None, py_default=None, annotation=unspecified, **kwargs):
        self.name = name
        self.py_name = py_name

        if default is not unspecified:
            # Validate the default against default_type (Unknown is always ok).
            if self.default_type and not isinstance(default, (self.default_type, Unknown)):
                if isinstance(self.default_type, type):
                    types_str = self.default_type.__name__
                else:
                    types_str = ', '.join((cls.__name__ for cls in self.default_type))
                fail("{}: default value {!r} for field {} is not of type {}".format(
                    self.__class__.__name__, default, name, types_str))
            self.default = default

        if c_default:
            self.c_default = c_default
        if py_default:
            self.py_default = py_default

        if annotation != unspecified:
            fail("The 'annotation' parameter is not currently permitted.")

        # this is deliberate, to prevent you from caching information
        # about the function in the init.
        # (that breaks if we get cloned.)
        # so after this change we will noisily fail.
        self.function = LandMine("Don't access members of self.function inside converter_init!")
        self.converter_init(**kwargs)
        self.function = function

    def converter_init(self):
        """Subclass hook for extra keyword-only configuration."""
        pass

    def is_optional(self):
        return (self.default is not unspecified)

    def _render_self(self, parameter, data):
        """Accumulate the _impl-side rendering for this parameter into *data*."""
        self.parameter = parameter
        original_name = self.name
        name = ensure_legal_c_identifier(original_name)

        # impl_arguments
        s = ("&" if self.impl_by_reference else "") + name
        data.impl_arguments.append(s)
        if self.length:
            data.impl_arguments.append(self.length_name())

        # impl_parameters
        data.impl_parameters.append(self.simple_declaration(by_reference=self.impl_by_reference))
        if self.length:
            data.impl_parameters.append("Py_ssize_clean_t " + self.length_name())

    def _render_non_self(self, parameter, data):
        """Accumulate the parsing-side rendering for this parameter into *data*."""
        self.parameter = parameter
        original_name = self.name
        name = ensure_legal_c_identifier(original_name)

        # declarations
        d = self.declaration()
        data.declarations.append(d)

        # initializers
        initializers = self.initialize()
        if initializers:
            data.initializers.append('/* initializers for ' + name + ' */\n' + initializers.rstrip())

        # modifications
        modifications = self.modify()
        if modifications:
            data.modifications.append('/* modifications for ' + name + ' */\n' + modifications.rstrip())

        # keywords
        if parameter.is_positional_only():
            data.keywords.append('')
        else:
            data.keywords.append(parameter.name)

        # format_units
        if self.is_optional() and '|' not in data.format_units:
            data.format_units.append('|')
        if parameter.is_keyword_only() and '$' not in data.format_units:
            data.format_units.append('$')
        data.format_units.append(self.format_unit)

        # parse_arguments
        self.parse_argument(data.parse_arguments)

        # cleanup
        cleanup = self.cleanup()
        if cleanup:
            data.cleanup.append('/* Cleanup for ' + name + ' */\n' + cleanup.rstrip() + "\n")

    def render(self, parameter, data):
        """
        parameter is a clinic.Parameter instance.
        data is a CRenderData instance.
        """
        self._render_self(parameter, data)
        self._render_non_self(parameter, data)

    def length_name(self):
        """Computes the name of the associated "length" variable."""
        if not self.length:
            return None
        return ensure_legal_c_identifier(self.name) + "_length"

    # Why is this one broken out separately?
    # For "positional-only" function parsing,
    # which generates a bunch of PyArg_ParseTuple calls.
    def parse_argument(self, list):
        assert not (self.converter and self.encoding)
        if self.format_unit == 'O&':
            assert self.converter
            list.append(self.converter)

        if self.encoding:
            list.append(c_repr(self.encoding))
        elif self.subclass_of:
            list.append(self.subclass_of)

        legal_name = ensure_legal_c_identifier(self.name)
        s = ("&" if self.parse_by_reference else "") + legal_name
        list.append(s)

        if self.length:
            list.append("&" + self.length_name())

    #
    # All the functions after here are intended as extension points.
    #

    def simple_declaration(self, by_reference=False):
        """
        Computes the basic declaration of the variable.
        Used in computing the prototype declaration and the
        variable declaration.
        """
        prototype = [self.type]
        if by_reference or not self.type.endswith('*'):
            prototype.append(" ")
        if by_reference:
            prototype.append('*')
        prototype.append(ensure_legal_c_identifier(self.name))
        return "".join(prototype)

    def declaration(self):
        """
        The C statement to declare this variable.
        """
        declaration = [self.simple_declaration()]
        default = self.c_default
        if not default and self.parameter.group:
            default = self.c_ignored_default
        if default:
            declaration.append(" = ")
            declaration.append(default)
        declaration.append(";")
        if self.length:
            declaration.append('\nPy_ssize_clean_t ')
            declaration.append(self.length_name())
            declaration.append(';')
        return "".join(declaration)

    def initialize(self):
        """
        The C statements required to set up this variable before parsing.
        Returns a string containing this code indented at column 0.
        If no initialization is necessary, returns an empty string.
        """
        return ""

    def modify(self):
        """
        The C statements required to modify this variable after parsing.
        Returns a string containing this code indented at column 0.
        If no initialization is necessary, returns an empty string.
        """
        return ""

    def cleanup(self):
        """
        The C statements required to clean up after this variable.
        Returns a string containing this code indented at column 0.
        If no cleanup is necessary, returns an empty string.
        """
        return ""

    def pre_render(self):
        """
        A second initialization function, like converter_init,
        called just before rendering.
        You are permitted to examine self.function here.
        """
        pass
class bool_converter(CConverter):
    """Converter for a C int used as a boolean ('p', or 'i' when accept={int})."""
    type = 'int'
    default_type = bool
    format_unit = 'p'
    c_ignored_default = '0'

    def converter_init(self, *, accept={object}):
        if accept == {int}:
            self.format_unit = 'i'
        elif accept != {object}:
            fail("bool_converter: illegal 'accept' argument " + repr(accept))
        if self.default is not unspecified:
            # Normalize the default to bool, and render it as C 0/1.
            self.default = bool(self.default)
            self.c_default = str(int(self.default))
class char_converter(CConverter):
    """Converter for a single C char (format unit 'c')."""
    type = 'char'
    default_type = (bytes, bytearray)
    format_unit = 'c'
    # NOTE(review): "'\0'" embeds a literal NUL character in this Python
    # string (and thus in the generated C source); a raw string r"'\0'"
    # (backslash-zero escape) may be what's intended -- confirm against
    # the generated output.
    c_ignored_default = "'\0'"

    def converter_init(self):
        # 'c' accepts exactly one byte.
        if isinstance(self.default, self.default_type) and (len(self.default) != 1):
            fail("char_converter: illegal default value " + repr(self.default))


@add_legacy_c_converter('B', bitwise=True)
class unsigned_char_converter(CConverter):
    """Converter for C unsigned char ('b'; 'B' for the bitwise variant)."""
    type = 'unsigned char'
    default_type = int
    format_unit = 'b'
    # NOTE(review): same embedded-NUL concern as char_converter above.
    c_ignored_default = "'\0'"

    def converter_init(self, *, bitwise=False):
        if bitwise:
            self.format_unit = 'B'


class byte_converter(unsigned_char_converter): pass
class short_converter(CConverter):
    """Converter for C short (format unit 'h')."""
    type = 'short'
    default_type = int
    format_unit = 'h'
    c_ignored_default = "0"


class unsigned_short_converter(CConverter):
    """Converter for C unsigned short ('H'; only the bitwise variant exists)."""
    type = 'unsigned short'
    default_type = int
    format_unit = 'H'
    c_ignored_default = "0"

    def converter_init(self, *, bitwise=False):
        if not bitwise:
            fail("Unsigned shorts must be bitwise (for now).")
@add_legacy_c_converter('C', accept={str})
class int_converter(CConverter):
    """Converter for C int ('i'; 'C' when accept={str}, a one-character str)."""
    type = 'int'
    default_type = int
    format_unit = 'i'
    c_ignored_default = "0"

    def converter_init(self, *, accept={int}, type=None):
        if accept == {str}:
            self.format_unit = 'C'
        elif accept != {int}:
            fail("int_converter: illegal 'accept' argument " + repr(accept))
        # Allow overriding the C type name (e.g. an int-sized typedef).
        # (PEP 8: compare against None with 'is not', not '!='.)
        if type is not None:
            self.type = type
class unsigned_int_converter(CConverter):
    """Converter for C unsigned int ('I'; only the bitwise variant exists)."""
    type = 'unsigned int'
    default_type = int
    format_unit = 'I'
    c_ignored_default = "0"

    def converter_init(self, *, bitwise=False):
        if not bitwise:
            fail("Unsigned ints must be bitwise (for now).")
class long_converter(CConverter):
    # Converter for a C 'long' (format unit 'l').
    type = 'long'
    default_type = int
    format_unit = 'l'
    c_ignored_default = "0"
class unsigned_long_converter(CConverter):
    # Converter for a C 'unsigned long' (format unit 'k'); bitwise only.
    type = 'unsigned long'
    default_type = int
    format_unit = 'k'
    c_ignored_default = "0"

    def converter_init(self, *, bitwise=False):
        if not bitwise:
            fail("Unsigned longs must be bitwise (for now).")
class long_long_converter(CConverter):
    # Converter for a C 'long long' (format unit 'L').
    type = 'long long'
    default_type = int
    format_unit = 'L'
    c_ignored_default = "0"
class unsigned_long_long_converter(CConverter):
    # Converter for a C 'unsigned long long' (format unit 'K'); bitwise only.
    type = 'unsigned long long'
    default_type = int
    format_unit = 'K'
    c_ignored_default = "0"

    def converter_init(self, *, bitwise=False):
        if not bitwise:
            fail("Unsigned long long must be bitwise (for now).")
class Py_ssize_t_converter(CConverter):
    # Converter for a C 'Py_ssize_t' (format unit 'n'), or an O&-style
    # converter when None is also accepted.
    type = 'Py_ssize_t'
    c_ignored_default = "0"

    def converter_init(self, *, accept={int}):
        if accept == {int}:
            self.format_unit = 'n'
            self.default_type = int
        elif accept == {int, NoneType}:
            self.converter = '_Py_convert_optional_to_ssize_t'
        else:
            fail("Py_ssize_t_converter: illegal 'accept' argument " + repr(accept))
class slice_index_converter(CConverter):
    # Converter for slice indexes: Py_ssize_t parsed through the
    # _PyEval_SliceIndex* helpers (None optionally allowed).
    type = 'Py_ssize_t'

    def converter_init(self, *, accept={int, NoneType}):
        if accept == {int}:
            self.converter = '_PyEval_SliceIndexNotNone'
        elif accept == {int, NoneType}:
            self.converter = '_PyEval_SliceIndex'
        else:
            fail("slice_index_converter: illegal 'accept' argument " + repr(accept))
class float_converter(CConverter):
    # Converter for a C 'float' (format unit 'f').
    type = 'float'
    default_type = float
    format_unit = 'f'
    c_ignored_default = "0.0"
class double_converter(CConverter):
    # Converter for a C 'double' (format unit 'd').
    type = 'double'
    default_type = float
    format_unit = 'd'
    c_ignored_default = "0.0"
class Py_complex_converter(CConverter):
    # Converter for a C 'Py_complex' (format unit 'D').
    type = 'Py_complex'
    default_type = complex
    format_unit = 'D'
    c_ignored_default = "{0.0, 0.0}"
class object_converter(CConverter):
    # Converter for a generic 'PyObject *' (format units 'O', 'O!', 'O&').
    type = 'PyObject *'
    format_unit = 'O'

    def converter_init(self, *, converter=None, type=None, subclass_of=None):
        if converter:
            # A custom C converter function implies format unit 'O&'.
            if subclass_of:
                fail("object: Cannot pass in both 'converter' and 'subclass_of'")
            self.format_unit = 'O&'
            self.converter = converter
        elif subclass_of:
            # Restricting to instances of a type object implies 'O!'.
            self.format_unit = 'O!'
            self.subclass_of = subclass_of

        if type is not None:
            self.type = type
#
# We define three conventions for buffer types in the 'accept' argument:
#
# buffer : any object supporting the buffer interface
# rwbuffer: any object supporting the buffer interface, but must be writeable
# robuffer: any object supporting the buffer interface, but must not be writeable
#
# Sentinel classes used only as members of 'accept' sets to denote the
# buffer conventions described above; they are never instantiated.
class buffer: pass
class rwbuffer: pass
class robuffer: pass
def str_converter_key(types, encoding, zeroes):
    """Normalize (accept, encoding, zeroes) into a hashable lookup key."""
    return frozenset(types), bool(encoding), bool(zeroes)
# Maps str_converter_key(...) tuples to format units; populated by r().
str_converter_argument_map = {}
class str_converter(CConverter):
    # Converter for C string parameters; the exact format unit is looked
    # up from (accept, encoding, zeroes) in str_converter_argument_map.
    type = 'const char *'
    default_type = (str, Null, NoneType)
    format_unit = 's'

    def converter_init(self, *, accept={str}, encoding=None, zeroes=False):
        key = str_converter_key(accept, encoding, zeroes)
        format_unit = str_converter_argument_map.get(key)
        if not format_unit:
            fail("str_converter: illegal combination of arguments", key)

        self.format_unit = format_unit
        # zeroes=True means embedded NUL bytes are allowed; the '#'-style
        # units pass an explicit length argument.
        self.length = bool(zeroes)
        if encoding:
            if self.default not in (Null, None, unspecified):
                fail("str_converter: Argument Clinic doesn't support default values for encoded strings")
            self.encoding = encoding
            self.type = 'char *'
            # sorry, clinic can't support preallocated buffers
            # for es# and et#
            self.c_default = "NULL"

    def cleanup(self):
        # Encoded strings ('es'/'et' family) allocate their own buffer;
        # free it when the parsing function exits.
        if self.encoding:
            name = ensure_legal_c_identifier(self.name)
            return "".join(["if (", name, ") {\n   PyMem_FREE(", name, ");\n}\n"])
#
# This is the fourth or fifth rewrite of registering all the
# crazy string converter format units.  Previous approaches hid
# bugs--generally mismatches between the semantics of the format
# unit and the arguments necessary to represent those semantics
# properly.  Hopefully with this approach we'll get it 100% right.
#
# The r() function (short for "register") both registers the
# mapping from arguments to format unit *and* registers the
# legacy C converter for that format unit.
#
def r(format_unit, *, accept, encoding=False, zeroes=False):
    if not encoding and format_unit != 's':
        # add the legacy c converters here too.
        #
        # note: add_legacy_c_converter can't work for
        #   es, es#, et, or et#
        #   because of their extra encoding argument
        #
        # also don't add the converter for 's' because
        # the metaclass for CConverter adds it for us.
        kwargs = {}
        if accept != {str}:
            kwargs['accept'] = accept
        if zeroes:
            kwargs['zeroes'] = True
        added_f = functools.partial(str_converter, **kwargs)
        legacy_converters[format_unit] = added_f

    d = str_converter_argument_map
    key = str_converter_key(accept, encoding, zeroes)
    if key in d:
        sys.exit("Duplicate keys specified for str_converter_argument_map!")
    d[key] = format_unit

# Register every supported str format unit, then discard the helper.
r('es',  encoding=True,              accept={str})
r('es#', encoding=True, zeroes=True, accept={str})
r('et',  encoding=True,              accept={bytes, bytearray, str})
r('et#', encoding=True, zeroes=True, accept={bytes, bytearray, str})
r('s',                               accept={str})
r('s#',                 zeroes=True, accept={robuffer, str})
r('y',                               accept={robuffer})
r('y#',                 zeroes=True, accept={robuffer})
r('z',                               accept={str, NoneType})
r('z#',                 zeroes=True, accept={robuffer, str, NoneType})
del r
class PyBytesObject_converter(CConverter):
    # Converter for an exact bytes object (format unit 'S').
    type = 'PyBytesObject *'
    format_unit = 'S'
    # accept = {bytes}
class PyByteArrayObject_converter(CConverter):
    # Converter for an exact bytearray object (format unit 'Y').
    type = 'PyByteArrayObject *'
    format_unit = 'Y'
    # accept = {bytearray}
class unicode_converter(CConverter):
    # Converter for a unicode (str) object, passed as 'PyObject *' ('U').
    type = 'PyObject *'
    default_type = (str, Null, NoneType)
    format_unit = 'U'
@add_legacy_c_converter('u#', zeroes=True)
@add_legacy_c_converter('Z', accept={str, NoneType})
@add_legacy_c_converter('Z#', accept={str, NoneType}, zeroes=True)
class Py_UNICODE_converter(CConverter):
    # Converter for a C 'Py_UNICODE *' (format units 'u'/'u#'/'Z'/'Z#').
    type = 'Py_UNICODE *'
    default_type = (str, Null, NoneType)
    format_unit = 'u'

    def converter_init(self, *, accept={str}, zeroes=False):
        if accept == {str, NoneType}:
            format_unit = 'Z'
        elif accept == {str}:
            format_unit = 'u'
        else:
            # Fixed: previously any unrecognized 'accept' silently fell
            # back to 'u'; fail loudly like the sibling converters do.
            fail("Py_UNICODE_converter: illegal 'accept' argument " + repr(accept))
        if zeroes:
            # '#' variants pass an explicit length, so NULs are allowed.
            format_unit += '#'
            self.length = True
        self.format_unit = format_unit
@add_legacy_c_converter('s*', accept={str, buffer})
@add_legacy_c_converter('z*', accept={str, buffer, NoneType})
@add_legacy_c_converter('w*', accept={rwbuffer})
class Py_buffer_converter(CConverter):
    # Converter for a C 'Py_buffer' (format units 'y*'/'s*'/'z*'/'w*').
    type = 'Py_buffer'
    format_unit = 'y*'
    impl_by_reference = True
    c_ignored_default = "{NULL, NULL}"

    def converter_init(self, *, accept={buffer}):
        if self.default not in (unspecified, None):
            fail("The only legal default value for Py_buffer is None.")
        # Start with an empty buffer; PyArg_Parse* fills it in.
        self.c_default = self.c_ignored_default

        if accept == {str, buffer, NoneType}:
            format_unit = 'z*'
        elif accept == {str, buffer}:
            format_unit = 's*'
        elif accept == {buffer}:
            format_unit = 'y*'
        elif accept == {rwbuffer}:
            format_unit = 'w*'
        else:
            fail("Py_buffer_converter: illegal combination of arguments")

        self.format_unit = format_unit

    def cleanup(self):
        # Release the buffer on exit if it was successfully filled in.
        name = ensure_legal_c_identifier(self.name)
        return "".join(["if (", name, ".obj) {\n   PyBuffer_Release(&", name, ");\n}\n"])
def correct_name_for_self(f):
    """Return the (C type, name) of the first parameter for function *f*.

    Normal callables and __init__ take a 'PyObject *' (named "self" when
    bound to a class, "module" otherwise); static methods get a dummy
    'void *'; class methods and __new__ get the 'PyTypeObject *'.
    """
    if f.kind in (CALLABLE, METHOD_INIT):
        name = "self" if f.cls else "module"
        return "PyObject *", name
    if f.kind == STATIC_METHOD:
        return "void *", "null"
    if f.kind in (CLASS_METHOD, METHOD_NEW):
        return "PyTypeObject *", "type"
    raise RuntimeError("Unhandled type of function f: " + repr(f.kind))
def required_type_for_self_for_parser(f):
    """Return the parser-side C type for *f*'s self parameter, or None.

    Only tp_new/tp_init and class/static methods force a specific type
    in the parsing function's signature.
    """
    self_type = correct_name_for_self(f)[0]
    forced = f.kind in (METHOD_INIT, METHOD_NEW, STATIC_METHOD, CLASS_METHOD)
    return self_type if forced else None
class self_converter(CConverter):
    """
    A special-case converter:
    this is the default converter used for "self".
    """
    type = None
    format_unit = ''

    def converter_init(self, *, type=None):
        # Remember an explicitly requested C type for self, if any.
        self.specified_type = type

    def pre_render(self):
        # Resolve self's C type and signature name once the function
        # is fully known.
        f = self.function
        default_type, default_name = correct_name_for_self(f)
        self.signature_name = default_name
        self.type = self.specified_type or self.type or default_type

        kind = self.function.kind
        new_or_init = kind in (METHOD_NEW, METHOD_INIT)
        if (kind == STATIC_METHOD) or new_or_init:
            self.show_in_signature = False

    # tp_new (METHOD_NEW) functions are of type newfunc:
    #     typedef PyObject *(*newfunc)(struct _typeobject *, PyObject *, PyObject *);
    # PyTypeObject is a typedef for struct _typeobject.
    #
    # tp_init (METHOD_INIT) functions are of type initproc:
    #     typedef int (*initproc)(PyObject *, PyObject *, PyObject *);
    #
    # All other functions generated by Argument Clinic are stored in
    # PyMethodDef structures, in the ml_meth slot, which is of type PyCFunction:
    #     typedef PyObject *(*PyCFunction)(PyObject *, PyObject *);
    # However!  We habitually cast these functions to PyCFunction,
    # since functions that accept keyword arguments don't fit this signature
    # but are stored there anyway.  So strict type equality isn't important
    # for these functions.
    #
    # So:
    #
    # * The name of the first parameter to the impl and the parsing function will always
    #   be self.name.
    #
    # * The type of the first parameter to the impl will always be of self.type.
    #
    # * If the function is neither tp_new (METHOD_NEW) nor tp_init (METHOD_INIT):
    #   * The type of the first parameter to the parsing function is also self.type.
    #     This means that if you step into the parsing function, your "self" parameter
    #     is of the correct type, which may make debugging more pleasant.
    #
    # * Else if the function is tp_new (METHOD_NEW):
    #   * The type of the first parameter to the parsing function is "PyTypeObject *",
    #     so the type signature of the function call is an exact match.
    #   * If self.type != "PyTypeObject *", we cast the first parameter to self.type
    #     in the impl call.
    #
    # * Else if the function is tp_init (METHOD_INIT):
    #   * The type of the first parameter to the parsing function is "PyObject *",
    #     so the type signature of the function call is an exact match.
    #   * If self.type != "PyObject *", we cast the first parameter to self.type
    #     in the impl call.

    @property
    def parser_type(self):
        # C type used in the parsing function's signature; may differ
        # from self.type (see the long comment above).
        return required_type_for_self_for_parser(self.function) or self.type

    def render(self, parameter, data):
        """
        parameter is a clinic.Parameter instance.
        data is a CRenderData instance.
        """
        if self.function.kind == STATIC_METHOD:
            return

        self._render_self(parameter, data)

        if self.type != self.parser_type:
            # insert cast to impl_argument[0], aka self.
            # we know we're in the first slot in all the CRenderData lists,
            # because we render parameters in order, and self is always first.
            assert len(data.impl_arguments) == 1
            assert data.impl_arguments[0] == self.name
            data.impl_arguments[0] = '(' + self.type + ")" + data.impl_arguments[0]

    def set_template_dict(self, template_dict):
        # Expose self's name/type (and an optional exact-type check for
        # tp_new/tp_init) to the code-generation templates.
        template_dict['self_name'] = self.name
        template_dict['self_type'] = self.parser_type
        kind = self.function.kind
        cls = self.function.cls
        if ((kind in (METHOD_NEW, METHOD_INIT)) and cls and cls.typedef):
            if kind == METHOD_NEW:
                passed_in_type = self.name
            else:
                passed_in_type = 'Py_TYPE({})'.format(self.name)
            line = '({passed_in_type} == {type_object}) &&\n        '
            d = {
                'type_object': self.function.cls.type_object,
                'passed_in_type': passed_in_type
            }
            template_dict['self_type_check'] = line.format_map(d)
def add_c_return_converter(f, name=None):
    """Register *f* in return_converters; usable as a class decorator.

    Without an explicit name, the class name must end with
    '_return_converter'; the prefix becomes the registered name.
    Classes whose name lacks the suffix are returned unregistered.
    """
    if not name:
        name = f.__name__
    suffix = '_return_converter'
    if name.endswith(suffix):
        return_converters[name[:-len(suffix)]] = f
    return f
class CReturnConverterAutoRegister(type):
    # Metaclass: every CReturnConverter subclass registers itself
    # (by its '*_return_converter' name) at class-creation time.
    def __init__(cls, name, bases, classdict):
        add_c_return_converter(cls)
class CReturnConverter(metaclass=CReturnConverterAutoRegister):
    # Base class for return converters: turns an impl's C return value
    # into the PyObject* the parsing function must return.

    # The C type to use for this variable.
    # 'type' should be a Python string specifying the type, e.g. "int".
    # If this is a pointer type, the type string should end with ' *'.
    type = 'PyObject *'

    # The Python default value for this parameter, as a Python value.
    # Or the magic value "unspecified" if there is no default.
    default = None

    def __init__(self, *, py_default=None, **kwargs):
        self.py_default = py_default
        try:
            self.return_converter_init(**kwargs)
        except TypeError as e:
            # Report bad keyword arguments with the converter name attached.
            s = ', '.join(name + '=' + repr(value) for name, value in kwargs.items())
            sys.exit(self.__class__.__name__ + '(' + s + ')\n' + str(e))

    def return_converter_init(self):
        # Subclass hook: validate/consume extra keyword arguments.
        pass

    def declare(self, data, name="_return_value"):
        # Declare the C variable that receives the impl's return value.
        line = []
        add = line.append
        add(self.type)
        if not self.type.endswith('*'):
            add(' ')
        add(name + ';')
        data.declarations.append(''.join(line))
        data.return_value = name

    def err_occurred_if(self, expr, data):
        # Emit: if expr is true AND an exception is pending, bail out.
        data.return_conversion.append('if (({}) && PyErr_Occurred()) {{\n    goto exit;\n}}\n'.format(expr))

    def err_occurred_if_null_pointer(self, variable, data):
        # Emit: a NULL pointer return value means an error occurred.
        data.return_conversion.append('if ({} == NULL) {{\n    goto exit;\n}}\n'.format(variable))

    def render(self, function, data):
        """
        function is a clinic.Function instance.
        data is a CRenderData instance.
        """
        pass
# The metaclass only registers '*_return_converter' names, so register
# the base class explicitly under the name 'object'.
add_c_return_converter(CReturnConverter, 'object')
class NoneType_return_converter(CReturnConverter):
    # For impls that return Py_None on success; any other value is an error.
    def render(self, function, data):
        self.declare(data)
        data.return_conversion.append('''
if (_return_value != Py_None) {
    goto exit;
}
return_value = Py_None;
Py_INCREF(Py_None);
'''.strip())
class bool_return_converter(CReturnConverter):
    # C 'int' result turned into a Python bool; -1 signals an error.
    type = 'int'

    def render(self, function, data):
        self.declare(data)
        self.err_occurred_if("_return_value == -1", data)
        data.return_conversion.append('return_value = PyBool_FromLong((long)_return_value);\n')
class long_return_converter(CReturnConverter):
    # Base for the integer-family return converters; subclasses override
    # the C type, conversion function, and casts.
    type = 'long'
    conversion_fn = 'PyLong_FromLong'
    cast = ''
    unsigned_cast = ''

    def render(self, function, data):
        self.declare(data)
        # (cast)-1 with a pending exception signals an error.
        self.err_occurred_if("_return_value == {}-1".format(self.unsigned_cast), data)
        data.return_conversion.append(
            ''.join(('return_value = ', self.conversion_fn, '(', self.cast, '_return_value);\n')))
class int_return_converter(long_return_converter):
    # C 'int' result, widened to long for conversion.
    type = 'int'
    cast = '(long)'
class init_return_converter(long_return_converter):
    """
    Special return converter for __init__ functions.
    """
    type = 'int'
    cast = '(long)'

    def render(self, function, data):
        # tp_init returns its int directly; no conversion code is emitted.
        pass
class unsigned_long_return_converter(long_return_converter):
    # C 'unsigned long' result.
    type = 'unsigned long'
    conversion_fn = 'PyLong_FromUnsignedLong'
    unsigned_cast = '(unsigned long)'
class unsigned_int_return_converter(unsigned_long_return_converter):
    # C 'unsigned int' result, widened to unsigned long for conversion.
    type = 'unsigned int'
    cast = '(unsigned long)'
    unsigned_cast = '(unsigned int)'
class Py_ssize_t_return_converter(long_return_converter):
    # C 'Py_ssize_t' result.
    type = 'Py_ssize_t'
    conversion_fn = 'PyLong_FromSsize_t'
class size_t_return_converter(long_return_converter):
    # C 'size_t' result.
    type = 'size_t'
    conversion_fn = 'PyLong_FromSize_t'
    unsigned_cast = '(size_t)'
class double_return_converter(CReturnConverter):
    # C 'double' result; -1.0 with a pending exception signals an error.
    type = 'double'
    cast = ''

    def render(self, function, data):
        self.declare(data)
        self.err_occurred_if("_return_value == -1.0", data)
        data.return_conversion.append(
            'return_value = PyFloat_FromDouble(' + self.cast + '_return_value);\n')
class float_return_converter(double_return_converter):
    # C 'float' result, widened to double for conversion.
    type = 'float'
    cast = '(double)'
class DecodeFSDefault_return_converter(CReturnConverter):
    # C 'char *' result decoded with the filesystem default encoding;
    # NULL signals an error.
    type = 'char *'

    def render(self, function, data):
        self.declare(data)
        self.err_occurred_if_null_pointer("_return_value", data)
        data.return_conversion.append(
            'return_value = PyUnicode_DecodeFSDefault(_return_value);\n')
def eval_ast_expr(node, globals, *, filename='-'):
    """
    Compile and evaluate an ast.Expr (or bare expression) node.

    *globals* is the globals mapping the expression is evaluated
    against.  (There's no equivalent for "locals" here.)
    Returns the value of the expression.
    """
    # Unwrap a statement-level Expr down to the expression it holds.
    if isinstance(node, ast.Expr):
        node = node.value
    code = compile(ast.Expression(node), filename, 'eval')
    # Wrap in a throwaway function so evaluation sees *globals*.
    return types.FunctionType(code, globals)()
class IndentStack:
    # Tracks the stack of nested indentation margins while parsing
    # the Clinic DSL.  Tabs are forbidden throughout.

    def __init__(self):
        self.indents = []
        self.margin = None

    def _ensure(self):
        if not self.indents:
            fail('IndentStack expected indents, but none are defined.')

    def measure(self, line):
        """
        Returns the length of the line's margin.
        """
        if '\t' in line:
            fail('Tab characters are illegal in the Argument Clinic DSL.')
        body = line.lstrip()
        if not body:
            # we can't tell anything from an empty line
            # so just pretend it's indented like our current indent
            self._ensure()
            return self.indents[-1]
        return len(line) - len(body)

    def infer(self, line):
        """
        Infer what is now the current margin based on this line.
        Returns:
            1 if we have indented (or this is the first margin)
            0 if the margin has not changed
           -N if we have dedented N times
        """
        width = self.measure(line)
        new_margin = ' ' * width

        if not self.indents:
            # First margin ever seen.
            self.indents.append(width)
            self.margin = new_margin
            return 1

        top = self.indents[-1]
        if width == top:
            return 0
        if width > top:
            # Deeper indent: push a new margin.
            self.indents.append(width)
            self.margin = new_margin
            return 1

        # width < top: we dedented; it must land on a known margin.
        if width not in self.indents:
            fail("Illegal outdent.")
        popped = 0
        while self.indents[-1] != width:
            self.indents.pop()
            popped -= 1
        self.margin = new_margin
        return popped

    @property
    def depth(self):
        """
        Returns how many margins are currently defined.
        """
        return len(self.indents)

    def indent(self, line):
        """
        Indents a line by the currently defined margin.
        """
        return self.margin + line

    def dedent(self, line):
        """
        Dedents a line by the currently defined margin.
        (The inverse of 'indent'.)
        """
        if not line.startswith(self.margin):
            fail('Cannot dedent, line does not start with the previous margin:')
        return line[self.indents[-1]:]
class DSLParser:
def __init__(self, clinic):
    # Build the directive dispatch table by introspecting this class.
    self.clinic = clinic

    self.directives = {}
    for name in dir(self):
        # functions that start with directive_ are added to directives
        _, s, key = name.partition("directive_")
        if s:
            self.directives[key] = getattr(self, name)

        # functions that start with at_ are too, with an @ in front
        _, s, key = name.partition("at_")
        if s:
            self.directives['@' + key] = getattr(self, name)

    self.reset()
def reset(self):
    # Reset all per-block parsing state back to its initial values.
    self.function = None
    self.state = self.state_dsl_start
    self.parameter_indent = None
    self.keyword_only = False
    self.positional_only = False
    self.group = 0
    self.parameter_state = self.ps_start
    self.seen_positional_with_default = False
    self.indent = IndentStack()
    self.kind = CALLABLE
    self.coexist = False
    self.parameter_continuation = ''
    self.preserve_output = False
def directive_version(self, required):
    # Handle 'version <required>': abort if this Clinic is too old.
    global version
    if version_comparitor(version, required) < 0:
        fail("Insufficient Clinic version!\n  Version: " + version + "\n  Required: " + required)
def directive_module(self, name):
    # Handle 'module <name>': declare a new (possibly nested) module.
    fields = name.split('.')
    fields.pop()  # drop the new module's own name; the rest is its parent path
    module, cls = self.clinic._module_and_class(fields)
    if cls:
        fail("Can't nest a module inside a class!")

    # Fixed: the duplicate check used to look in module.classes, but new
    # modules are stored in module.modules, so it could never fire.
    if name in module.modules:
        fail("Already defined module " + repr(name) + "!")

    m = Module(name, module)
    module.modules[name] = m
    self.block.signatures.append(m)
def directive_class(self, name, typedef, type_object):
    # Handle 'class <name> <typedef> <type_object>': declare a new class.
    # (Dead locals 'in_classes', 'parent = self' and 'so_far' removed --
    # they were never read before being overwritten or discarded.)
    fields = name.split('.')
    name = fields.pop()
    module, cls = self.clinic._module_and_class(fields)

    parent = cls or module
    if name in parent.classes:
        fail("Already defined class " + repr(name) + "!")

    c = Class(name, module, cls, typedef, type_object)
    parent.classes[name] = c
    self.block.signatures.append(c)
def directive_set(self, name, value):
    # Handle 'set line_prefix/line_suffix <value>'; the placeholders
    # below let values embed block comment delimiters safely.
    if name not in ("line_prefix", "line_suffix"):
        fail("unknown variable", repr(name))

    value = value.format_map({
        'block comment start': '/*',
        'block comment end': '*/',
        })

    self.clinic.__dict__[name] = value
def directive_destination(self, name, command, *args):
if command == 'new':
self.clinic.add_destination(name, *args)
return
if command == 'clear':
self.clinic.get_destination(name).clear()
fail("unknown destination command", repr(command))
def directive_output(self, command_or_name, destination=''):
    # Handle 'output <command> <destination>': route each category of
    # generated output to a destination buffer.
    fd = self.clinic.destination_buffers

    if command_or_name == "preset":
        preset = self.clinic.presets.get(destination)
        if not preset:
            fail("Unknown preset " + repr(destination) + "!")
        fd.update(preset)
        return

    if command_or_name == "push":
        self.clinic.destination_buffers_stack.append(fd.copy())
        return

    if command_or_name == "pop":
        if not self.clinic.destination_buffers_stack:
            fail("Can't 'output pop', stack is empty!")
        previous_fd = self.clinic.destination_buffers_stack.pop()
        fd.update(previous_fd)
        return

    # secret command for debugging!
    if command_or_name == "print":
        self.block.output.append(pprint.pformat(fd))
        self.block.output.append('\n')
        return

    d = self.clinic.get_destination(destination)

    if command_or_name == "everything":
        # Redirect every output category to this one destination.
        for name in list(fd):
            fd[name] = d
        return

    if command_or_name not in fd:
        fail("Invalid command / destination name " + repr(command_or_name) + ", must be one of:\n  preset push pop print everything " + " ".join(fd))
    fd[command_or_name] = d
def directive_dump(self, name):
    # Handle 'dump <name>': emit the named destination's text here.
    self.block.output.append(self.clinic.get_destination(name).dump())
def directive_print(self, *args):
    # Handle 'print ...': append the space-joined arguments plus newline.
    self.block.output.append(' '.join(args))
    self.block.output.append('\n')
def directive_preserve(self):
    # Handle 'preserve': keep this block's existing output verbatim.
    if self.preserve_output:
        fail("Can't have preserve twice in one block!")
    self.preserve_output = True
def at_classmethod(self):
    # Handle the '@classmethod' DSL decorator.
    if self.kind is not CALLABLE:
        fail("Can't set @classmethod, function is not a normal callable")
    self.kind = CLASS_METHOD
def at_staticmethod(self):
    # Handle the '@staticmethod' DSL decorator.
    if self.kind is not CALLABLE:
        fail("Can't set @staticmethod, function is not a normal callable")
    self.kind = STATIC_METHOD
def at_coexist(self):
    # Handle the '@coexist' DSL decorator.
    if self.coexist:
        fail("Called @coexist twice!")
    self.coexist = True
def parse(self, block):
    # Parse one Clinic block: run the state machine over every input
    # line, then render the accumulated signatures into block.output.
    self.reset()
    self.block = block
    self.saved_output = self.block.output
    block.output = []
    block_start = self.clinic.block_parser.line_number
    lines = block.input.split('\n')
    for line_number, line in enumerate(lines, self.clinic.block_parser.block_start_line_number):
        if '\t' in line:
            fail('Tab characters are illegal in the Clinic DSL.\n\t' + repr(line), line_number=block_start)
        self.state(line)

    # Feeding None flushes the terminal state.
    self.next(self.state_terminal)
    self.state(None)

    # NOTE(review): 'clinic' below is the module-level global, not
    # self.clinic -- presumably intentional, but worth verifying.
    block.output.extend(self.clinic.language.render(clinic, block.signatures))

    if self.preserve_output:
        if block.output:
            fail("'preserve' only works for blocks that don't produce any output!")
        block.output = self.saved_output
@staticmethod
def ignore_line(line):
# ignore comment-only lines
if line.lstrip().startswith('#'):
return True
# Ignore empty lines too
# (but not in docstring sections!)
if not line.strip():
return True
return False
@staticmethod
def calculate_indent(line):
return len(line) - len(line.strip())
def next(self, state, line=None):
    # Switch the state machine to *state*, optionally feeding it *line*
    # immediately (so the new state handles the current line).
    # real_print(self.state.__name__, "->", state.__name__, ", line=", line)
    self.state = state
    if line is not None:
        self.state(line)
def state_dsl_start(self, line):
    # Initial state: a line is either a directive or the start of a
    # function declaration.
    # self.block = self.ClinicOutputBlock(self)
    if self.ignore_line(line):
        return

    # is it a directive?
    fields = shlex.split(line)
    directive_name = fields[0]
    directive = self.directives.get(directive_name, None)
    if directive:
        try:
            directive(*fields[1:])
        except TypeError as e:
            fail(str(e))
        return

    # Not a directive: treat it as a function declaration.
    self.next(self.state_modulename_name, line)
def state_modulename_name(self, line):
    # looking for declaration, which establishes the leftmost column
    # line should be
    #     modulename.fnname [as c_basename] [-> return annotation]
    # square brackets denote optional syntax.
    #
    # alternatively:
    #     modulename.fnname [as c_basename] = modulename.existing_fn_name
    # clones the parameters and return converter from that
    # function.  you can't modify them.  you must enter a
    # new docstring.
    #
    # (but we might find a directive first!)
    #
    # this line is permitted to start with whitespace.
    # we'll call this number of spaces F (for "function").

    if not line.strip():
        return

    self.indent.infer(line)

    # are we cloning?
    before, equals, existing = line.rpartition('=')
    if equals:
        full_name, _, c_basename = before.partition(' as ')
        full_name = full_name.strip()
        c_basename = c_basename.strip()
        existing = existing.strip()
        if (is_legal_py_identifier(full_name) and
            (not c_basename or is_legal_c_identifier(c_basename)) and
            is_legal_py_identifier(existing)):
            # we're cloning!
            fields = [x.strip() for x in existing.split('.')]
            function_name = fields.pop()
            module, cls = self.clinic._module_and_class(fields)

            # Find the function being cloned in its module/class.
            for existing_function in (cls or module).functions:
                if existing_function.name == function_name:
                    break
            else:
                existing_function = None
            if not existing_function:
                print("class", cls, "module", module, "existing", existing)
                print("cls. functions", cls.functions)
                fail("Couldn't find existing function " + repr(existing) + "!")

            fields = [x.strip() for x in full_name.split('.')]
            function_name = fields.pop()
            module, cls = self.clinic._module_and_class(fields)

            if not (existing_function.kind == self.kind and existing_function.coexist == self.coexist):
                fail("'kind' of function and cloned function don't match!  (@classmethod/@staticmethod/@coexist)")
            self.function = existing_function.copy(name=function_name, full_name=full_name, module=module, cls=cls, c_basename=c_basename, docstring='')

            self.block.signatures.append(self.function)
            (cls or module).functions.append(self.function)
            self.next(self.state_function_docstring)
            return

    line, _, returns = line.partition('->')

    full_name, _, c_basename = line.partition(' as ')
    full_name = full_name.strip()
    c_basename = c_basename.strip() or None

    if not is_legal_py_identifier(full_name):
        fail("Illegal function name: {}".format(full_name))
    if c_basename and not is_legal_c_identifier(c_basename):
        fail("Illegal C basename: {}".format(c_basename))

    # Parse the optional '-> <return converter>' annotation by reusing
    # Python's own AST for a dummy function definition.
    return_converter = None
    if returns:
        ast_input = "def x() -> {}: pass".format(returns)
        module = None
        try:
            module = ast.parse(ast_input)
        except SyntaxError:
            pass
        if not module:
            fail("Badly-formed annotation for " + full_name + ": " + returns)
        try:
            name, legacy, kwargs = self.parse_converter(module.body[0].returns)
            if legacy:
                fail("Legacy converter {!r} not allowed as a return converter"
                     .format(name))
            if name not in return_converters:
                fail("No available return converter called " + repr(name))
            return_converter = return_converters[name](**kwargs)
        except ValueError:
            fail("Badly-formed annotation for " + full_name + ": " + returns)

    fields = [x.strip() for x in full_name.split('.')]
    function_name = fields.pop()
    module, cls = self.clinic._module_and_class(fields)

    # __new__ / __init__ get special function kinds (and __init__ gets
    # the special int-returning converter).
    fields = full_name.split('.')
    if fields[-1] == '__new__':
        if (self.kind != CLASS_METHOD) or (not cls):
            fail("__new__ must be a class method!")
        self.kind = METHOD_NEW
    elif fields[-1] == '__init__':
        if (self.kind != CALLABLE) or (not cls):
            fail("__init__ must be a normal method, not a class or static method!")
        self.kind = METHOD_INIT
        if not return_converter:
            return_converter = init_return_converter()
    elif fields[-1] in unsupported_special_methods:
        fail(fields[-1] + " is a special method and cannot be converted to Argument Clinic!  (Yet.)")

    if not return_converter:
        return_converter = CReturnConverter()

    if not module:
        fail("Undefined module used in declaration of " + repr(full_name.strip()) + ".")
    self.function = Function(name=function_name, full_name=full_name, module=module, cls=cls, c_basename=c_basename,
                             return_converter=return_converter, kind=self.kind, coexist=self.coexist)
    self.block.signatures.append(self.function)

    # insert a self converter automatically
    type, name = correct_name_for_self(self.function)
    kwargs = {}
    if cls and type == "PyObject *":
        kwargs['type'] = cls.typedef
    sc = self.function.self_converter = self_converter(name, name, self.function, **kwargs)
    p_self = Parameter(sc.name, inspect.Parameter.POSITIONAL_ONLY, function=self.function, converter=sc)
    self.function.parameters[sc.name] = p_self

    (cls or module).functions.append(self.function)
    self.next(self.state_parameters_start)
# Now entering the parameters section. The rules, formally stated:
#
# * All lines must be indented with spaces only.
# * The first line must be a parameter declaration.
# * The first line must be indented.
# * This first line establishes the indent for parameters.
# * We'll call this number of spaces P (for "parameter").
# * Thenceforth:
# * Lines indented with P spaces specify a parameter.
# * Lines indented with > P spaces are docstrings for the previous
# parameter.
# * We'll call this number of spaces D (for "docstring").
# * All subsequent lines indented with >= D spaces are stored as
# part of the per-parameter docstring.
# * All lines will have the first D spaces of the indent stripped
# before they are stored.
# * It's illegal to have a line starting with a number of spaces X
# such that P < X < D.
# * A line with < P spaces is the first line of the function
# docstring, which ends processing for parameters and per-parameter
# docstrings.
# * The first line of the function docstring must be at the same
# indent as the function declaration.
# * It's illegal to have any line in the parameters section starting
# with X spaces such that F < X < P. (As before, F is the indent
# of the function declaration.)
#
# Also, currently Argument Clinic places the following restrictions on groups:
# * Each group must contain at least one parameter.
# * Each group may contain at most one group, which must be the furthest
# thing in the group from the required parameters. (The nested group
# must be the first in the group when it's before the required
# parameters, and the last thing in the group when after the required
# parameters.)
# * There may be at most one (top-level) group to the left or right of
# the required parameters.
# * You must specify a slash, and it must be after all parameters.
# (In other words: either all parameters are positional-only,
# or none are.)
#
# Said another way:
# * Each group must contain at least one parameter.
# * All left square brackets before the required parameters must be
# consecutive. (You can't have a left square bracket followed
# by a parameter, then another left square bracket. You can't
# have a left square bracket, a parameter, a right square bracket,
# and then a left square bracket.)
# * All right square brackets after the required parameters must be
# consecutive.
#
# These rules are enforced with a single state variable:
# "parameter_state". (Previously the code was a miasma of ifs and
# separate boolean state variables.) The states are:
#
# [ [ a, b, ] c, ] d, e, f=3, [ g, h, [ i ] ] <- line
# 01 2 3 4 5 6 <- state transitions
#
# 0: ps_start. before we've seen anything. legal transitions are to 1 or 3.
# 1: ps_left_square_before. left square brackets before required parameters.
# 2: ps_group_before. in a group, before required parameters.
# 3: ps_required. required parameters, positional-or-keyword or positional-only
# (we don't know yet). (renumber left groups!)
# 4: ps_optional. positional-or-keyword or positional-only parameters that
# now must have default values.
# 5: ps_group_after. in a group, after required parameters.
# 6: ps_right_square_after. right square brackets after required parameters.
ps_start, ps_left_square_before, ps_group_before, ps_required, \
ps_optional, ps_group_after, ps_right_square_after = range(7)
def state_parameters_start(self, line):
    # First line after the declaration: either the first parameter
    # (indented) or the start of the function docstring (not indented).
    if self.ignore_line(line):
        return

    # if this line is not indented, we have no parameters
    if not self.indent.infer(line):
        return self.next(self.state_function_docstring, line)

    self.parameter_continuation = ''
    return self.next(self.state_parameter, line)
def to_required(self):
    """
    Transition to the "required" parameter state.
    """
    if self.parameter_state != self.ps_required:
        self.parameter_state = self.ps_required
        # Renumber left-hand optional groups: flip the (negative)
        # group markers assigned so far back to positive.
        for p in self.function.parameters.values():
            p.group = -p.group
def state_parameter(self, line):
    """Parse one line of the parameter section of a Clinic block.

    Handles line continuations, indent changes (which switch states),
    the special group symbols (* / [ ]), the optional "as c_name"
    clause, the converter annotation, and the optional default value,
    finally registering a new Parameter on self.function.
    """
    if self.parameter_continuation:
        # previous line ended with a backslash; glue it onto this one
        line = self.parameter_continuation + ' ' + line.lstrip()
        self.parameter_continuation = ''
    if self.ignore_line(line):
        return
    assert self.indent.depth == 2
    indent = self.indent.infer(line)
    if indent == -1:
        # we outdented, must be to definition column
        return self.next(self.state_function_docstring, line)
    if indent == 1:
        # we indented, must be to new parameter docstring column
        return self.next(self.state_parameter_docstring_start, line)
    line = line.rstrip()
    if line.endswith('\\'):
        # explicit continuation: stash this fragment for the next line
        self.parameter_continuation = line[:-1]
        return
    line = line.lstrip()
    if line in ('*', '/', '[', ']'):
        self.parse_special_symbol(line)
        return
    # advance the option-group state machine for an ordinary parameter
    if self.parameter_state in (self.ps_start, self.ps_required):
        self.to_required()
    elif self.parameter_state == self.ps_left_square_before:
        self.parameter_state = self.ps_group_before
    elif self.parameter_state == self.ps_group_before:
        if not self.group:
            self.to_required()
    elif self.parameter_state in (self.ps_group_after, self.ps_optional):
        pass
    else:
        fail("Function " + self.function.name + " has an unsupported group configuration. (Unexpected state " + str(self.parameter_state) + ".a)")
    # handle "as" for parameters too
    c_name = None
    name, have_as_token, trailing = line.partition(' as ')
    if have_as_token:
        name = name.strip()
        if ' ' not in name:
            fields = trailing.strip().split(' ')
            if not fields:
                fail("Invalid 'as' clause!")
            c_name = fields[0]
            if c_name.endswith(':'):
                name += ':'
                c_name = c_name[:-1]
            fields[0] = name
            line = ' '.join(fields)
    # split off the default value, if any
    base, equals, default = line.rpartition('=')
    if not equals:
        base = default
        default = None
    # parse "name: annotation" by wrapping it in a dummy def
    module = None
    try:
        ast_input = "def x({}): pass".format(base)
        module = ast.parse(ast_input)
    except SyntaxError:
        try:
            # the last = was probably inside a function call, like
            #   c: int(accept={str})
            # so assume there was no actual default value.
            default = None
            ast_input = "def x({}): pass".format(line)
            module = ast.parse(ast_input)
        except SyntaxError:
            pass
    if not module:
        fail("Function " + self.function.name + " has an invalid parameter declaration:\n\t" + line)
    function_args = module.body[0].args
    if len(function_args.args) > 1:
        fail("Function " + self.function.name + " has an invalid parameter declaration (comma?):\n\t" + line)
    if function_args.defaults or function_args.kw_defaults:
        fail("Function " + self.function.name + " has an invalid parameter declaration (default value?):\n\t" + line)
    if function_args.vararg or function_args.kwarg:
        fail("Function " + self.function.name + " has an invalid parameter declaration (*args? **kwargs?):\n\t" + line)
    parameter = function_args.args[0]
    parameter_name = parameter.arg
    name, legacy, kwargs = self.parse_converter(parameter.annotation)
    if not default:
        if self.parameter_state == self.ps_optional:
            fail("Can't have a parameter without a default (" + repr(parameter_name) + ")\nafter a parameter with a default!")
        value = unspecified
        if 'py_default' in kwargs:
            fail("You can't specify py_default without specifying a default value!")
    else:
        if self.parameter_state == self.ps_required:
            self.parameter_state = self.ps_optional
        default = default.strip()
        bad = False
        ast_input = "x = {}".format(default)
        bad = False
        try:
            module = ast.parse(ast_input)
            if 'c_default' not in kwargs:
                # we can only represent very simple data values in C.
                # detect whether default is okay, via a blacklist
                # of disallowed ast nodes.
                class DetectBadNodes(ast.NodeVisitor):
                    bad = False
                    def bad_node(self, node):
                        self.bad = True
                    # inline function call
                    visit_Call = bad_node
                    # inline if statement ("x = 3 if y else z")
                    visit_IfExp = bad_node
                    # comprehensions and generator expressions
                    visit_ListComp = visit_SetComp = bad_node
                    visit_DictComp = visit_GeneratorExp = bad_node
                    # literals for advanced types
                    visit_Dict = visit_Set = bad_node
                    visit_List = visit_Tuple = bad_node
                    # "starred": "a = [1, 2, 3]; *a"
                    visit_Starred = bad_node
                    # allow ellipsis, for now
                    # visit_Ellipsis = bad_node
                blacklist = DetectBadNodes()
                blacklist.visit(module)
                bad = blacklist.bad
            else:
                # if they specify a c_default, we can be more lenient about the default value.
                # but at least make an attempt at ensuring it's a valid expression.
                try:
                    value = eval(default)
                    if value == unspecified:
                        fail("'unspecified' is not a legal default value!")
                except NameError:
                    pass  # probably a named constant
                except Exception as e:
                    fail("Malformed expression given as default value\n"
                         "{!r} caused {!r}".format(default, e))
            if bad:
                fail("Unsupported expression as default value: " + repr(default))
            expr = module.body[0].value
            # mild hack: explicitly support NULL as a default value
            if isinstance(expr, ast.Name) and expr.id == 'NULL':
                value = NULL
                py_default = 'None'
                c_default = "NULL"
            elif (isinstance(expr, ast.BinOp) or
                (isinstance(expr, ast.UnaryOp) and not isinstance(expr.operand, ast.Num))):
                c_default = kwargs.get("c_default")
                if not (isinstance(c_default, str) and c_default):
                    fail("When you specify an expression (" + repr(default) + ") as your default value,\nyou MUST specify a valid c_default.")
                py_default = default
                value = unknown
            elif isinstance(expr, ast.Attribute):
                # dotted named constant, e.g. sys.maxsize
                a = []
                n = expr
                while isinstance(n, ast.Attribute):
                    a.append(n.attr)
                    n = n.value
                if not isinstance(n, ast.Name):
                    fail("Unsupported default value " + repr(default) + " (looked like a Python constant)")
                a.append(n.id)
                py_default = ".".join(reversed(a))
                c_default = kwargs.get("c_default")
                if not (isinstance(c_default, str) and c_default):
                    fail("When you specify a named constant (" + repr(py_default) + ") as your default value,\nyou MUST specify a valid c_default.")
                try:
                    value = eval(py_default)
                except NameError:
                    value = unknown
            else:
                # plain literal default
                value = ast.literal_eval(expr)
                py_default = repr(value)
                if isinstance(value, (bool, None.__class__)):
                    c_default = "Py_" + py_default
                elif isinstance(value, str):
                    c_default = c_repr(value)
                else:
                    c_default = py_default
        except SyntaxError as e:
            fail("Syntax error: " + repr(e.text))
        except (ValueError, AttributeError):
            value = unknown
            c_default = kwargs.get("c_default")
            py_default = default
            if not (isinstance(c_default, str) and c_default):
                fail("When you specify a named constant (" + repr(py_default) + ") as your default value,\nyou MUST specify a valid c_default.")
        kwargs.setdefault('c_default', c_default)
        kwargs.setdefault('py_default', py_default)
    dict = legacy_converters if legacy else converters
    legacy_str = "legacy " if legacy else ""
    if name not in dict:
        fail('{} is not a valid {}converter'.format(name, legacy_str))
    # if you use a c_name for the parameter, we just give that name to the converter
    # but the parameter object gets the python name
    converter = dict[name](c_name or parameter_name, parameter_name, self.function, value, **kwargs)
    kind = inspect.Parameter.KEYWORD_ONLY if self.keyword_only else inspect.Parameter.POSITIONAL_OR_KEYWORD
    if isinstance(converter, self_converter):
        if len(self.function.parameters) == 1:
            if (self.parameter_state != self.ps_required):
                fail("A 'self' parameter cannot be marked optional.")
            if value is not unspecified:
                fail("A 'self' parameter cannot have a default value.")
            if self.group:
                fail("A 'self' parameter cannot be in an optional group.")
            kind = inspect.Parameter.POSITIONAL_ONLY
            self.parameter_state = self.ps_start
            self.function.parameters.clear()
        else:
            fail("A 'self' parameter, if specified, must be the very first thing in the parameter block.")
    p = Parameter(parameter_name, kind, function=self.function, converter=converter, default=value, group=self.group)
    if parameter_name in self.function.parameters:
        fail("You can't have two parameters named " + repr(parameter_name) + "!")
    self.function.parameters[parameter_name] = p
def parse_converter(self, annotation):
    """Translate a parameter annotation AST node into (name, legacy, kwargs).

    A string annotation selects a legacy converter, a bare name selects
    a modern converter with no arguments, and a call selects a modern
    converter with keyword arguments.
    """
    if isinstance(annotation, ast.Str):
        # legacy format-unit string, e.g.  c: 's'
        return annotation.s, True, {}
    if isinstance(annotation, ast.Name):
        # bare converter name, e.g.  c: int
        return annotation.id, False, {}
    if not isinstance(annotation, ast.Call):
        fail("Annotations must be either a name, a function call, or a string.")
    # converter call, e.g.  c: int(accept={str}); evaluate each keyword
    # argument in this module's namespace.
    converter_name = annotation.func.id
    namespace = globals()
    keyword_args = {
        keyword.arg: eval_ast_expr(keyword.value, namespace)
        for keyword in annotation.keywords
    }
    return converter_name, False, keyword_args
def parse_special_symbol(self, symbol):
    """Handle one of the four special parameter-line symbols.

      '*'  -> all subsequent parameters are keyword-only
      '['  -> open an optional-parameter group
      ']'  -> close the current optional-parameter group
      '/'  -> all preceding parameters become positional-only
    """
    if symbol == '*':
        if self.keyword_only:
            fail("Function " + self.function.name + " uses '*' more than once.")
        self.keyword_only = True
    elif symbol == '[':
        if self.parameter_state in (self.ps_start, self.ps_left_square_before):
            self.parameter_state = self.ps_left_square_before
        elif self.parameter_state in (self.ps_required, self.ps_group_after):
            self.parameter_state = self.ps_group_after
        else:
            fail("Function " + self.function.name + " has an unsupported group configuration. (Unexpected state " + str(self.parameter_state) + ".b)")
        self.group += 1
        # option groups can't be expressed in a machine-readable signature
        self.function.docstring_only = True
    elif symbol == ']':
        if not self.group:
            fail("Function " + self.function.name + " has a ] without a matching [.")
        if not any(p.group == self.group for p in self.function.parameters.values()):
            fail("Function " + self.function.name + " has an empty group.\nAll groups must contain at least one parameter.")
        self.group -= 1
        if self.parameter_state in (self.ps_left_square_before, self.ps_group_before):
            self.parameter_state = self.ps_group_before
        elif self.parameter_state in (self.ps_group_after, self.ps_right_square_after):
            self.parameter_state = self.ps_right_square_after
        else:
            fail("Function " + self.function.name + " has an unsupported group configuration. (Unexpected state " + str(self.parameter_state) + ".c)")
    elif symbol == '/':
        if self.positional_only:
            fail("Function " + self.function.name + " uses '/' more than once.")
        self.positional_only = True
        # ps_required and ps_optional are allowed here, that allows positional-only without option groups
        # to work (and have default values!)
        if (self.parameter_state not in (self.ps_required, self.ps_optional, self.ps_right_square_after, self.ps_group_before)) or self.group:
            fail("Function " + self.function.name + " has an unsupported group configuration. (Unexpected state " + str(self.parameter_state) + ".d)")
        if self.keyword_only:
            fail("Function " + self.function.name + " mixes keyword-only and positional-only parameters, which is unsupported.")
        # fixup preceding parameters
        for p in self.function.parameters.values():
            if (p.kind != inspect.Parameter.POSITIONAL_OR_KEYWORD and not isinstance(p.converter, self_converter)):
                fail("Function " + self.function.name + " mixes keyword-only and positional-only parameters, which is unsupported.")
            p.kind = inspect.Parameter.POSITIONAL_ONLY
def state_parameter_docstring_start(self, line):
    """Note the column where the parameter docstring starts, then
    hand the line over to the parameter-docstring state."""
    margin_width = len(self.indent.margin)
    self.parameter_docstring_indent = margin_width
    assert self.indent.depth == 3
    return self.next(self.state_parameter_docstring, line)
# every line of the docstring must start with at least F spaces,
# where F > P.
# these F spaces will be stripped.
def state_parameter_docstring(self, line):
    """Accumulate docstring text for the most recently seen parameter."""
    stripped = line.strip()
    if stripped.startswith('#'):
        # DSL comment line; not part of the docstring
        return
    indent = self.indent.measure(line)
    if indent < self.parameter_docstring_indent:
        # outdent: this parameter's docstring is finished
        self.indent.infer(line)
        assert self.indent.depth < 3
        if self.indent.depth == 2:
            # back to a parameter
            return self.next(self.state_parameter, line)
        assert self.indent.depth == 1
        return self.next(self.state_function_docstring, line)
    assert self.function.parameters
    last_parameter = next(reversed(list(self.function.parameters.values())))
    new_docstring = last_parameter.docstring
    if new_docstring:
        new_docstring += '\n'
    if stripped:
        new_docstring += self.indent.dedent(line)
    last_parameter.docstring = new_docstring
# The final stanza of the DSL is the function docstring.
def state_function_docstring(self, line):
    """Append one line of DSL input to the function's docstring."""
    if self.group:
        fail("Function " + self.function.name + " has a ] without a matching [.")
    stripped = line.strip()
    if stripped.startswith('#'):
        # comment lines are not part of the docstring
        return
    text = self.indent.dedent(line).rstrip() if stripped else ''
    docstring = self.function.docstring
    if docstring:
        docstring += "\n"
    self.function.docstring = docstring + text
def format_docstring(self):
    """Render the function's final docstring.

    Builds the signature first line (with optional-group brackets and
    the positional-only '/'), the "{parameters}" section, and the body
    text, enforcing the summary-line rule along the way.
    """
    f = self.function
    new_or_init = f.kind in (METHOD_NEW, METHOD_INIT)
    if new_or_init and not f.docstring:
        # don't render a docstring at all, no signature, nothing.
        return f.docstring
    text, add, output = _text_accumulator()
    parameters = f.render_parameters
    ##
    ## docstring first line
    ##
    if new_or_init:
        # classes get *just* the name of the class
        # not __new__, not __init__, and not module.classname
        assert f.cls
        add(f.cls.name)
    else:
        add(f.name)
    add('(')
    # populate "right_bracket_count" field for every parameter
    assert parameters, "We should always have a self parameter. " + repr(f)
    assert isinstance(parameters[0].converter, self_converter)
    # self is always positional-only.
    assert parameters[0].is_positional_only()
    parameters[0].right_bracket_count = 0
    positional_only = True
    for p in parameters[1:]:
        if not p.is_positional_only():
            positional_only = False
        else:
            assert positional_only
        if positional_only:
            p.right_bracket_count = abs(p.group)
        else:
            # don't put any right brackets around non-positional-only parameters, ever.
            p.right_bracket_count = 0
    right_bracket_count = 0
    def fix_right_bracket_count(desired):
        # emit '[' or ']' until the running bracket depth matches `desired`
        nonlocal right_bracket_count
        s = ''
        while right_bracket_count < desired:
            s += '['
            right_bracket_count += 1
        while right_bracket_count > desired:
            s += ']'
            right_bracket_count -= 1
        return s
    need_slash = False
    added_slash = False
    need_a_trailing_slash = False
    # we only need a trailing slash:
    #   * if this is not a "docstring_only" signature
    #   * and if the last *shown* parameter is
    #     positional only
    if not f.docstring_only:
        for p in reversed(parameters):
            if not p.converter.show_in_signature:
                continue
            if p.is_positional_only():
                need_a_trailing_slash = True
            break
    added_star = False
    first_parameter = True
    last_p = parameters[-1]
    line_length = len(''.join(text))
    indent = " " * line_length
    def add_parameter(text):
        # append one rendered parameter, wrapping lines at 72 columns
        nonlocal line_length
        nonlocal first_parameter
        if first_parameter:
            s = text
            first_parameter = False
        else:
            s = ' ' + text
            if line_length + len(s) >= 72:
                add('\n')
                add(indent)
                line_length = len(indent)
                s = text
        line_length += len(s)
        add(s)
    for p in parameters:
        if not p.converter.show_in_signature:
            continue
        assert p.name
        is_self = isinstance(p.converter, self_converter)
        if is_self and f.docstring_only:
            # this isn't a real machine-parsable signature,
            # so let's not print the "self" parameter
            continue
        if p.is_positional_only():
            need_slash = not f.docstring_only
        elif need_slash and not (added_slash or p.is_positional_only()):
            added_slash = True
            add_parameter('/,')
        if p.is_keyword_only() and not added_star:
            added_star = True
            add_parameter('*,')
        p_add, p_output = text_accumulator()
        p_add(fix_right_bracket_count(p.right_bracket_count))
        if isinstance(p.converter, self_converter):
            # annotate first parameter as being a "self".
            #
            # if inspect.Signature gets this function,
            # and it's already bound, the self parameter
            # will be stripped off.
            #
            # if it's not bound, it should be marked
            # as positional-only.
            #
            # note: we don't print "self" for __init__,
            # because this isn't actually the signature
            # for __init__.  (it can't be, __init__ doesn't
            # have a docstring.)  if this is an __init__
            # (or __new__), then this signature is for
            # calling the class to construct a new instance.
            p_add('$')
        name = p.converter.signature_name or p.name
        p_add(name)
        if p.converter.is_optional():
            p_add('=')
            value = p.converter.py_default
            if not value:
                value = repr(p.converter.default)
            p_add(value)
        if (p != last_p) or need_a_trailing_slash:
            p_add(',')
        add_parameter(p_output())
    add(fix_right_bracket_count(0))
    if need_a_trailing_slash:
        add_parameter('/')
    add(')')
    # PEP 8 says:
    #
    #     The Python standard library will not use function annotations
    #     as that would result in a premature commitment to a particular
    #     annotation style. Instead, the annotations are left for users
    #     to discover and experiment with useful annotation styles.
    #
    # therefore this is commented out:
    #
    # if f.return_converter.py_default:
    #     add(' -> ')
    #     add(f.return_converter.py_default)
    if not f.docstring_only:
        add("\n" + sig_end_marker + "\n")
    docstring_first_line = output()
    # now fix up the places where the brackets look wrong
    docstring_first_line = docstring_first_line.replace(', ]', ',] ')
    # okay.  now we're officially building the "parameters" section.
    # create substitution text for {parameters}
    spacer_line = False
    for p in parameters:
        if not p.docstring.strip():
            continue
        if spacer_line:
            add('\n')
        else:
            spacer_line = True
        add("  ")
        add(p.name)
        add('\n')
        add(textwrap.indent(rstrip_lines(p.docstring.rstrip()), "    "))
    parameters = output()
    if parameters:
        parameters += '\n'
    ##
    ## docstring body
    ##
    docstring = f.docstring.rstrip()
    lines = [line.rstrip() for line in docstring.split('\n')]
    # Enforce the summary line!
    # The first line of a docstring should be a summary of the function.
    # It should fit on one line (80 columns? 79 maybe?) and be a paragraph
    # by itself.
    #
    # Argument Clinic enforces the following rule:
    #  * either the docstring is empty,
    #  * or it must have a summary line.
    #
    # Guido said Clinic should enforce this:
    # http://mail.python.org/pipermail/python-dev/2013-June/127110.html
    if len(lines) >= 2:
        if lines[1]:
            fail("Docstring for " + f.full_name + " does not have a summary line!\n" +
                "Every non-blank function docstring must start with\n" +
                "a single line summary followed by an empty line.")
    elif len(lines) == 1:
        # the docstring is only one line right now--the summary line.
        # add an empty line after the summary line so we have space
        # between it and the {parameters} we're about to add.
        lines.append('')
    parameters_marker_count = len(docstring.split('{parameters}')) - 1
    if parameters_marker_count > 1:
        fail('You may not specify {parameters} more than once in a docstring!')
    if not parameters_marker_count:
        # insert after summary line
        lines.insert(2, '{parameters}')
    # insert at front of docstring
    lines.insert(0, docstring_first_line)
    docstring = "\n".join(lines)
    add(docstring)
    docstring = output()
    docstring = linear_format(docstring, parameters=parameters)
    docstring = docstring.rstrip()
    return docstring
def state_terminal(self, line):
    """
    Final state: invoked once the whole block has been processed.

    Validates a trailing '*' (it must be followed by at least one
    keyword-only parameter), trims parameter docstrings, and renders
    the function's final docstring.
    """
    assert not line
    if not self.function:
        return
    if self.keyword_only:
        # a bare '*' must be followed by a keyword-only parameter
        parameters = list(self.function.parameters.values())
        if parameters:
            trailing = parameters[-1]
            missing = trailing.kind != inspect.Parameter.KEYWORD_ONLY
        else:
            missing = True
        if missing:
            fail("Function " + self.function.name + " specifies '*' without any parameters afterwards.")
    # remove trailing whitespace from all parameter docstrings
    for parameter in self.function.parameters.values():
        if parameter:
            parameter.docstring = parameter.docstring.rstrip()
    self.function.docstring = self.format_docstring()
# maps strings to callables.
# the callable should return an object
# that implements the clinic parser
# interface (__init__ and parse).
#
# example parsers:
#   "clinic", handles the Clinic DSL
#   "python", handles running Python code
#
parsers = {'clinic' : DSLParser, 'python': PythonParser}

# module-level handle on the active Clinic instance; presumably set
# while a file is being processed so parsed blocks can reach it --
# only the initial assignment is visible here, confirm against the
# full file.
clinic = None
def main(argv):
    """Command-line entry point for Argument Clinic.

    Modes (mutually exclusive):
      * default: process each file named on the command line
      * -o FILE: process one file, writing output to FILE
      * --converters: list all converters and return converters, then exit
      * --make: walk --srcdir and process every .c/.h file found

    Exits via sys.exit on usage errors; returns None on success.
    """
    import sys

    # Compare as a tuple: the old "major < 3 or minor < 3" test would
    # wrongly reject e.g. Python 4.0 (minor == 0).
    if sys.version_info < (3, 3):
        sys.exit("Error: clinic.py requires Python 3.3 or greater.")

    import argparse
    cmdline = argparse.ArgumentParser()
    cmdline.add_argument("-f", "--force", action='store_true')
    cmdline.add_argument("-o", "--output", type=str)
    cmdline.add_argument("-v", "--verbose", action='store_true')
    cmdline.add_argument("--converters", action='store_true')
    cmdline.add_argument("--make", action='store_true',
                         help="Walk --srcdir to run over all relevant files.")
    cmdline.add_argument("--srcdir", type=str, default=os.curdir,
                         help="The directory tree to walk in --make mode.")
    cmdline.add_argument("filename", type=str, nargs="*")
    ns = cmdline.parse_args(argv)

    if ns.converters:
        if ns.filename:
            print("Usage error: can't specify --converters and a filename at the same time.")
            print()
            cmdline.print_usage()
            sys.exit(-1)
        converters = []
        return_converters = []
        # registration helpers that happen to end in "_converter"
        # but are not converters themselves
        ignored = set("""
            add_c_converter
            add_c_return_converter
            add_default_legacy_c_converter
            add_legacy_c_converter
            """.strip().split())
        module = globals()
        for name in module:
            for suffix, ids in (
                ("_return_converter", return_converters),
                ("_converter", converters),
            ):
                if name in ignored:
                    continue
                if name.endswith(suffix):
                    ids.append((name, name[:-len(suffix)]))
                    break
        print()

        print("Legacy converters:")
        legacy = sorted(legacy_converters)
        print('    ' + ' '.join(c for c in legacy if c[0].isupper()))
        print('    ' + ' '.join(c for c in legacy if c[0].islower()))
        print()

        for title, attribute, ids in (
            ("Converters", 'converter_init', converters),
            ("Return converters", 'return_converter_init', return_converters),
        ):
            print(title + ":")
            longest = -1
            for name, short_name in ids:
                longest = max(longest, len(short_name))
            for name, short_name in sorted(ids, key=lambda x: x[1].lower()):
                cls = module[name]
                # "initializer", not "callable": avoid shadowing the builtin
                initializer = getattr(cls, attribute, None)
                if not initializer:
                    continue
                signature = inspect.signature(initializer)
                parameters = []
                for parameter_name, parameter in signature.parameters.items():
                    if parameter.kind == inspect.Parameter.KEYWORD_ONLY:
                        if parameter.default != inspect.Parameter.empty:
                            s = '{}={!r}'.format(parameter_name, parameter.default)
                        else:
                            s = parameter_name
                        parameters.append(s)
                print('    {}({})'.format(short_name, ', '.join(parameters)))
            print()
        print("All converters also accept (c_default=None, py_default=None, annotation=None).")
        print("All return converters also accept (py_default=None).")
        sys.exit(0)

    if ns.make:
        if ns.output or ns.filename:
            print("Usage error: can't use -o or filenames with --make.")
            print()
            cmdline.print_usage()
            sys.exit(-1)
        if not ns.srcdir:
            print("Usage error: --srcdir must not be empty with --make.")
            print()
            cmdline.print_usage()
            sys.exit(-1)
        for root, dirs, files in os.walk(ns.srcdir):
            # prune VCS/build directories in place so os.walk skips them
            for rcs_dir in ('.svn', '.git', '.hg', 'build', 'externals'):
                if rcs_dir in dirs:
                    dirs.remove(rcs_dir)
            for filename in files:
                if not (filename.endswith('.c') or filename.endswith('.h')):
                    continue
                path = os.path.join(root, filename)
                if ns.verbose:
                    print(path)
                parse_file(path, force=ns.force, verify=not ns.force)
        return

    if not ns.filename:
        cmdline.print_usage()
        sys.exit(-1)

    if ns.output and len(ns.filename) > 1:
        print("Usage error: can't use -o with multiple filenames.")
        print()
        cmdline.print_usage()
        sys.exit(-1)

    for filename in ns.filename:
        if ns.verbose:
            print(filename)
        parse_file(filename, output=ns.output, force=ns.force, verify=not ns.force)
if __name__ == "__main__":
    # Run the command-line driver.  main() returns None on success, so
    # this exits with status 0 unless main() itself called sys.exit.
    sys.exit(main(sys.argv[1:]))
|
FFMG/myoddweb.piger
|
monitor/api/python/Python-3.7.2/Tools/clinic/clinic.py
|
Python
|
gpl-2.0
| 154,883
|
[
"VisIt"
] |
737cba91663d9d65df55fd85462596d154d061a953c1d437a5f79e5a6a50ce0e
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
import sys
class TextProgress:
    """Render a simple ten-segment textual progress bar on stdout."""

    def __init__(self):
        # total number of steps (set by initialize())
        self.nstep = 0
        # optional label shown beside the bar
        self.text = None
        # percentage at which the bar was last redrawn
        self.oldprogress = 0
        # current percentage
        self.progress = 0
        self.calls = 0

    def initialize(self, nstep, text=None):
        """Set the total step count and an optional label."""
        self.nstep = float(nstep)
        self.text = text
        #sys.stdout.write("\n")

    def update(self, step, text=None):
        """Advance to *step* of nstep, redrawing the bar when it moved
        at least two percentage points or the label changed."""
        self.progress = int(step * 100 / self.nstep)
        redraw = (self.progress / 2 >= self.oldprogress / 2 + 1
                  or self.text != text)
        if redraw:
            filled = int(self.progress / 10)
            bar = "\r[" + filled * "=" + (10 - filled) * "-"
            bar += "] %3i" % self.progress + "%"
            if text:
                bar += " " + text
            sys.stdout.write("\r" + 70 * " ")
            sys.stdout.flush()
            sys.stdout.write(bar)
            sys.stdout.flush()
            self.oldprogress = self.progress
        if self.progress >= 100 and text == "Done":
            print(" ")
        return
|
cclib/cclib
|
cclib/progress/textprogress.py
|
Python
|
bsd-3-clause
| 1,307
|
[
"cclib"
] |
100f44bfddc25546a54c49f4c3ff6b04b0bc0e3d1621ec5846bd535f04489c91
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#from enthought.tvtk.api import tvtk
#from enthought.tvtk.tools import ivtk
#from mayavi import ivtk
#from gui.plotutils import AsyncCall
#from enthought.traits.api import Instance
#from ray_trace.system import System
#from wx import CallAfter
#class _PlotFrame(ivtk.IVTK):
# opsys=Instance(System)
# def __init__(self, **traits):
# ivtk.IVTK.__init__(self,**traits)
# #self.open()
# #self.plot()
#
# def plot(self):
# AsyncCall(self.open).Wait()
# actor=self.opsys.tvtk_actor()
# AsyncCall(self.scene.add_actor, actor).Wait()
# self.reset()
# def clear(self):
# AsyncCall(self.open).Wait()
# AsyncCall(self.scene.renderer.remove_all_view_props).Wait()
# self.reset()#
# def reset(self):
# AsyncCall(self.scene.reset_zoom)
# def close(self):
# AsyncCall(self._close)
# #AsyncCall(ivtk.IVTK.close, self)
# def _close(self):
# self.scene.renderer.remove_all_view_props()
# ivtk.IVTK.close(self)
#def PlotFrame(**traits):
# OB= AsyncCall(_PlotFrame, **traits).Wait()
# OB.plot()
# return OB
#_pf= Instance(tvtk_frame,()) #this initializes the tvtk_frame
# def __init__(self,opsys):
#HasTraits.__init__(self,**traits)
# self._pf=AsyncCall(tvtk_frame, opsys).Wait()
# AsyncCall(self._pf.open).Wait()
#self._pf.scene.background=(0,0,0)
# if self.opsys!=None:
# print self.opsys
#self.actor=self.opsys.tvtk_actor()
# self._pf.plot(self.opsys)
#self._pf.reset()
# def get_scene(self):
# """ Returns the tvtk scene
# """
# return self._pf.scene
# def add_text(self,text="",scale=(1.,1.,1.), position=(0.,0.,0.),follower=True):
#atext = tvtk.TextSource()
# atext = tvtk.VectorText()
# atext.text=(text)
# textMapper = tvtk.PolyDataMapper()
# textMapper.input=atext.output
#textActor = tvtk.Actor()
# textActor = tvtk.Follower()
# textActor.mapper=textMapper
# textActor.scale=scale
# textActor.position=position
# self._pf.scene.add_actor(textActor)
# if follower==True:
# textActor.camera=self._pf.scene.camera
# self._pf.reset()
# return textActor
# def add_caption(self,caption="",attachment_point=(0,0,0),border=False):
#
# cap=tvtk.CaptionActor2D(caption=caption,
# attachment_point=attachment_point,
# border=border, position2=(.12,.05))
# #position2 is to make the caption smaller, but I dont really
# #understand how it is working
# self._pf.scene.add_actor(cap)
# return cap
# Create the axes and the associated mapper and actor.
#def axes_actor(origin=(0,0,0), scale=(1.,1.,1.)):
# axes = tvtk.Axes()
# axes.origin=(0, 0, 0)
# axesMapper = tvtk.PolyDataMapper()
# axesMapper.input=axes.output
# #SetInputConnection(axes.GetOutputPort())
# axesActor = tvtk.Actor()
# axesActor.mapper=axesMapper
# axesActor
# return axesActor
|
coupdair/pyoptools
|
pyoptools/gui/plot_frame.py
|
Python
|
bsd-3-clause
| 3,213
|
[
"Mayavi"
] |
146465a3b164499a520b447771e05a7b7044b5bee5534175b10109281869e864
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the GaussianDropout layer."""
# pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
from keras import backend
from keras.engine import base_layer
from keras.utils import tf_utils
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.layers.GaussianDropout')
class GaussianDropout(base_layer.BaseRandomLayer):
    """Apply multiplicative 1-centered Gaussian noise.

    As it is a regularization layer, it is only active at training time.

    Args:
      rate: Float, drop probability (as with `Dropout`).
        The multiplicative noise will have
        standard deviation `sqrt(rate / (1 - rate))`.
      seed: Integer, optional random seed to enable deterministic behavior.

    Call arguments:
      inputs: Input tensor (of any rank).
      training: Python boolean indicating whether the layer should behave in
        training mode (adding dropout) or in inference mode (doing nothing).

    Input shape:
      Arbitrary. Use the keyword argument `input_shape`
      (tuple of integers, does not include the samples axis)
      when using this layer as the first layer in a model.

    Output shape:
      Same shape as input.
    """

    def __init__(self, rate, seed=None, **kwargs):
        super(GaussianDropout, self).__init__(seed=seed, **kwargs)
        self.supports_masking = True
        self.rate = rate
        self.seed = seed

    def call(self, inputs, training=None):
        if not 0 < self.rate < 1:
            # a rate outside (0, 1) disables the layer entirely
            return inputs

        def add_noise():
            stddev = np.sqrt(self.rate / (1.0 - self.rate))
            noise = self._random_generator.random_normal(
                shape=tf.shape(inputs),
                mean=1.0,
                stddev=stddev,
                dtype=inputs.dtype)
            return inputs * noise

        return backend.in_train_phase(add_noise, inputs, training=training)

    def get_config(self):
        base_config = super(GaussianDropout, self).get_config()
        own_config = {'rate': self.rate, 'seed': self.seed}
        return {**base_config, **own_config}

    @tf_utils.shape_type_conversion
    def compute_output_shape(self, input_shape):
        return input_shape
|
keras-team/keras
|
keras/layers/regularization/gaussian_dropout.py
|
Python
|
apache-2.0
| 2,819
|
[
"Gaussian"
] |
236ad0f8b44fc0ac2926592e3e067295ec4d78f159934e089003d2712ab6a129
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.